asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge remote-tracking branch 'airlied/drm-next' into topic/drm-misc
author    Daniel Vetter <daniel.vetter@ffwll.ch>
          Tue, 19 Jul 2016 07:27:29 +0000 (09:27 +0200)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
          Tue, 19 Jul 2016 07:27:29 +0000 (09:27 +0200)
Backmerge drm-next to be able to apply Chris' connector_unregister_all
cleanup (need latest i915 and sun4i state for that).

Also there's a trivial conflict in ttm_bo.c that git rerere fails to
remember.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
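
The workflow behind such a backmerge, sketched with the branch and remote
names from the message above (the exact commands are an editor's assumption,
not part of the commit):

    git checkout topic/drm-misc
    git merge airlied/drm-next
    # rerere does not replay the recorded resolution here, so the trivial
    # conflict in drivers/gpu/drm/ttm/ttm_bo.c is resolved by hand
    git add drivers/gpu/drm/ttm/ttm_bo.c
    git commit --signoff
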
1107 files changed:
.mailmap
CREDITS
Documentation/ABI/testing/configfs-usb-gadget-uvc
Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
Documentation/devicetree/bindings/hwmon/ina2xx.txt
Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/leds/leds-class.txt
Documentation/scsi/scsi_eh.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/pgalloc.h
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/abilis_tb100.dtsi
arch/arc/boot/dts/abilis_tb101.dtsi
arch/arc/boot/dts/axc001.dtsi
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/eznps.dts
arch/arc/boot/dts/nsim_700.dts
arch/arc/boot/dts/nsimosci.dts
arch/arc/boot/dts/nsimosci_hs.dts
arch/arc/boot/dts/nsimosci_hs_idu.dts
arch/arc/boot/dts/skeleton.dtsi
arch/arc/boot/dts/skeleton_hs.dtsi
arch/arc/boot/dts/skeleton_hs_idu.dtsi
arch/arc/boot/dts/vdk_axc003.dtsi
arch/arc/boot/dts/vdk_axc003_idu.dtsi
arch/arc/include/asm/atomic.h
arch/arc/include/asm/entry-compact.h
arch/arc/include/asm/mmu_context.h
arch/arc/include/asm/pgalloc.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/processor.h
arch/arc/include/asm/smp.h
arch/arc/include/asm/spinlock.h
arch/arc/include/asm/thread_info.h
arch/arc/include/asm/uaccess.h
arch/arc/include/uapi/asm/swab.h
arch/arc/kernel/entry-compact.S
arch/arc/kernel/intc-compact.c
arch/arc/kernel/perf_event.c
arch/arc/kernel/setup.c
arch/arc/kernel/signal.c
arch/arc/kernel/troubleshoot.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/am57xx-idk-common.dtsi
arch/arm/boot/dts/dm8148-evm.dts
arch/arm/boot/dts/dm8148-t410.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra74x.dtsi
arch/arm/boot/dts/exynos5250-snow-common.dtsi
arch/arm/boot/dts/exynos5420-peach-pit.dts
arch/arm/boot/dts/omap3-evm-37xx.dts
arch/arm/boot/dts/omap3-igep.dtsi
arch/arm/boot/dts/omap3-igep0020-common.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/omap3-zoom3.dts
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap5-igep0050.dts
arch/arm/boot/dts/omap5-uevm.dts
arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
arch/arm/boot/dts/stih407-family.dtsi
arch/arm/boot/dts/sun6i-a31s-primo81.dts
arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
arch/arm/configs/exynos_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/include/asm/pgalloc.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/kernel/smp.c
arch/arm/mach-exynos/Kconfig
arch/arm/mach-imx/mach-imx6ul.c
arch/arm/mach-omap1/ams-delta-fiq-handler.S
arch/arm/mach-omap1/ams-delta-fiq.c
arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/omap-secure.h
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/powerdomain.c
arch/arm/mach-omap2/powerdomains7xx_data.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-vexpress/spc.c
arch/arm/plat-samsung/devs.c
arch/arm64/Makefile
arch/arm64/boot/dts/lg/lg1312.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/include/asm/kgdb.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/smp.h
arch/arm64/include/asm/spinlock.h
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/kgdb.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/traps.c
arch/arm64/mm/context.c
arch/arm64/mm/fault.c
arch/arm64/mm/flush.c
arch/avr32/include/asm/pgalloc.h
arch/cris/include/asm/pgalloc.h
arch/frv/mm/pgalloc.c
arch/hexagon/include/asm/pgalloc.h
arch/ia64/Kconfig
arch/ia64/include/asm/thread_info.h
arch/ia64/kernel/init_task.c
arch/m68k/include/asm/mcf_pgalloc.h
arch/m68k/include/asm/motorola_pgalloc.h
arch/m68k/include/asm/sun3_pgalloc.h
arch/metag/include/asm/pgalloc.h
arch/microblaze/include/asm/pgalloc.h
arch/microblaze/mm/pgtable.c
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/pgalloc.h
arch/mips/kvm/emulate.c
arch/mips/kvm/interrupt.h
arch/mips/kvm/locore.S
arch/mips/kvm/mips.c
arch/mn10300/include/asm/thread_info.h
arch/mn10300/kernel/kgdb.c
arch/mn10300/mm/pgtable.c
arch/nios2/include/asm/pgalloc.h
arch/openrisc/include/asm/pgalloc.h
arch/openrisc/mm/ioremap.c
arch/parisc/include/asm/pgalloc.h
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/32/pgalloc.h
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/include/asm/book3s/64/pgalloc.h
arch/powerpc/include/asm/book3s/64/radix.h
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
arch/powerpc/include/asm/book3s/64/tlbflush.h
arch/powerpc/include/asm/book3s/pgalloc.h
arch/powerpc/include/asm/nohash/64/pgalloc.h
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/platforms/512x/clock-commonclk.c
arch/powerpc/platforms/cell/spufs/coredump.c
arch/powerpc/platforms/pseries/iommu.c
arch/s390/include/asm/kvm_host.h
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kvm/intercept.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/pgalloc.c
arch/s390/mm/pgtable.c
arch/score/include/asm/pgalloc.h
arch/sh/include/asm/pgalloc.h
arch/sh/mm/pgtable.c
arch/sparc/include/asm/pgalloc_64.h
arch/sparc/mm/init_64.c
arch/tile/include/asm/thread_info.h
arch/tile/kernel/process.c
arch/tile/mm/pgtable.c
arch/um/kernel/mem.c
arch/unicore32/include/asm/pgalloc.h
arch/x86/Kconfig
arch/x86/boot/Makefile
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/intel-family.h [new file with mode: 0644]
arch/x86/include/asm/kprobes.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/pgalloc.h
arch/x86/include/asm/stacktrace.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/espfix_64.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/traps.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/mm/pgtable.c
arch/x86/platform/efi/efi_64.c
arch/x86/xen/mmu.c
arch/x86/xen/p2m.c
arch/xtensa/include/asm/pgalloc.h
block/blk-lib.c
block/blk-mq.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/acpica/nsparse.c
drivers/acpi/bus.c
drivers/acpi/ec.c
drivers/acpi/internal.h
drivers/ata/libata-eh.c
drivers/base/Makefile
drivers/base/isa.c
drivers/base/module.c
drivers/base/power/opp/cpu.c
drivers/base/power/opp/of.c
drivers/base/power/opp/opp.h
drivers/block/aoe/aoecmd.c
drivers/block/nbd.c
drivers/block/xen-blkfront.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/clk/Kconfig
drivers/clk/microchip/clk-pic32mzda.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/pcc-cpufreq.c
drivers/devfreq/devfreq.c
drivers/devfreq/event/exynos-nocp.c
drivers/dma/at_xdmac.c
drivers/dma/mv_xor.c
drivers/edac/edac_mc.c
drivers/edac/sb_edac.c
drivers/extcon/extcon-palmas.c
drivers/firmware/efi/arm-init.c
drivers/gpio/Kconfig
drivers/gpio/gpio-104-dio-48e.c
drivers/gpio/gpio-104-idi-48.c
drivers/gpio/gpio-bcm-kona.c
drivers/gpio/gpio-zynq.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.h
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/include/amd_pcie.h
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
drivers/gpu/drm/amd/include/atombios.h
drivers/gpu/drm/amd/include/cgs_common.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
drivers/gpu/drm/amd/powerplay/inc/smu74.h
drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/amd/scheduler/sched_fence.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bochs/bochs_mm.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
drivers/gpu/drm/bridge/dw-hdmi.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
drivers/gpu/drm/etnaviv/state_hi.xml.h
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_dp.c
drivers/gpu/drm/exynos/exynos_drm_core.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_iommu.c
drivers/gpu/drm/exynos/exynos_drm_iommu.h
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
drivers/gpu/drm/hisilicon/kirin/Kconfig
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
drivers/gpu/drm/i915/Kconfig.debug
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c [deleted file]
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_pci.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_breadcrumbs.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_device_info.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dpio_phy.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
drivers/gpu/drm/i915/intel_dsi_pll.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_fifo_underrun.c
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_guc_loader.c
drivers/gpu/drm/i915/intel_gvt.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sideband.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/imx/dw_hdmi-imx.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-drm.h
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/imx/ipuv3-plane.h
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_rd.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/nouveau/include/nvif/cl0080.h
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_hwmon.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nv04_fbcon.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_fbcon.c
drivers/gpu/drm/nouveau/nvc0_fbcon.c
drivers/gpu/drm/nouveau/nvkm/core/subdev.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/hdmi5.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/sti/Kconfig
drivers/gpu/drm/sti/sti_awg_utils.c
drivers/gpu/drm/sti/sti_crtc.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_hdmi.h
drivers/gpu/drm/sti/sti_plane.c
drivers/gpu/drm/sti/sti_plane.h
drivers/gpu/drm/sti/sti_vtg.c
drivers/gpu/drm/sun4i/Kconfig
drivers/gpu/drm/sun4i/sun4i_backend.c
drivers/gpu/drm/sun4i/sun4i_dotclock.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dpaux.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/dsi.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/hdmi.h
drivers/gpu/drm/tegra/output.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/tegra/sor.h
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_dpi.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vc4/vc4_qpu_defines.h
drivers/gpu/drm/vc4/vc4_regs.h
drivers/gpu/drm/vc4/vc4_validate.c
drivers/gpu/drm/vc4/vc4_validate_shaders.c
drivers/gpu/drm/virtio/virtgpu_ttm.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/gpu/host1x/cdma.c
drivers/gpu/host1x/channel.c
drivers/gpu/host1x/debug.c
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/dev.h
drivers/gpu/host1x/hw/cdma_hw.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/host1x/hw/debug_hw.c
drivers/gpu/host1x/hw/intr_hw.c
drivers/gpu/host1x/hw/syncpt_hw.c
drivers/gpu/host1x/intr.c
drivers/gpu/host1x/intr.h
drivers/gpu/host1x/job.c
drivers/gpu/host1x/syncpt.c
drivers/gpu/host1x/syncpt.h
drivers/gpu/ipu-v3/ipu-dc.c
drivers/gpu/ipu-v3/ipu-di.c
drivers/gpu/ipu-v3/ipu-dmfc.c
drivers/hid/hid-elo.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hiddev.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/lm90.c
drivers/hwtracing/coresight/coresight-tmc-etr.c
drivers/hwtracing/coresight/coresight.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-octeon.c
drivers/i2c/muxes/i2c-mux-reg.c
drivers/iio/accel/st_accel_buffer.c
drivers/iio/accel/st_accel_core.c
drivers/iio/common/st_sensors/st_sensors_buffer.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/dac/Kconfig
drivers/iio/dac/ad5592r-base.c
drivers/iio/gyro/st_gyro_buffer.c
drivers/iio/gyro/st_gyro_core.c
drivers/iio/humidity/am2315.c
drivers/iio/humidity/hdc100x.c
drivers/iio/imu/bmi160/bmi160_core.c
drivers/iio/industrialio-trigger.c
drivers/iio/light/apds9960.c
drivers/iio/light/bh1780.c
drivers/iio/light/max44000.c
drivers/iio/magnetometer/st_magn_buffer.c
drivers/iio/magnetometer/st_magn_core.c
drivers/iio/pressure/bmp280.c
drivers/iio/pressure/st_pressure_buffer.c
drivers/iio/pressure/st_pressure_core.c
drivers/iio/proximity/as3935.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/device.c
drivers/infiniband/core/iwpm_msg.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/mad.c
drivers/infiniband/hw/hfi1/mad.h
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hfi1/qsfp.c
drivers/infiniband/hw/hfi1/trace.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/hfi1/verbs_txreq.c
drivers/infiniband/hw/hfi1/verbs_txreq.h
drivers/infiniband/hw/i40iw/i40iw.h
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx4/ah.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/usnic/usnic_uiom.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/sw/rdmavt/vt.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/infiniband/ulp/srpt/ib_srpt.h
drivers/iommu/arm-smmu-v3.c
drivers/iommu/intel-iommu.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-mips-gic.c
drivers/leds/led-core.c
drivers/leds/trigger/ledtrig-heartbeat.c
drivers/mcb/mcb-core.c
drivers/media/usb/uvc/uvc_v4l2.c
drivers/media/v4l2-core/v4l2-mc.c
drivers/memory/omap-gpmc.c
drivers/misc/mei/client.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/eba.c
drivers/mtd/ubi/kapi.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.h
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/nvme/host/pci.c
drivers/of/fdt.c
drivers/of/irq.c
drivers/of/of_reserved_mem.c
drivers/pci/vc.c
drivers/perf/arm_pmu.c
drivers/phy/phy-exynos-mipi-video.c
drivers/phy/phy-ti-pipe3.c
drivers/phy/phy-twl4030-usb.c
drivers/pinctrl/pinconf-generic.c
drivers/platform/x86/Kconfig
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/pwm/core.c
drivers/pwm/pwm-atmel-hlcdc.c
drivers/pwm/sysfs.c
drivers/regulator/qcom_smd-regulator.c
drivers/regulator/tps51632-regulator.c
drivers/scsi/53c700.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_error.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
drivers/staging/rtl8188eu/core/rtw_efuse.c
drivers/staging/rtl8188eu/hal/usb_halinit.c
drivers/thermal/cpu_cooling.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/dwc3/dwc3-st.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_tcm.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/storage_common.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/udc-core.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-msm.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/ohci-q.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/musb/omap2430.c
drivers/usb/musb/sunxi.c
drivers/usb/phy/phy-twl6030-usb.c
drivers/usb/serial/mos7720.c
drivers/usb/storage/uas.c
drivers/usb/usbip/vhci_hcd.c
drivers/watchdog/Kconfig
drivers/xen/balloon.c
drivers/xen/xen-pciback/conf_space.c
drivers/xen/xen-pciback/conf_space_header.c
fs/autofs4/autofs_i.h
fs/autofs4/expire.c
fs/autofs4/root.c
fs/autofs4/waitq.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/btrfs/check-integrity.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/hash.c
fs/btrfs/hash.h
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/super.c
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/tests/btrfs-tests.h
fs/btrfs/tests/extent-buffer-tests.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/free-space-tests.c
fs/btrfs/tests/free-space-tree-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/tests/qgroup-tests.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/coredump.c
fs/dcache.c
fs/debugfs/file.c
fs/ecryptfs/kthread.c
fs/internal.h
fs/jbd2/journal.c
fs/libfs.c
fs/namei.c
fs/namespace.c
fs/nfsd/blocklayout.c
fs/nfsd/nfs2acl.c
fs/nfsd/nfs3acl.c
fs/nfsd/nfs4acl.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/nilfs2/the_nilfs.c
fs/ocfs2/Makefile
fs/ocfs2/buffer_head_io.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/posix_acl.c
fs/proc/root.c
fs/reiserfs/super.c
fs/ubifs/file.c
fs/udf/partition.c
fs/udf/super.c
fs/udf/udf_sb.h
include/asm-generic/qspinlock.h
include/drm/bridge/analogix_dp.h
include/drm/drm_fb_cma_helper.h
include/drm/i915_pciids.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/linux/binfmts.h
include/linux/clk-provider.h
include/linux/cpuidle.h
include/linux/dcache.h
include/linux/efi.h
include/linux/iio/common/st_sensors.h
include/linux/init_task.h
include/linux/isa.h
include/linux/jump_label.h
include/linux/kasan.h
include/linux/leds.h
include/linux/mlx5/device.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/mlx5/vport.h
include/linux/mm.h
include/linux/of.h
include/linux/of_pci.h
include/linux/of_reserved_mem.h
include/linux/pinctrl/pinconf-generic.h
include/linux/pwm.h
include/linux/sched.h
include/linux/seqlock.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/svc_xprt.h
include/linux/sunrpc/xprt.h
include/linux/thermal.h
include/linux/usb/gadget.h
include/linux/usb/musb.h
include/media/v4l2-mc.h
include/net/compat.h
include/net/ip_vs.h
include/net/netfilter/nf_queue.h
include/net/netns/netfilter.h
include/net/pkt_cls.h
include/net/sch_generic.h
include/rdma/ib_verbs.h
include/rdma/rdma_vt.h
include/sound/hdmi-codec.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/i915_drm.h
include/uapi/drm/vc4_drm.h
include/uapi/linux/btrfs.h
include/uapi/linux/gtp.h
include/uapi/sound/Kbuild
include/video/imx-ipu-v3.h
init/main.c
kernel/events/core.c
kernel/fork.c
kernel/futex.c
kernel/jump_label.c
kernel/kcov.c
kernel/locking/mutex-debug.c
kernel/locking/mutex-debug.h
kernel/locking/mutex.c
kernel/locking/mutex.h
kernel/locking/qspinlock.c
kernel/power/process.c
kernel/relay.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/sched.h
kernel/sched/stats.h
kernel/trace/bpf_trace.c
kernel/trace/trace_printk.c
mm/compaction.c
mm/fadvise.c
mm/filemap.c
mm/hugetlb.c
mm/internal.h
mm/kasan/kasan.c
mm/kmemleak.c
mm/memcontrol.c
mm/memory.c
mm/mempool.c
mm/migrate.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_owner.c
mm/percpu.c
mm/shmem.c
mm/swap.c
mm/swap_state.c
net/bridge/br_fdb.c
net/compat.c
net/core/gen_stats.c
net/core/net-sysfs.c
net/ipv4/udp.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/nf_dup_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/mac80211/mesh.c
net/mac80211/sta_info.h
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_irc.c
net/netfilter/nf_conntrack_sane.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_conntrack_tftp.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_queue.c
net/netfilter/x_tables.c
net/packet/af_packet.c
net/rds/rds.h
net/rds/recv.c
net/rds/send.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/threads.c
net/rxrpc/rxkad.c
net/sched/act_police.c
net/sched/cls_flower.c
net/sched/cls_u32.c
net/sched/sch_drr.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
net/sched/sch_ingress.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_tbf.c
net/sunrpc/clnt.c
net/sunrpc/svc_xprt.c
net/sunrpc/xprtsock.c
net/tipc/netlink_compat.c
net/unix/af_unix.c
net/wireless/core.c
net/wireless/wext-core.c
scripts/mod/file2alias.c
security/keys/key.c
sound/drivers/dummy.c
sound/hda/hdac_regmap.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/hdmi-codec.c
tools/perf/util/data-convert-bt.c
tools/perf/util/event.c
tools/perf/util/symbol.c
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
tools/testing/selftests/net/reuseport_bpf.c
tools/testing/selftests/vm/compaction_test.c
tools/virtio/ringtest/Makefile
tools/virtio/ringtest/README
tools/virtio/ringtest/noring.c [new file with mode: 0644]
tools/virtio/ringtest/run-on-all.sh
tools/vm/slabinfo.c
virt/kvm/kvm_main.c

index 08b80428f5837001940381417e0b1d0be1125b2c..52489f56406941d569b8f55ec2af9c70fbc89206 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
+Antoine Tenart <antoine.tenart@free-electrons.com>
 Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
 Archit Taneja <archit@ti.com>
 Arnaud Patard <arnaud.patard@rtp-net.org>
@@ -30,6 +31,9 @@ Axel Lin <axel.lin@gmail.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Boris Brezillon <boris.brezillon@free-electrons.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Christoph Hellwig <hch@lst.de>
@@ -89,6 +93,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
 Mark Brown <broonie@sirena.org.uk>
 Matthieu CASTET <castet.matthieu@free.fr>
+Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
@@ -122,6 +127,7 @@ Santosh Shilimkar <santosh.shilimkar@oracle.org>
 Sascha Hauer <s.hauer@pengutronix.de>
 S.Çağlar Onur <caglar@pardus.org.tr>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
+Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com> <shuah.khan@hp.com> <shuahkh@osg.samsung.com> <shuah.kh@samsung.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
diff --git a/CREDITS b/CREDITS
index 0f0bf22afe0c469507bab4ef1dfbc13cac386c25..2a3fbcd229e666608c66ff1c0ad9b5a3ac0b58dd 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -649,6 +649,7 @@ D: Configure, Menuconfig, xconfig
 
 N: Mauro Carvalho Chehab
 E: m.chehab@samsung.org
+E: mchehab@osg.samsung.com
 E: mchehab@infradead.org
 D: Media subsystem (V4L/DVB) drivers and core
 D: EDAC drivers and EDAC 3.0 core rework
index 2f4a0051b32d3bb7feb5552caba934d8b2f214d7..1ba0d0fda9c0b5cac32a0169790b5c87f819f808 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/uvc.name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   UVC function directory
 
                streaming_maxburst      - 0..15 (ss only)
@@ -9,37 +9,37 @@ Description:  UVC function directory
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Control descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/class
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/class/ss
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Super speed control class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/class/fs
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Full speed control class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Output terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default output terminal descriptors
 
                All attributes read only:
@@ -53,12 +53,12 @@ Description:        Default output terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Camera terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default camera terminal descriptors
 
                All attributes read only:
@@ -75,12 +75,12 @@ Description:        Default camera terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/processing
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Processing unit descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/processing/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default processing unit descriptors
 
                All attributes read only:
@@ -94,49 +94,49 @@ Description:        Default processing unit descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/header
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Control header descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/header/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific control header descriptors
 
 dwClockFrequency
 bcdUVC
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Streaming descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Super speed streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   High speed streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Full speed streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Color matching descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default color matching descriptors
 
                All attributes read only:
@@ -150,12 +150,12 @@ Description:      Default color matching descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   MJPEG format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific MJPEG format descriptors
 
                All attributes read only,
@@ -174,7 +174,7 @@ Description:        Specific MJPEG format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific MJPEG frame descriptors
 
                dwFrameInterval         - indicates how frame interval can be
@@ -196,12 +196,12 @@ Description:      Specific MJPEG frame descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Uncompressed format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific uncompressed format descriptors
 
                bmaControls             - this format's data for bmaControls in
@@ -221,7 +221,7 @@ Description:        Specific uncompressed format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific uncompressed frame descriptors
 
                dwFrameInterval         - indicates how frame interval can be
@@ -243,12 +243,12 @@ Description:      Specific uncompressed frame descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/header
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Streaming header descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific streaming header descriptors
 
                All attributes read only:
index 6708c5e264aa81875d0eec9c556b8b14429d4c49..33e96f74063925fb253b7ac16c73f43b76c334c8 100644 (file)
@@ -1,4 +1,4 @@
-What           /sys/bus/iio/devices/iio:deviceX/in_proximity_raw
+What           /sys/bus/iio/devices/iio:deviceX/in_proximity_input
 Date:          March 2014
 KernelVersion: 3.15
 Contact:       Matt Ranostay <mranostay@gmail.com>
index 4f2ba8c13d9259c528322070790f92da84b3bb58..4a0f4f7682ad4a742f32e66125a424e5dc6837f7 100644 (file)
@@ -5,6 +5,7 @@ Required properties for dp-controller:
                platform specific such as:
                 * "samsung,exynos5-dp"
                 * "rockchip,rk3288-dp"
+                * "rockchip,rk3399-edp"
        -reg:
                physical base address of the controller and length
                of memory mapped region.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt b/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt
new file mode 100644 (file)
index 0000000..b9877ac
--- /dev/null
@@ -0,0 +1,7 @@
+LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "lg,lp079qx1-sp0v"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt b/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt
new file mode 100644 (file)
index 0000000..4214151
--- /dev/null
@@ -0,0 +1,7 @@
+LG 9.7" (2048x1536 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "lg,lp097qx1-spa1"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt b/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt
new file mode 100644 (file)
index 0000000..dba298b
--- /dev/null
@@ -0,0 +1,7 @@
+Samsung 12.2" (2560x1600 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "samsung,lsn122dl01-c01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt
new file mode 100644 (file)
index 0000000..4aff25b
--- /dev/null
@@ -0,0 +1,7 @@
+Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq101k1ly04"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt
new file mode 100644 (file)
index 0000000..bcb0e8a
--- /dev/null
@@ -0,0 +1,7 @@
+Sharp 12.3" (2400x1600 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq123p1jx31"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt b/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt
new file mode 100644 (file)
index 0000000..1e87fe6
--- /dev/null
@@ -0,0 +1,7 @@
+Starry 12.2" (1920x1200 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "starry,kr122ea0sra"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
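
All six panel bindings above reuse the simple-panel pattern, so a consuming
device tree needs little beyond the compatible string. A minimal sketch,
taking the first panel as an example (the regulator and backlight phandles
are hypothetical, and both properties are optional per simple-panel):

    panel {
        compatible = "lg,lp079qx1-sp0v";
        power-supply = <&panel_supply>;  /* hypothetical regulator */
        backlight = <&backlight>;        /* hypothetical backlight node */
    };
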
index e832ff98fd61c1e527ac40a92cf80f1df0e20594..01cced1c2a180cfe4307cc5cfb0f70b8616f3d81 100644 (file)
@@ -2,7 +2,8 @@ Rockchip RK3288 specific extensions to the Analogix Display Port
 ================================
 
 Required properties:
-- compatible: "rockchip,rk3288-edp";
+- compatible: "rockchip,rk3288-dp",
+             "rockchip,rk3399-edp";
 
 - reg: physical base address of the controller and length
 
@@ -27,6 +28,12 @@ Required properties:
     Port 0: contained 2 endpoints, connecting to the output of vop.
     Port 1: contained 1 endpoint, connecting to the input of panel.
 
+Optional property for different chips:
+- clocks: from common clock binding: handle to grf_vio clock.
+
+- clock-names: from common clock binding:
+              Required elements: "grf"
+
 For the below properties, please refer to Analogix DP binding document:
  * Documentation/devicetree/bindings/drm/bridge/analogix_dp.txt
 - phys (required)
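
A minimal sketch of an rk3399 node using the new compatible together with
the optional "grf" clock (the unit address, clock specifiers and phy phandle
are illustrative assumptions, not taken from the patch):

    edp: edp@ff970000 {
        compatible = "rockchip,rk3399-edp";
        reg = <0x0 0xff970000 0x0 0x8000>;
        clocks = <&cru PCLK_EDP>, <&cru PCLK_VIO_GRF>;
        clock-names = "dp", "grf";
        phys = <&edp_phy>;               /* hypothetical phy phandle */
        phy-names = "dp";
    };
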
index a3bd8c050c4ef2f84fcdc29f9325006af14ff01b..0fad7ed2ea191201af1a136d1fee19bd94e67ff2 100644 (file)
@@ -208,6 +208,7 @@ of the following host1x client modules:
     See ../clocks/clock-bindings.txt for details.
   - clock-names: Must include the following entries:
     - sor: clock input for the SOR hardware
+    - source: source clock for the SOR clock
     - parent: input for the pixel clock
     - dp: reference clock for the SOR clock
     - safe: safe reference for the SOR clock during power up
@@ -226,9 +227,9 @@ of the following host1x client modules:
  - nvidia,dpaux: phandle to a DisplayPort AUX interface
 
 - dpaux: DisplayPort AUX interface
-  - compatible: For Tegra124, must contain "nvidia,tegra124-dpaux".  Otherwise,
-    must contain '"nvidia,<chip>-dpaux", "nvidia,tegra124-dpaux"', where
-    <chip> is tegra132.
+  - compatible : Should contain one of the following:
+    - "nvidia,tegra124-dpaux": for Tegra124 and Tegra132
+    - "nvidia,tegra210-dpaux": for Tegra210
   - reg: Physical base address and length of the controller's registers.
   - interrupts: The interrupt outputs from the controller.
   - clocks: Must contain an entry for each entry in clock-names.
@@ -241,6 +242,12 @@ of the following host1x client modules:
   - reset-names: Must include the following entries:
     - dpaux
   - vdd-supply: phandle of a supply that powers the DisplayPort link
+  - i2c-bus: Subnode where I2C slave devices are listed. This subnode
+    must be always present. If there are no I2C slave devices, an empty
+    node should be added. See ../../i2c/i2c.txt for more information.
+
+  See ../pinctrl/nvidia,tegra124-dpaux-padctl.txt for information
+  regarding the DPAUX pad controller bindings.
 
 Example:
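
An illustrative sketch by the editor of a Tegra210 dpaux node with the newly
required i2c-bus subnode, not the file's actual example (the unit address,
interrupt, clock, reset and supply specifiers are all assumptions):

    dpaux@545c0000 {
        compatible = "nvidia,tegra210-dpaux";
        reg = <0x0 0x545c0000 0x0 0x40000>;
        interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&tegra_car TEGRA210_CLK_DPAUX>,
                 <&tegra_car TEGRA210_CLK_PLL_DP>;
        clock-names = "dpaux", "parent";
        resets = <&tegra_car 181>;
        reset-names = "dpaux";
        vdd-supply = <&avdd_dp>;

        /* mandatory even with no I2C slaves behind the pads */
        i2c-bus {
            #address-cells = <1>;
            #size-cells = <0>;
        };
    };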
 
index 9bcd5e87830d23882fa930c3b5214a94252f7957..02af0d94e9217d19e8bbec945ec10e338b6eeafd 100644 (file)
@@ -7,6 +7,7 @@ Required properties:
        - "ti,ina220" for ina220
        - "ti,ina226" for ina226
        - "ti,ina230" for ina230
+       - "ti,ina231" for ina231
 - reg: I2C address
 
 Optional properties:
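
A minimal sketch of a node using the new compatible (the I2C address and
shunt value are illustrative; shunt-resistor is the binding's optional
micro-ohm property):

    power-sensor@40 {
        compatible = "ti,ina231";
        reg = <0x40>;
        shunt-resistor = <10000>;  /* micro-ohms */
    };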
index bfeabb843941e0c74f509f25a96e15f3daffa711..71191ff0e7814c958502fbc7ee79c6d0f71c9b02 100644 (file)
@@ -44,8 +44,8 @@ Required properties:
 - our-claim-gpio: The GPIO that we use to claim the bus.
 - their-claim-gpios: The GPIOs that the other sides use to claim the bus.
   Note that some implementations may only support a single other master.
-- Standard I2C mux properties. See mux.txt in this directory.
-- Single I2C child bus node at reg 0. See mux.txt in this directory.
+- Standard I2C mux properties. See i2c-mux.txt in this directory.
+- Single I2C child bus node at reg 0. See i2c-mux.txt in this directory.
 
 Optional properties:
 - slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us.
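
A hedged sketch of an arbitrated bus tying these properties together (all
phandles and GPIO specifiers are hypothetical):

    i2c-arbitrator {
        compatible = "i2c-arb-gpio-challenge";
        #address-cells = <1>;
        #size-cells = <0>;
        i2c-parent = <&i2c4>;
        our-claim-gpio = <&gpf0 3 GPIO_ACTIVE_LOW>;
        their-claim-gpios = <&gpe0 4 GPIO_ACTIVE_LOW>;
        slew-delay-us = <10>;

        i2c@0 {
            reg = <0>;
            #address-cells = <1>;
            #size-cells = <0>;
        };
    };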
index 6078aefe7ed481bb9474623ac2dc67d8a0fda7f8..7ce23ac6130808da6a7f621f1991b13f17f185d6 100644 (file)
@@ -27,7 +27,8 @@ Required properties:
 - i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
                parents.
 
-Furthermore, I2C mux properties and child nodes. See mux.txt in this directory.
+Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this
+directory.
 
 Example:
 
index 66709a8255413417231608568ed76b7966e25e0b..21da3ecbb3700c06bc80cb3685ee31e56bc1471b 100644 (file)
@@ -22,8 +22,8 @@ Required properties:
 - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
   port is connected to.
 - mux-gpios: list of gpios used to control the muxer
-* Standard I2C mux properties. See mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
 
 Optional properties:
 - idle-state: value to set the muxer to when idle. When no value is
@@ -33,7 +33,7 @@ For each i2c child node, an I2C child bus will be created. They will
 be numbered based on their order in the device tree.
 
 Whenever an access is made to a device on a child bus, the value set
-in the revelant node's reg property will be output using the list of
+in the relevant node's reg property will be output using the list of
 GPIOs, the first in the list holding the least-significant value.
 
 If an idle state is defined, using the idle-state (optional) property,
index ae8af1694e950e9df436bee3143a6c0ca207c3b9..33119a98e1447681f5dbb04eb55df352ede95d6d 100644 (file)
@@ -28,9 +28,9 @@ Also required are:
 * Standard pinctrl properties that specify the pin mux state for each child
   bus. See ../pinctrl/pinctrl-bindings.txt.
 
-* Standard I2C mux properties. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
 
-* I2C child bus nodes. See mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
 
 For each named state defined in the pinctrl-names property, an I2C child bus
 will be created. I2C child bus numbers are assigned based on the index into
index 688783fbe696bebfc7daa9796ef299bcd108aa9c..de00d7fc450b005333e874b4b831a5585594d50b 100644 (file)
@@ -7,8 +7,8 @@ Required properties:
 - compatible: i2c-mux-reg
 - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
   port is connected to.
-* Standard I2C mux properties. See mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
 
 Optional properties:
 - reg: this pair of <offset size> specifies the register to control the mux.
@@ -24,7 +24,7 @@ Optional properties:
   given, it defaults to the last value used.
 
 Whenever an access is made to a device on a child bus, the value set
-in the revelant node's reg property will be output to the register.
+in the relevant node's reg property will be output to the register.
 
 If an idle state is defined, using the idle-state (optional) property,
 whenever an access is not being made to a device on a child bus, the
index 14aa6cf58201e4b92b191d0af535bc0d60aa66c1..6a9a63cb054304aa2a367b6381c116aadf0979f9 100644 (file)
@@ -13,10 +13,10 @@ Optional properties:
                     initialization. This is an array of 28 values (u8).
 
   - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip.
-                       firmware will use the pin to wakeup host system.
+                       firmware will use the pin to wake up the host system (u16).
   - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host
                      platform. The value will be configured to firmware. This
-                     is needed to work chip's sleep feature as expected.
+                     is needed for the chip's sleep feature to work as expected (u16).
   - interrupt-parent: phandle of the parent interrupt controller
   - interrupts : interrupt pin number to the cpu. Driver will request an irq based
                 on this interrupt number. During system suspend, the irq will be
@@ -50,7 +50,7 @@ calibration data is also available in below example.
                        0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
                        0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
                        0x00 0x00 0xf0 0x00>;
-               marvell,wakeup-pin = <0x0d>;
-               marvell,wakeup-gap-ms = <0x64>;
+               marvell,wakeup-pin = /bits/ 16 <0x0d>;
+               marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
        };
 };
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt
new file mode 100644 (file)
index 0000000..f2abdae
--- /dev/null
@@ -0,0 +1,60 @@
+Device tree binding for NVIDIA Tegra DPAUX pad controller
+=========================================================
+
+The Tegra Display Port Auxiliary (DPAUX) pad controller manages two pins
+which can be assigned to either the DPAUX channel or to an I2C
+controller.
+
+This document defines the device-specific binding for the DPAUX pad
+controller. Refer to pinctrl-bindings.txt in this directory for generic
+information about pin controller device tree bindings. Please refer to
+the binding document ../display/tegra/nvidia,tegra20-host1x.txt for more
+details on the DPAUX binding.
+
+Pin muxing:
+-----------
+
+Child nodes contain the pinmux configurations following the conventions
+from the pinctrl-bindings.txt document.
+
+Since only three configurations are possible, only three child nodes are
+needed to describe the pin muxing options for the DPAUX pads.
+Furthermore, given that the pad functions are only applicable to a
+single set of pads, the child nodes only need to describe the pad group
+the functions are being applied to rather than the individual pads.
+
+Required properties:
+- groups: Must be "dpaux-io"
+- function: Must be either "aux", "i2c" or "off".
+
+Example:
+--------
+
+       dpaux@545c0000 {
+               ...
+
+               state_dpaux_aux: pinmux-aux {
+                       groups = "dpaux-io";
+                       function = "aux";
+               };
+
+               state_dpaux_i2c: pinmux-i2c {
+                       groups = "dpaux-io";
+                       function = "i2c";
+               };
+
+               state_dpaux_off: pinmux-off {
+                       groups = "dpaux-io";
+                       function = "off";
+               };
+       };
+
+       ...
+
+       i2c@7000d100 {
+               ...
+               pinctrl-0 = <&state_dpaux_i2c>;
+               pinctrl-1 = <&state_dpaux_off>;
+               pinctrl-names = "default", "idle";
+               status = "disabled";
+       };
index a7440bcd67ffc9ac43afe39564cb434ce8e58bc6..44722769209f13c8457fd9adbd09eec7696643c1 100644 (file)
@@ -247,6 +247,7 @@ sony        Sony Corporation
 spansion       Spansion Inc.
 sprd   Spreadtrum Communications Inc.
 st     STMicroelectronics
+starry Starry Electronic Technology (ShenZhen) Co., LTD
 startek        Startek
 ste    ST-Ericsson
 stericsson     ST-Ericsson
@@ -255,6 +256,7 @@ synology    Synology, Inc.
 SUNW   Sun Microsystems, Inc
 tbs    TBS Technologies
 tcl    Toby Churchill Ltd.
+technexion     TechNexion
 technologic    Technologic Systems
 thine  THine Electronics, Inc.
 ti     Texas Instruments
@@ -269,6 +271,7 @@ tronsmart   Tronsmart
 truly  Truly Semiconductors Limited
 tyan   Tyan Computer Corporation
 upisemi        uPI Semiconductor Corp.
+uniwest        United Western Technologies Corp (UniWest)
 urt    United Radiant Technology Corporation
 usi    Universal Scientific Industrial Co., Ltd.
 v3     V3 Semiconductor
index d406d98339b25abbe3d1f5f05c4ac7f9856d3e9c..44f5e6bccd978fbe2329cdb39af87494aaafc09c 100644 (file)
@@ -74,8 +74,8 @@ blink_set() function (see <linux/leds.h>). To set an LED to blinking,
 however, it is better to use the API function led_blink_set(), as it
 will check and implement software fallback if necessary.
 
-To turn off blinking again, use the API function led_brightness_set()
-as that will not just set the LED brightness but also stop any software
+To turn off blinking, use the API function led_brightness_set()
+with brightness value LED_OFF, which should stop any software
 timers that may have been required for blinking.
 
 The blink_set() function should choose a user friendly blinking value
index 8638f61c8c9db70d767302497d417be6c9a0a030..37eca00796eeebf805afbbf1504753c1a0febec0 100644 (file)
@@ -263,19 +263,23 @@ scmd->allowed.
 
  3. scmd recovered
     ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
-       - shost->host_failed--
        - clear scmd->eh_eflags
        - scsi_setup_cmd_retry()
        - move from local eh_work_q to local eh_done_q
     LOCKING: none
+    CONCURRENCY: at most one thread per separate eh_work_q to
+                keep queue manipulation lockless
 
  4. EH completes
     ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
-           layer of failure.
+           layer of failure. May be called concurrently but must have
+           no more than one thread per separate eh_work_q to
+           manipulate the queue locklessly
        - scmd is removed from eh_done_q and scmd->eh_entry is cleared
        - if retry is necessary, scmd is requeued using
           scsi_queue_insert()
        - otherwise, scsi_finish_command() is invoked for scmd
+       - zero shost->host_failed
     LOCKING: queue or finish function performs appropriate locking
 
 
index 5f782ac305db9de1fc12160c3a1f55128b16bba1..5351c4be3aa2b3d9c3dbeced4e96794cc8bdd534 100644 (file)
@@ -1167,6 +1167,7 @@ F:        arch/arm/mach-footbridge/
 ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 M:     Shawn Guo <shawnguo@kernel.org>
 M:     Sascha Hauer <kernel@pengutronix.de>
+R:     Fabio Estevam <fabio.estevam@nxp.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
@@ -2250,7 +2251,8 @@ F:        include/net/ax25.h
 F:     net/ax25/
 
 AZ6007 DVB DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -2717,7 +2719,8 @@ F:        Documentation/filesystems/btrfs.txt
 F:     fs/btrfs/
 
 BTTV VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -2781,9 +2784,9 @@ F:        include/net/caif/
 F:     net/caif/
 
 CALGARY x86-64 IOMMU
-M:     Muli Ben-Yehuda <muli@il.ibm.com>
-M:     "Jon D. Mason" <jdmason@kudzu.us>
-L:     discuss@x86-64.org
+M:     Muli Ben-Yehuda <mulix@mulix.org>
+M:     Jon Mason <jdmason@kudzu.us>
+L:     iommu@lists.linux-foundation.org
 S:     Maintained
 F:     arch/x86/kernel/pci-calgary_64.c
 F:     arch/x86/kernel/tce_64.c
@@ -3094,6 +3097,7 @@ M:        Stephen Boyd <sboyd@codeaurora.org>
 L:     linux-clk@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/clock/
 F:     drivers/clk/
 X:     drivers/clk/clkdev.c
 F:     include/linux/clk-pr*
@@ -3351,7 +3355,8 @@ S:        Maintained
 F:     drivers/media/dvb-frontends/cx24120*
 
 CX88 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -3792,6 +3797,7 @@ Q:        https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:     Maintained
 F:     drivers/dma/
 F:     include/linux/dmaengine.h
+F:     Documentation/devicetree/bindings/dma/
 F:     Documentation/dmaengine/
 T:     git git://git.infradead.org/users/vkoul/slave-dma.git
 
@@ -4328,7 +4334,8 @@ F:        fs/ecryptfs/
 EDAC-CORE
 M:     Doug Thompson <dougthompson@xmission.com>
 M:     Borislav Petkov <bp@alien8.de>
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
@@ -4373,7 +4380,8 @@ S:        Maintained
 F:     drivers/edac/e7xxx_edac.c
 
 EDAC-GHES
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/ghes_edac.c
@@ -4397,19 +4405,22 @@ S:      Maintained
 F:     drivers/edac/i5000_edac.c
 
 EDAC-I5400
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/i5400_edac.c
 
 EDAC-I7300
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/i7300_edac.c
 
 EDAC-I7CORE
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/i7core_edac.c
@@ -4446,7 +4457,8 @@ S:        Maintained
 F:     drivers/edac/r82600_edac.c
 
 EDAC-SBRIDGE
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/sb_edac.c
@@ -4505,7 +4517,8 @@ S:        Maintained
 F:     drivers/net/ethernet/ibm/ehea/
 
 EM28XX VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -6524,6 +6537,7 @@ F:        include/uapi/linux/sunrpc/
 
 KERNEL SELFTEST FRAMEWORK
 M:     Shuah Khan <shuahkh@osg.samsung.com>
+M:     Shuah Khan <shuah@kernel.org>
 L:     linux-kselftest@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/shuah/linux-kselftest
 S:     Maintained
@@ -7395,7 +7409,8 @@ S:        Supported
 F:     drivers/media/pci/netup_unidvb/*
 
 MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 P:     LinuxTV.org Project
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -8046,6 +8061,7 @@ Q:        http://patchwork.kernel.org/project/linux-wireless/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/net/wireless/
 F:     drivers/net/wireless/
 
 NETXEN (1/10) GbE SUPPORT
@@ -8443,10 +8459,9 @@ F:       drivers/i2c/busses/i2c-ocores.c
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:     Rob Herring <robh+dt@kernel.org>
 M:     Frank Rowand <frowand.list@gmail.com>
-M:     Grant Likely <grant.likely@linaro.org>
 L:     devicetree@vger.kernel.org
 W:     http://www.devicetree.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
 S:     Maintained
 F:     drivers/of/
 F:     include/linux/of*.h
@@ -8454,12 +8469,10 @@ F:      scripts/dtc/
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 M:     Rob Herring <robh+dt@kernel.org>
-M:     Pawel Moll <pawel.moll@arm.com>
 M:     Mark Rutland <mark.rutland@arm.com>
-M:     Ian Campbell <ijc+devicetree@hellion.org.uk>
-M:     Kumar Gala <galak@codeaurora.org>
 L:     devicetree@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
+Q:     http://patchwork.ozlabs.org/project/devicetree-bindings/list/
 S:     Maintained
 F:     Documentation/devicetree/
 F:     arch/*/boot/dts/
@@ -9891,7 +9904,8 @@ S:        Odd Fixes
 F:     drivers/media/i2c/saa6588*
 
 SAA7134 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -10410,7 +10424,8 @@ S:      Maintained
 F:     drivers/media/radio/si4713/radio-usb-si4713.c
 
 SIANO DVB DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -11176,7 +11191,8 @@ S:      Maintained
 F:     drivers/media/i2c/tda9840*
 
 TEA5761 TUNER DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -11184,7 +11200,8 @@ S:      Odd fixes
 F:     drivers/media/tuners/tea5761.*
 
 TEA5767 TUNER DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -11571,7 +11588,8 @@ F:      include/linux/shmem_fs.h
 F:     mm/shmem.c
 
 TM6000 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -11925,7 +11943,8 @@ F:      drivers/usb/common/usb-otg-fsm.c
 
 USB OVER IP DRIVER
 M:     Valentina Manea <valentina.manea.m@gmail.com>
-M:     Shuah Khan <shuah.kh@samsung.com>
+M:     Shuah Khan <shuahkh@osg.samsung.com>
+M:     Shuah Khan <shuah@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     Documentation/usb/usbip_protocol.txt
@@ -11996,6 +12015,7 @@ L:      linux-usb@vger.kernel.org
 W:     http://www.linux-usb.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
 S:     Supported
+F:     Documentation/devicetree/bindings/usb/
 F:     Documentation/usb/
 F:     drivers/usb/
 F:     include/linux/usb.h
@@ -12169,6 +12189,7 @@ VIRTIO CORE, NET AND BLOCK DRIVERS
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 L:     virtualization@lists.linux-foundation.org
 S:     Maintained
+F:     Documentation/devicetree/bindings/virtio/
 F:     drivers/virtio/
 F:     tools/virtio/
 F:     drivers/net/virtio_net.c
@@ -12557,7 +12578,8 @@ S:      Maintained
 F:     arch/x86/entry/vdso/
 
 XC2028/3028 TUNER DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
index 801457b847a43e7bc3e9d5f62a6c9b3d2d64c0da..4acefc10c13aa587bf640d0140af2d3ee98f07e7 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc5
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
index d794384a0404d2e9b1d1e9c3f6a966ca877c647c..15996290fed4a920f22e2687cb77944e9c3b3cfc 100644 (file)
@@ -226,8 +226,8 @@ config ARCH_INIT_TASK
 config ARCH_TASK_STRUCT_ALLOCATOR
        bool
 
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
        bool
 
 # Select if arch wants to size task_struct dynamically via arch_task_struct_size:
@@ -606,6 +606,9 @@ config HAVE_ARCH_HASH
          file which provides platform-specific implementations of some
          functions in <linux/hash.h> or fs/namei.c.
 
+config ISA_BUS_API
+       def_bool ISA
+
 #
 # ABI hall of shame
 #
index aab14a019c20f82c7cc60084026b120727971214..c2ebb6f36c9d3358699eac1ebb5f9dfad081b68f 100644 (file)
@@ -40,7 +40,7 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return ret;
 }
 
@@ -53,7 +53,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
index 0dcbacfdea4b98ca7805624a5ffb59b9298c4992..0d3e59f56974816f71140e2d0c51245f92a33768 100644 (file)
@@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
 config ARCH_DISCONTIGMEM_ENABLE
-       def_bool y
+       def_bool n
 
 config ARCH_FLATMEM_ENABLE
        def_bool y
@@ -186,9 +186,6 @@ if SMP
 config ARC_HAS_COH_CACHES
        def_bool n
 
-config ARC_HAS_REENTRANT_IRQ_LV2
-       def_bool n
-
 config ARC_MCIP
        bool "ARConnect Multicore IP (MCIP) Support "
        depends on ISA_ARCV2
@@ -366,25 +363,10 @@ config NODES_SHIFT
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
-       bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+       bool "Setup Timer IRQ as high Priority"
        default n
-       # Timer HAS to be high priority, for any other high priority config
-       select ARC_IRQ3_LV2
        # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
-       depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
-       bool
-
-config ARC_IRQ5_LV2
-       bool
-
-config ARC_IRQ6_LV2
-       bool
-
-endif  #ARC_COMPACT_IRQ_LEVELS
+       depends on !SMP
 
 config ARC_FPU_SAVE_RESTORE
        bool "Enable FPU state persistence across context switch"
@@ -407,11 +389,6 @@ config ARC_HAS_LLSC
        default y
        depends on !ARC_CANT_LLSC
 
-config ARC_STAR_9000923308
-       bool "Workaround for llock/scond livelock"
-       default n
-       depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
 config ARC_HAS_SWAPE
        bool "Insn: SWAPE (endian-swap)"
        default y
@@ -471,7 +448,7 @@ config LINUX_LINK_BASE
 
 config HIGHMEM
        bool "High Memory Support"
-       select DISCONTIGMEM
+       select ARCH_DISCONTIGMEM_ENABLE
        help
          With ARC 2G:2G address split, only upper 2G is directly addressable by
          kernel. Enable this to potentially allow access to rest of 2G and PAE
index 02fabef2891cd8bf1a6eda36bcf93549eea16158..d4df6be66d583d8708585013c07020fbde28b658 100644 (file)
@@ -127,7 +127,7 @@ libs-y              += arch/arc/lib/ $(LIBGCC)
 
 boot           := arch/arc/boot
 
-#default target for make without any arguements.
+#default target for make without any arguments.
 KBUILD_IMAGE   := bootpImage
 
 all:   $(KBUILD_IMAGE)
index 3942634f805ade1545d30dd066d2ba88cdec3cac..02410b2114334466572c05e651b6227b02e415f0 100644 (file)
@@ -23,8 +23,6 @@
 
 
 / {
-       clock-frequency         = <500000000>;  /* 500 MHZ */
-
        soc100 {
                bus-frequency   = <166666666>;
 
index b0467229a5c45e9c10eb1cc2884b84b33ef2e603..f9e7686044ebee0a4c49e1d4824810e49747256e 100644 (file)
@@ -23,8 +23,6 @@
 
 
 / {
-       clock-frequency         = <500000000>;  /* 500 MHZ */
-
        soc100 {
                bus-frequency   = <166666666>;
 
index 3e02f152edcb5eac50870720be8ffe626cb1b1b6..6ae2c476ad825aee57fadb1e557b95a0bb082647 100644 (file)
@@ -15,7 +15,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <750000000>;  /* 750 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
 
index 378e455a94c4a86aa2af9a44a4d83c2cbbf771c2..14df46f141bf3450b8df0660bc0f248beef4669b 100644 (file)
@@ -14,7 +14,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <90000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index 64c94b2860ab569d03d4672db417ad1e3307f86a..3d6cfa32bf5142a50ff95b6f7e69871526172deb 100644 (file)
@@ -14,7 +14,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <90000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index b89f6c3eb35208e2686cf73017a08686a6cfad5e..1e0d225791c1250252e76697187a7d0ba8eb26ec 100644 (file)
@@ -18,7 +18,6 @@
 
 / {
        compatible = "ezchip,arc-nps";
-       clock-frequency = <83333333>;   /* 83.333333 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&intc>;
index 5d5e373e0ebc579c7f734d42212965c8187fa003..63970513e4aee2ceaadc2265eae1393115a0d2b6 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsim";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
index 37c416defe9031cc824e0d354b6d68f66992e4f2..e659a340ca8a7836735bc32bf5cc09c5e4b5907f 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsimosci";
-       clock-frequency = <20000000>;   /* 20 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
index f2a22c49541d065ea7810bfc3fca714c67d6e4a9..16ce5d65cfded4ac545eb2020604bc183d89d015 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsimosci_hs";
-       clock-frequency = <20000000>;   /* 20 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
index 34457a5e7706b9ce847394e0f59b7c37a152e784..ce8dfbc30c4d1b67d55de644131769599a3e8ace 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsimosci_hs";
-       clock-frequency = <5000000>;    /* 5 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
index 3a10cc633e2b0e200b8eacfd16ff99e56a718652..65808fe0a290be15ded0903ad347540d0f35a52e 100644 (file)
@@ -13,7 +13,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        chosen { };
index 71fd308a9298aac6435d67e6863314d26e140396..2dfe8037dfbb34ac680597619ecef27319bed1a2 100644 (file)
@@ -8,7 +8,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        chosen { };
index d1cb25a6698952b461925d5a361223c8bc3c5080..4c11079f3565a3decc7f6401a92950df16565411 100644 (file)
@@ -8,7 +8,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        chosen { };
index ad4ee43bd2ac7700ee93d1c05ee7c84e474c4829..0fd6ba985b164b7752c26544e5d4b6d9684c88be 100644 (file)
@@ -14,7 +14,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <50000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index a3cb6263c581ea8a030bad036ec1ddebfc2b25f9..82214cd7ba0ca7406483ad80ad0cac88425b7a68 100644 (file)
@@ -15,7 +15,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <50000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index 5f3dcbbc0cc9c14df199c55ff9e916b64be3be0d..dd683995bc9d119136807adcbf71b189714fe359 100644 (file)
 
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF                                               \
-       unsigned int delay = 1, tmp;                                            \
-
-#define SCOND_FAIL_RETRY_ASM                                                   \
-       "       bz      4f                      \n"                             \
-       "   ; --- scond fail delay ---          \n"                             \
-       "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
-       "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
-       "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
-       "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
-       "       b       1b                      \n"     /* start over */        \
-       "4: ; --- success ---                   \n"                             \
-
-#define SCOND_FAIL_RETRY_VARS                                                  \
-         ,[delay] "+&r" (delay),[tmp] "=&r"    (tmp)                           \
-
-#else  /* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM                                                   \
-       "       bnz     1b                      \n"                             \
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
-       unsigned int val;                                               \
-       SCOND_FAIL_RETRY_VAR_DEF                                        \
+       unsigned int val;                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
-       "                                               \n"             \
-       SCOND_FAIL_RETRY_ASM                                            \
-                                                                       \
+       "       bnz     1b                              \n"             \
        : [val] "=&r"   (val) /* Early clobber to prevent reg reuse */  \
-         SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter), /* Not "m": llock only supports reg direct addr mode */  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v)                    \
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
 static inline int atomic_##op##_return(int i, atomic_t *v)             \
 {                                                                      \
-       unsigned int val;                                               \
-       SCOND_FAIL_RETRY_VAR_DEF                                        \
+       unsigned int val;                                               \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
-       "                                               \n"             \
-       SCOND_FAIL_RETRY_ASM                                            \
-                                                                       \
+       "       bnz     1b                              \n"             \
        : [val] "=&r"   (val)                                           \
-         SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter),                                  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
index e0e1faf03c50177f69232f2990dd5e4a4ac19b6b..14c310f2e0b198ecac4d4d9f19a33d80b258005b 100644 (file)
@@ -76,8 +76,8 @@
         * We need to be a bit more cautious here. What if a kernel bug in
         * L1 ISR, caused SP to go whaco (some small value which looks like
         * USER stk) and then we take L2 ISR.
-        * Above brlo alone would treat it as a valid L1-L2 sceanrio
-        * instead of shouting alound
+        * Above brlo alone would treat it as a valid L1-L2 scenario
+        * instead of shouting around
         * The only feasible way is to make sure this L2 happened in
         * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
         * L1 ISR before it switches stack
index 1fd467ef658fe861b34f36247674d8f3e167acad..b0b87f2447f5248b3b2bcf1e13b14a5ad85d2d44 100644 (file)
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
                local_flush_tlb_all();
 
                /*
-                * Above checke for rollover of 8 bit ASID in 32 bit container.
+                * Above check for rollover of 8 bit ASID in 32 bit container.
                 * If the container itself wrapped around, set it to a non zero
                 * "generation" to distinguish from no context
                 */
index 86ed671286df377bb6231013724737a135a86d69..3749234b74196cb2f4770b7ed19290aa9cdcc98b 100644 (file)
@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                         __get_order_pte());
 
        return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
        pgtable_t pte_pg;
        struct page *page;
 
-       pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+       pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
        if (!pte_pg)
                return 0;
        memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
index 034bbdc0ff61c3bffadd0284d776240d8137767d..858f98ef7f1ba3b1fd1ccda9298f2484f7749418 100644 (file)
@@ -47,7 +47,7 @@
  * Page Tables are purely for Linux VM's consumption and the bits below are
  * suited to that (uniqueness). Hence some are not implemented in the TLB and
  * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
 *      separate PD0 and PD1, which combined form a translation entry)
  *      while for PTE perspective, they are 8 and 9 respectively
  * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
index f9048994b22f857a487205de5b5a25c4d3dc1f5f..16b630fbeb6acfb09fc59d20c4f965a0bb3e59aa 100644 (file)
@@ -78,7 +78,7 @@ struct task_struct;
 #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
 #define TSK_K_ESP(tsk)         (tsk->thread.ksp)
index 991380438d6bfd6af03e6fff4748b8c5342b6a70..89fdd1b0a76ebe672094daa10134204cb96cf925 100644 (file)
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
 * (1) These insns were introduced only in the 4.10 release. So for older
 *     releases, support is needed.
  *
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
 *     guaranteed by the platform (not something which core handles).
  *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *     disabling for atomicity.
index 800e7c430ca589724c9467a3a9689aad6faf6bc3..cded4a9b543822d5ff9e2099fb05c67bdfc8e84a 100644 (file)
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
        unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
        smp_mb();
 }
 
-#else  /* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF                                               \
-       unsigned int delay, tmp;                                                \
-
-#define SCOND_FAIL_RETRY_ASM                                                   \
-       "   ; --- scond fail delay ---          \n"                             \
-       "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
-       "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
-       "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
-       "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
-       "       b       1b                      \n"     /* start over */        \
-       "                                       \n"                             \
-       "4: ; --- done ---                      \n"                             \
-
-#define SCOND_FAIL_RETRY_VARS                                                  \
-         ,[delay] "=&r" (delay), [tmp] "=&r"   (tmp)                           \
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-       unsigned int val;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[slock]]      \n"
-       "       breq    %[val], %[LOCKED], 0b   \n"     /* spin while LOCKED */
-       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
-       "       bz      4f                      \n"     /* done */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val)
-         SCOND_FAIL_RETRY_VARS
-       : [slock]       "r"     (&(lock->slock)),
-         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-       unsigned int val, got_it = 0;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[slock]]      \n"
-       "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
-       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
-       "       bz.d    4f                      \n"
-       "       mov.z   %[got_it], 1            \n"     /* got it */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val),
-         [got_it]      "+&r"   (got_it)
-         SCOND_FAIL_RETRY_VARS
-       : [slock]       "r"     (&(lock->slock)),
-         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
-       : "memory", "cc");
-
-       smp_mb();
-
-       return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-       smp_mb();
-
-       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-       smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       /*
-        * zero means writer holds the lock exclusively, deny Reader.
-        * Otherwise grant lock to first/subseq reader
-        *
-        *      if (rw->counter > 0) {
-        *              rw->counter--;
-        *              ret = 1;
-        *      }
-        */
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brls    %[val], %[WR_LOCKED], 0b\n"     /* <= 0: spin while write locked */
-       "       sub     %[val], %[val], 1       \n"     /* reader lock */
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz      4f                      \n"     /* done */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-       unsigned int val, got_it = 0;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
-       "       sub     %[val], %[val], 1       \n"     /* counter-- */
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz.d    4f                      \n"
-       "       mov.z   %[got_it], 1            \n"     /* got it */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val),
-         [got_it]      "+&r"   (got_it)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-
-       return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       /*
-        * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-        * deny writer. Otherwise if unlocked grant to writer
-        * Hence the claim that Linux rwlocks are unfair to writers.
-        * (can be starved for an indefinite time by readers).
-        *
-        *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-        *              rw->counter = 0;
-        *              ret = 1;
-        *      }
-        */
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brne    %[val], %[UNLOCKED], 0b \n"     /* while !UNLOCKED spin */
-       "       mov     %[val], %[WR_LOCKED]    \n"
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz      4f                      \n"
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-       unsigned int val, got_it = 0;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
-       "       mov     %[val], %[WR_LOCKED]    \n"
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz.d    4f                      \n"
-       "       mov.z   %[got_it], 1            \n"     /* got it */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val),
-         [got_it]      "+&r"   (got_it)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-
-       return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-
-       smp_mb();
-
-       /*
-        * rw->counter++;
-        */
-       __asm__ __volatile__(
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       add     %[val], %[val], 1       \n"
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bnz     1b                      \n"
-       "                                       \n"
-       : [val]         "=&r"   (val)
-       : [rwlock]      "r"     (&(rw->counter))
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-
-       smp_mb();
-
-       /*
-        * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-        */
-       __asm__ __volatile__(
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       scond   %[UNLOCKED], [%[rwlock]]\n"
-       "       bnz     1b                      \n"
-       "                                       \n"
-       : [val]         "=&r"   (val)
-       : [rwlock]      "r"     (&(rw->counter)),
-         [UNLOCKED]    "r"     (__ARCH_RW_LOCK_UNLOCKED__)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif /* CONFIG_ARC_STAR_9000923308 */
-
 #else  /* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
index 3af67455659af49d9f9d53b67ac26024cf3b77b9..2d79e527fa50a305e932259d336423d9d89c3fae 100644 (file)
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 
 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
 * syscall, so all that remains to be tested is _TIF_WORK_MASK
  */
 
index d1da6032b715a7fea35d71fcedeeeb8cdfe590e4..a78d5670884f32490d2341b2468bf606258b6654 100644 (file)
@@ -32,7 +32,7 @@
 #define __kernel_ok            (segment_eq(get_fs(), KERNEL_DS))
 
 /*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want to do:
  *     (start < TASK_SIZE) && (start+len < TASK_SIZE)
  * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
  * emitted directly in code.
index 095599a73195c35bd6aabbc301bfe8dcafd3d52a..71f3918b0fc304bd0eeadb024bcbf9fa3ba32418 100644 (file)
@@ -74,7 +74,7 @@
        __tmp ^ __in;                                           \
 })
 
-#elif (ARC_BSWAP_TYPE == 2)    /* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2)    /* Custom single cycle bswap instruction */
 
 #define __arch_swab32(x)                                               \
 ({                                                                     \
index 0cb0abaa0479e53ab1ea7ef8e45ab0cd42d2e965..98812c1248dfaf85b28ed023287ae78f27286073 100644 (file)
@@ -91,27 +91,13 @@ VECTOR   mem_service             ; 0x8, Mem exception   (0x1)
 VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
 
 ; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-VECTOR   handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
 VECTOR   handle_interrupt_level2
 #else
 VECTOR   handle_interrupt_level1
 #endif
 
-.rept   25
+.rept   28
 VECTOR   handle_interrupt_level1 ; Other devices
 .endr
 
index c5cceca36118744f8c06359bc060f250974313df..ce9deb953ca90e1a0309ccd69f9608e8fd32cf70 100644 (file)
@@ -28,10 +28,8 @@ void arc_init_IRQ(void)
 {
        int level_mask = 0;
 
-       /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+       /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
+       level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
 
        /*
         * Write to register, even if no LV2 IRQs configured to reset it
index 6fd48021324b50acd326a4ce9a17031f05db7609..08f03d9b5b3e544fcaf3e1696b8ea8e87cb943af 100644 (file)
@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
        int64_t delta = new_raw_count - prev_raw_count;
 
        /*
-        * We don't afaraid of hwc->prev_count changing beneath our feet
+        * We aren't afraid of hwc->prev_count changing beneath our feet
         * because there's no way for us to re-enter this function anytime.
         */
        local64_set(&hwc->prev_count, new_raw_count);
index f63b8bfefb0ca789dcbac3ad0459dfc4b3e98760..2ee7a4d758a882a2b4e36cacd74adb280648fced 100644 (file)
@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
                /*
                 * If we are here, it is established that @uboot_arg didn't
                 * point to DT blob. Instead if u-boot says it is cmdline,
-                * Appent to embedded DT cmdline.
+                * append to embedded DT cmdline.
                 * setup_machine_fdt() would have populated @boot_command_line
                 */
                if (uboot_tag == 1) {
index 004b7f0bc76cc58c6988547df1bb8705cd36004d..6cb3736b6b83613a95180cd3e9cb5ba12a9b2f7f 100644 (file)
@@ -34,7 +34,7 @@
  *  -ViXS were still seeing crashes when using insmod to load drivers.
 *   It turned out that the code to change Execute permissions for TLB entries
  *   of user was not guarded for interrupts (mod_tlb_permission)
- *   This was cauing TLB entries to be overwritten on unrelated indexes
+ *   This was causing TLB entries to be overwritten on unrelated indexes
  *
  * Vineetg: July 15th 2008: Bug #94183
  *  -Exception happens in Delay slot of a JMP, and before user space resumes,
index a6f91e88ce36e3ea2a2c95d8eabdceffa48be7fc..934150e7ac4895ef9e4523fd1ffabbdacce2dd74 100644 (file)
@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
 static ssize_t tlb_stats_output(struct file *file,     /* file descriptor */
                                char __user *user_buf,  /* user buffer */
                                size_t len,             /* length of buffer */
index 9e5eddbb856f1f32dd25f4b13132dd527725249c..5a294b2c3cb307ac591293afae8c8a72409c97c5 100644 (file)
@@ -215,7 +215,7 @@ void read_decode_cache_bcr(void)
  * ------------------
  * This ver of MMU supports variable page sizes (1k-16k): although Linux will
  * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
  * meaning more vaddr bits needed to disambiguate the cache-line-op ;
  * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
 
        /*
         * This is technically for MMU v4, using the MMU v3 programming model
-        * Special work for HS38 aliasing I-cache configuratino with PAE40
+        * Special work for HS38 aliasing I-cache configuration with PAE40
         *   - upper 8 bits of paddr need to be written into PTAG_HI
         *   - (and needs to be written before the lower 32 bits)
         * Note that PTAG_HI is hoisted outside the line loop
@@ -936,7 +936,7 @@ void arc_cache_init(void)
                              ic->ver, CONFIG_ARC_MMU_VER);
 
                /*
-                * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+                * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
                 * pair to provide vaddr/paddr respectively, just as in MMU v3
                 */
                if (is_isa_arcv2() && ic->alias)
index 8c8e36fa5659411c6705e46d538e7c506fc42c96..73d7e4c75b7dbc9140604b334ab5e22a8cdd7a8d 100644 (file)
@@ -10,7 +10,7 @@
  * DMA Coherent API Notes
  *
  * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
  * Cache bit off in the TLB entry.
  *
  * The default DMA address == Phy address which is 0x8000_0000 based.
index 06b6c2d695bfb6bfbb0a61bbfffd958a58bf20d3..414b42710a366fc0f5708765d4ff0786e53a2b5b 100644 (file)
@@ -741,6 +741,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \
        sun7i-a20-olimex-som-evb.dtb \
        sun7i-a20-olinuxino-lime.dtb \
        sun7i-a20-olinuxino-lime2.dtb \
+       sun7i-a20-olinuxino-lime2-emmc.dtb \
        sun7i-a20-olinuxino-micro.dtb \
        sun7i-a20-orangepi.dtb \
        sun7i-a20-orangepi-mini.dtb \
index d82dd6e3f9b1dda71783ff52cc495c45ed4578c4..5687d6b4da60c12061a425562659bc4b2306f875 100644 (file)
@@ -418,7 +418,7 @@ &i2c0 {
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c0_pins>;
-       clock-frequency = <400000>;
+       clock-frequency = <100000>;
 
        tps@24 {
                compatible = "ti,tps65218";
index b01a5948cdd0434ce00d1d5f8da0ab059c62026b..0e63b9dff6e7d717f190886d24f5175e8510489e 100644 (file)
@@ -60,10 +60,26 @@ tps659038: tps659038@58 {
 
                tps659038_pmic {
                        compatible = "ti,tps659038-pmic";
+
+                       smps12-in-supply = <&vmain>;
+                       smps3-in-supply = <&vmain>;
+                       smps45-in-supply = <&vmain>;
+                       smps6-in-supply = <&vmain>;
+                       smps7-in-supply = <&vmain>;
+                       smps8-in-supply = <&vmain>;
+                       smps9-in-supply = <&vmain>;
+                       ldo1-in-supply = <&vmain>;
+                       ldo2-in-supply = <&vmain>;
+                       ldo3-in-supply = <&vmain>;
+                       ldo4-in-supply = <&vmain>;
+                       ldo9-in-supply = <&vmain>;
+                       ldoln-in-supply = <&vmain>;
+                       ldousb-in-supply = <&vmain>;
+                       ldortc-in-supply = <&vmain>;
+
                        regulators {
                                smps12_reg: smps12 {
                                        /* VDD_MPU */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps12";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1250000>;
@@ -73,7 +89,6 @@ smps12_reg: smps12 {
 
                                smps3_reg: smps3 {
                                        /* VDD_DDR EMIF1 EMIF2 */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps3";
                                        regulator-min-microvolt = <1350000>;
                                        regulator-max-microvolt = <1350000>;
@@ -84,7 +99,6 @@ smps3_reg: smps3 {
                                smps45_reg: smps45 {
                                        /* VDD_DSPEVE on AM572 */
                                        /* VDD_IVA + VDD_DSP on AM571 */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps45";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1250000>;
@@ -94,7 +108,6 @@ smps45_reg: smps45 {
 
                                smps6_reg: smps6 {
                                        /* VDD_GPU */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps6";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1250000>;
@@ -104,7 +117,6 @@ smps6_reg: smps6 {
 
                                smps7_reg: smps7 {
                                        /* VDD_CORE */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps7";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1150000>;
@@ -115,13 +127,11 @@ smps7_reg: smps7 {
                                smps8_reg: smps8 {
                                        /* 5728 - VDD_IVAHD */
                                        /* 5718 - N.C. test point */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps8";
                                };
 
                                smps9_reg: smps9 {
                                        /* VDD_3_3D */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps9";
                                        regulator-min-microvolt = <3300000>;
                                        regulator-max-microvolt = <3300000>;
@@ -132,7 +142,6 @@ smps9_reg: smps9 {
                                ldo1_reg: ldo1 {
                                        /* VDDSHV8 - VSDMMC  */
                                        /* NOTE: on rev 1.3a, data supply */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo1";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <3300000>;
@@ -142,7 +151,6 @@ ldo1_reg: ldo1 {
 
                                ldo2_reg: ldo2 {
                                        /* VDDSH18V */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo2";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
@@ -152,7 +160,6 @@ ldo2_reg: ldo2 {
 
                                ldo3_reg: ldo3 {
                                        /* R1.3a 572x V1_8PHY_LDO3: USB, SATA */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo3";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
@@ -162,7 +169,6 @@ ldo3_reg: ldo3 {
 
                                ldo4_reg: ldo4 {
                                        /* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo4";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
@@ -174,7 +180,6 @@ ldo4_reg: ldo4 {
 
                                ldo9_reg: ldo9 {
                                        /* VDD_RTC  */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo9";
                                        regulator-min-microvolt = <840000>;
                                        regulator-max-microvolt = <1160000>;
@@ -184,7 +189,6 @@ ldo9_reg: ldo9 {
 
                                ldoln_reg: ldoln {
                                        /* VDDA_1V8_PLL */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldoln";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
@@ -194,7 +198,6 @@ ldoln_reg: ldoln {
 
                                ldousb_reg: ldousb {
                                        /* VDDA_3V_USB: VDDA_USBHS33 */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldousb";
                                        regulator-min-microvolt = <3300000>;
                                        regulator-max-microvolt = <3300000>;
@@ -204,7 +207,6 @@ ldousb_reg: ldousb {
 
                                ldortc_reg: ldortc {
                                        /* VDDA_RTC  */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldortc";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
index cbc17b0794b102fb2f0862958203c19c1fe79dae..4128fa91823c98c6fb94870ccb2e5b69c31f35e0 100644 (file)
@@ -93,6 +93,10 @@ partition@0x780000 {
        };
 };
 
+&mmc1 {
+        status = "disabled";
+};
+
 &mmc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&sd1_pins>;
@@ -101,6 +105,10 @@ &mmc2 {
        cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
 };
 
+&mmc3 {
+        status = "disabled";
+};
+
 &pincntl {
        sd1_pins: pinmux_sd1_pins {
                pinctrl-single,pins = <
index 5d4313fd5a46012aac394ee2b6210af1d8fd6ee6..3f184863e0c52192d7a01a8f24ad3637b1229556 100644 (file)
@@ -45,6 +45,14 @@ &cpsw_emac1 {
        phy-mode = "rgmii";
 };
 
+&mmc1 {
+       status = "disabled";
+};
+
+&mmc2 {
+       status = "disabled";
+};
+
 &mmc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&sd2_pins>;
@@ -53,6 +61,7 @@ &mmc3 {
        dmas = <&edma_xbar 8 0 1        /* use SDTXEVT1 instead of MCASP0TX */
                &edma_xbar 9 0 2>;      /* use SDRXEVT1 instead of MCASP0RX */
        dma-names = "tx", "rx";
+       non-removable;
 };
 
 &pincntl {
index e0074014385a50d930a2058a74f3a3a4f8428ec3..3a8f3976f6f9677874f46d0491653856a444a6e6 100644 (file)
@@ -1451,6 +1451,8 @@ gpmc: gpmc@50000000 {
                        ti,hwmods = "gpmc";
                        reg = <0x50000000 0x37c>;      /* device IO registers */
                        interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+                       dmas = <&edma_xbar 4 0>;
+                       dma-names = "rxtx";
                        gpmc,num-cs = <8>;
                        gpmc,num-waitpins = <2>;
                        #address-cells = <2>;
index 4220eeffc65ae806892f353efce4eab400a9cdb9..5e06020f450bba5f78d0df064731dc1d55ce5ec9 100644 (file)
@@ -107,8 +107,8 @@ &dss {
        reg = <0x58000000 0x80>,
              <0x58004054 0x4>,
              <0x58004300 0x20>,
-             <0x58005054 0x4>,
-             <0x58005300 0x20>;
+             <0x58009054 0x4>,
+             <0x58009300 0x20>;
        reg-names = "dss", "pll1_clkctrl", "pll1",
                    "pll2_clkctrl", "pll2";
 
index ddfe1f558c105a1b962cce3ca69e0966021a01da..fa14f77df5632709548958522d83ddb81401d0d0 100644 (file)
@@ -242,7 +242,7 @@ &dp {
        hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
 
        ports {
-               port0 {
+               port {
                        dp_out: endpoint {
                                remote-endpoint = <&bridge_in>;
                        };
@@ -485,13 +485,20 @@ ptn3460: lvds-bridge@20 {
                edid-emulation = <5>;
 
                ports {
-                       port0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+
                                bridge_out: endpoint {
                                        remote-endpoint = <&panel_in>;
                                };
                        };
 
-                       port1 {
+                       port@1 {
+                               reg = <1>;
+
                                bridge_in: endpoint {
                                        remote-endpoint = <&dp_out>;
                                };
index f9d2e4f1a0e09598de0ad852bec24996b7373c56..1de972d46a87f31abc814991b2517d4bf75a59f1 100644 (file)
@@ -163,7 +163,7 @@ &dp {
        hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
 
        ports {
-               port0 {
+               port {
                        dp_out: endpoint {
                                remote-endpoint = <&bridge_in>;
                        };
@@ -631,13 +631,20 @@ ps8625: lvds-bridge@48 {
                use-external-pwm;
 
                ports {
-                       port0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+
                                bridge_out: endpoint {
                                        remote-endpoint = <&panel_in>;
                                };
                        };
 
-                       port1 {
+                       port@1 {
+                               reg = <1>;
+
                                bridge_in: endpoint {
                                        remote-endpoint = <&dp_out>;
                                };
index 76056ba92cedc038933a4367a55ba68af9786d4b..ed449827c3d358512f4004f1555d279bbe59267a 100644 (file)
@@ -85,7 +85,7 @@ mmc2_pins: pinmux_mmc2_pins {
                        OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
                        OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
                        OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
-                       OMAP3_CORE1_IOPAD(0x215e, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+                       OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
                        OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
                        OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
                >;
index 41f5d386f21f31c1ac02cc091d9e318097a3e211..f4f2ce46d681cd0ed7872dccea21eb6523dbd275 100644 (file)
@@ -188,6 +188,7 @@ &mmc1 {
        vmmc-supply = <&vmmc1>;
        vmmc_aux-supply = <&vsim>;
        bus-width = <4>;
+       cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
 };
 
 &mmc3 {
index d6f839cab6499acc72cd5b93733aa967defe9d09..b6971060648a5f148d3bf7fce8b1aaf08ed3f264 100644 (file)
@@ -194,6 +194,12 @@ OMAP3630_CORE2_IOPAD(0x25f6, PIN_OUTPUT | MUX_MODE4) /* etk_d13.gpio_27 */
                        OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */
                >;
        };
+
+       mmc1_wp_pins: pinmux_mmc1_cd_pins {
+               pinctrl-single,pins = <
+                       OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT | MUX_MODE4)   /* etk_d15.gpio_29 */
+               >;
+       };
 };
 
 &i2c3 {
@@ -250,3 +256,8 @@ dpi_out: endpoint {
                };
        };
 };
+
+&mmc1 {
+       pinctrl-0 = <&mmc1_pins &mmc1_wp_pins>;
+       wp-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>; /* gpio_29 */
+};
index d9e2d9c6e999e4aa6f3a4c8f582fe9caf74953f4..2b74a81d1de266e360c558f5bf5b29b248c400bc 100644 (file)
@@ -288,7 +288,7 @@ ssi_pins: pinmux_ssi {
                pinctrl-single,pins = <
                        OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
                        OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1)               /* ssi1_flag_tx */
-                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4)                /* ssi1_wake_tx (cawake) */
                        OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1)               /* ssi1_dat_tx */
                        OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1)                /* ssi1_dat_rx */
                        OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1)                /* ssi1_flag_rx */
@@ -300,7 +300,7 @@ OMAP3_CORE1_IOPAD(0x218a, PIN_OUTPUT | MUX_MODE1)           /* ssi1_wake */
        modem_pins: pinmux_modem {
                pinctrl-single,pins = <
                        OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4)               /* gpio 70 => cmt_apeslpx */
-                       OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio 72 => ape_rst_rq */
+                       OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4)                /* gpio 72 => ape_rst_rq */
                        OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4)               /* gpio 73 => cmt_rst_rq */
                        OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4)               /* gpio 74 => cmt_en */
                        OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4)               /* gpio 75 => cmt_rst */
index a00ca761675d446c148ffc50c8d0d189cb1f241f..927b17fc4ed885634320480a2527131ec8f451f1 100644 (file)
@@ -97,7 +97,7 @@ ssi_pins: pinmux_ssi_pins {
                        OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1)            /* ssi1_dat_tx */
                        OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1)            /* ssi1_flag_tx */
                        OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1)      /* ssi1_rdy_tx */
-                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4)        /* ssi1_wake_tx (cawake) */
                        OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1)             /* ssi1_dat_rx */
                        OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1)             /* ssi1_flag_rx */
                        OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1)            /* ssi1_rdy_rx */
@@ -110,7 +110,7 @@ ssi_pins_idle: pinmux_ssi_pins_idle {
                        OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7)            /* ssi1_dat_tx */
                        OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7)            /* ssi1_flag_tx */
                        OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7)    /* ssi1_rdy_tx */
-                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4)        /* ssi1_wake_tx (cawake) */
                        OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7)             /* ssi1_dat_rx */
                        OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7)             /* ssi1_flag_rx */
                        OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4)            /* ssi1_rdy_rx */
@@ -120,7 +120,7 @@ OMAP3_CORE1_IOPAD(0x218a, PIN_OUTPUT | MUX_MODE7)            /* ssi1_wake */
 
        modem_pins1: pinmux_modem_core1_pins {
                pinctrl-single,pins = <
-                       OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
+                       OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | MUX_MODE4)        /* gpio_34 (ape_rst_rq) */
                        OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4)            /* gpio_88 (cmt_rst_rq) */
                        OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4)            /* gpio_93 (cmt_apeslpx) */
                >;
index f19170bdcc1fa0f382f2305112daa457ff528e6c..c29b41dc7b95a19213c7e514cd3371e06c87230f 100644 (file)
@@ -98,7 +98,7 @@ uart2_pins: pinmux_uart2_pins {
                pinctrl-single,pins = <
                         OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0)        /* uart2_cts.uart2_cts */
                         OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0)              /* uart2_rts.uart2_rts */
-                        OMAP3_CORE1_IOPAD(0x217a, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
+                        OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0)               /* uart2_rx.uart2_rx */
                         OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0)              /* uart2_tx.uart2_tx */
                >;
        };
@@ -107,7 +107,7 @@ uart3_pins: pinmux_uart3_pins {
                pinctrl-single,pins = <
                         OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0)      /* uart3_cts_rctx.uart3_cts_rctx */
                         OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0)              /* uart3_rts_sd.uart3_rts_sd */
-                        OMAP3_CORE1_IOPAD(0x219e, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+                        OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0)               /* uart3_rx_irrx.uart3_rx_irrx */
                         OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0)              /* uart3_tx_irtx.uart3_tx_irtx */
                >;
        };
@@ -125,7 +125,7 @@ mmc3_2_pins: pinmux_mmc3_2_pins {
                pinctrl-single,pins = <
                        OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_clk.sdmmc3_clk */
                        OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d4.sdmmc3_dat0 */
-                       OMAP3630_CORE2_IOPAD(0x25e6, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
+                       OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d5.sdmmc3_dat1 */
                        OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d6.sdmmc3_dat2 */
                        OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d3.sdmmc3_dat3 */
                >;
index dc759a3028b790d1b525ee889077c5cca0ce2b8d..5d5b620b7d9b70b6529e4b657641fd4434c88bc5 100644 (file)
@@ -14,6 +14,29 @@ aliases {
                display0 = &hdmi0;
        };
 
+       vmain: fixedregulator-vmain {
+               compatible = "regulator-fixed";
+               regulator-name = "vmain";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
+       vsys_cobra: fixedregulator-vsys_cobra {
+               compatible = "regulator-fixed";
+               regulator-name = "vsys_cobra";
+               vin-supply = <&vmain>;
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
+       vdds_1v8_main: fixedregulator-vdds_1v8_main {
+               compatible = "regulator-fixed";
+               regulator-name = "vdds_1v8_main";
+               vin-supply = <&smps7_reg>;
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+       };
+
        vmmcsd_fixed: fixedregulator-mmcsd {
                compatible = "regulator-fixed";
                regulator-name = "vmmcsd_fixed";
@@ -309,7 +332,7 @@ OMAP5_IOPAD(0x05a, PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */
 
        wlcore_irq_pin: pinmux_wlcore_irq_pin {
                pinctrl-single,pins = <
-                       OMAP5_IOPAD(0x40, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6)     /* llia_wakereqin.gpio1_wk14 */
+                       OMAP5_IOPAD(0x40, PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
                >;
        };
 };
@@ -409,6 +432,26 @@ palmas_pmic {
 
                        ti,ldo6-vibrator;
 
+                       smps123-in-supply = <&vsys_cobra>;
+                       smps45-in-supply = <&vsys_cobra>;
+                       smps6-in-supply = <&vsys_cobra>;
+                       smps7-in-supply = <&vsys_cobra>;
+                       smps8-in-supply = <&vsys_cobra>;
+                       smps9-in-supply = <&vsys_cobra>;
+                       smps10_out2-in-supply = <&vsys_cobra>;
+                       smps10_out1-in-supply = <&vsys_cobra>;
+                       ldo1-in-supply = <&vsys_cobra>;
+                       ldo2-in-supply = <&vsys_cobra>;
+                       ldo3-in-supply = <&vdds_1v8_main>;
+                       ldo4-in-supply = <&vdds_1v8_main>;
+                       ldo5-in-supply = <&vsys_cobra>;
+                       ldo6-in-supply = <&vdds_1v8_main>;
+                       ldo7-in-supply = <&vsys_cobra>;
+                       ldo8-in-supply = <&vsys_cobra>;
+                       ldo9-in-supply = <&vmmcsd_fixed>;
+                       ldoln-in-supply = <&vsys_cobra>;
+                       ldousb-in-supply = <&vsys_cobra>;
+
                        regulators {
                                smps123_reg: smps123 {
                                        /* VDD_OPP_MPU */
@@ -600,7 +643,8 @@ twl6040: twl@4b {
                pinctrl-0 = <&twl6040_pins>;
 
                interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
-               ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>;  /* gpio line 141 */
+
+               /* audpwron gpio defined in the board specific dts */
 
                vio-supply = <&smps7_reg>;
                v2v1-supply = <&smps9_reg>;
index 46ecb1dd3b5cb3dacefc99ff453093d5914a1b3c..f75ce02fb3983988ec7858630e0c3d93f589b5e6 100644 (file)
@@ -35,6 +35,22 @@ tca6416: tca6416@21 {
        };
 };
 
+/* LDO4 is VPP1 - ball AD9 */
+&ldo4_reg {
+       regulator-min-microvolt = <2000000>;
+       regulator-max-microvolt = <2000000>;
+};
+
+/*
+ * LDO7 is used for HDMI: VDDA_DSIPORTA - ball AA33, VDDA_DSIPORTC - ball AE33,
+ * VDDA_HDMI - ball AN25
+ */
+&ldo7_reg {
+       status = "okay";
+       regulator-min-microvolt = <1800000>;
+       regulator-max-microvolt = <1800000>;
+};
+
 &omap5_pmx_core {
        i2c4_pins: pinmux_i2c4_pins {
                pinctrl-single,pins = <
@@ -52,3 +68,13 @@ &tpd12s015 {
                <&gpio7 3 0>;           /* 195, SDA */
 };
 
+&twl6040 {
+       ti,audpwron-gpio = <&gpio5 16 GPIO_ACTIVE_HIGH>;  /* gpio line 144 */
+};
+
+&twl6040_pins {
+       pinctrl-single,pins = <
+               OMAP5_IOPAD(0x1c4, PIN_OUTPUT | MUX_MODE6)      /* mcspi1_somi.gpio5_144 */
+               OMAP5_IOPAD(0x1ca, PIN_OUTPUT | MUX_MODE6)      /* perslimbus2_clock.gpio5_145 */
+       >;
+};
index 60b3fbb3bf07ad384182e951e1329ca46a8cf1ab..a51e60518eb6bb2bcc0c82452743b0b7ac3c37c4 100644 (file)
@@ -51,3 +51,13 @@ &tpd12s015 {
                <&gpio9 1 GPIO_ACTIVE_HIGH>,    /* TCA6424A P00, LS OE */
                <&gpio7 1 GPIO_ACTIVE_HIGH>;    /* GPIO 193, HPD */
 };
+
+&twl6040 {
+       ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>;  /* gpio line 141 */
+};
+
+&twl6040_pins {
+       pinctrl-single,pins = <
+               OMAP5_IOPAD(0x1be, PIN_OUTPUT | MUX_MODE6)      /* mcspi1_somi.gpio5_141 */
+       >;
+};
index a3601e4c0a2e3b4aecb249d5b9c74436911afe0d..b844473601d25085d0a200e6406c8f5bfc381f04 100644 (file)
@@ -136,6 +136,7 @@ regulator-usb-nrst {
 &gmac1 {
        status = "okay";
        phy-mode = "rgmii";
+       phy-handle = <&phy1>;
 
        snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>;
        snps,reset-active-low;
index ad8ba10764a330e2f328192bb7c4b9aa9dd4e0f3..d294e82447a292bb24e0989191494693f7ee0ac3 100644 (file)
@@ -24,18 +24,21 @@ gp0_reserved: rproc@40000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x40000000 0x01000000>;
                        no-map;
+                       status = "disabled";
                };
 
                gp1_reserved: rproc@41000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x41000000 0x01000000>;
                        no-map;
+                       status = "disabled";
                };
 
                audio_reserved: rproc@42000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x42000000 0x01000000>;
                        no-map;
+                       status = "disabled";
                };
 
                dmu_reserved: rproc@43000000 {
index 68b479b8772c6f3eadc16f2ad6c3d33e35d39c5f..73c133f5e79cdf9540174002656421c96dc68b62 100644 (file)
@@ -176,8 +176,6 @@ &reg_aldo3 {
 };
 
 &reg_dc1sw {
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
        regulator-name = "vcc-lcd";
 };
 
index 360adfb1e9ca56a76094e170e6662d504e115f3c..d6ad6196a768bce9a28d1c7fcb149da32f21d5df 100644 (file)
@@ -135,8 +135,6 @@ &reg_aldo3 {
 
 &reg_dc1sw {
        regulator-name = "vcc-lcd-usb2";
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
 };
 
 &reg_dc5ldo {
index 10f49ab5328eee63acce7d5d9fe9081bd1cf76a4..47195e8690b4ed9c8b7c9937cb62ef7fb8ab795b 100644 (file)
@@ -82,6 +82,7 @@ CONFIG_TOUCHSCREEN_MMS114=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MAX77693_HAPTIC=y
 CONFIG_INPUT_MAX8997_HAPTIC=y
+CONFIG_KEYBOARD_SAMSUNG=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
index 8f857564657f13482fe0277840b931d369f20eb6..8a5fff1b7f6f9541578d37e4bf03f8eb3e4a003a 100644 (file)
@@ -264,6 +264,7 @@ CONFIG_KEYBOARD_TEGRA=y
 CONFIG_KEYBOARD_SPEAR=y
 CONFIG_KEYBOARD_ST_KEYSCAN=y
 CONFIG_KEYBOARD_CROS_EC=m
+CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_MOUSE_CYAPA=m
 CONFIG_MOUSE_ELAN_I2C=y
index 19cfab526d13a37dd33d476f1270179fc4fe5d5c..20febb36884424a0b23984c776e23abab3509a40 100644 (file)
@@ -29,7 +29,7 @@
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+       return (pmd_t *)get_zeroed_page(GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
index aeddd28b3595515aa70fb849bd65179a7b800443..92fd2c8a9af0638834d6c2b5814b9a88911f33fe 100644 (file)
@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 #define pmd_large(pmd)         (pmd_val(pmd) & 2)
 #define pmd_bad(pmd)           (pmd_val(pmd) & 2)
+#define pmd_present(pmd)       (pmd_val(pmd))
 
 #define copy_pmd(pmdpd,pmdps)          \
        do {                            \
index fa70db7c714b605ff98e4efeb264c944214aabab..2a029bceaf2f8593788dea27ec05f3664fc9733a 100644 (file)
@@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
                                                : !!(pmd_val(pmd) & (val)))
 #define pmd_isclear(pmd, val)  (!(pmd_val(pmd) & (val)))
 
+#define pmd_present(pmd)       (pmd_isset((pmd), L_PMD_SECT_VALID))
 #define pmd_young(pmd)         (pmd_isset((pmd), PMD_SECT_AF))
 #define pte_special(pte)       (pte_isset((pte), L_PTE_SPECIAL))
 static inline pte_t pte_mkspecial(pte_t pte)
@@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
 #define pfn_pmd(pfn,prot)      (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define mk_pmd(page,prot)      pfn_pmd(page_to_pfn(page),prot)
 
-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
 static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 {
-       return __pmd(0);
+       return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
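The point of switching from __pmd(0) to a "faulting entry" is that a not-present PMD now keeps every bit except VALID, which is what lets the new pmd_present() above (keyed on L_PMD_SECT_VALID) still report the truth after pmdp_invalidate() without losing the rest of the entry. A standalone model of the invariant (the bit position is illustrative, not the real L_PMD_SECT_VALID value):

#include <assert.h>
#include <stdint.h>

#define SECT_VALID (1ull << 0)                 /* stand-in for L_PMD_SECT_VALID */

static uint64_t mknotpresent(uint64_t pmd) { return pmd & ~SECT_VALID; }
static int present(uint64_t pmd)           { return !!(pmd & SECT_VALID); }

int main(void)
{
	uint64_t pmd = 0x40000000ull | SECT_VALID;   /* some live mapping */
	uint64_t inv = mknotpresent(pmd);

	assert(!present(inv));                       /* looks not-present... */
	assert((inv | SECT_VALID) == pmd);           /* ...but nothing else lost */
	return 0;
}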
index 348caabb7625ee7b12fc0ddd8e2546b22f338b1c..d62204060cbe7c6239a7dc8725918fc43a7b4203 100644 (file)
@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #define pgd_offset_k(addr)     pgd_offset(&init_mm, addr)
 
 #define pmd_none(pmd)          (!pmd_val(pmd))
-#define pmd_present(pmd)       (pmd_val(pmd))
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
index df90bc59bfce2893328c3c79e12658c106897c90..861521606c6d6755d7802f7656e47d7e60f269ed 100644 (file)
@@ -486,7 +486,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-       trace_ipi_raise(target, ipi_types[ipinr]);
+       trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
 }
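The _rcuidle suffix selects the tracepoint wrapper that is safe when RCU is not watching the CPU, as on the idle entry/exit paths from which IPIs can be raised. Schematically it brackets the ordinary event like this (a sketch of the pattern only, not the exact include/linux/tracepoint.h expansion):

static inline void trace_ipi_raise_rcuidle_sketch(const struct cpumask *mask,
						  const char *reason)
{
	rcu_irq_enter_irqson();		/* make RCU watch this section */
	trace_ipi_raise(mask, reason);	/* the ordinary tracepoint */
	rcu_irq_exit_irqson();
}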
 
index e65aa7d11b20971493a730f8a47754bdf3f484cc..20dcf6e904b2c5ea40c21a0903a73bc58b16860e 100644 (file)
@@ -61,7 +61,6 @@ config ARCH_EXYNOS4
        select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
        select CPU_EXYNOS4210
        select GIC_NON_BANKED
-       select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
        select MIGHT_HAVE_CACHE_L2X0
        help
          Samsung EXYNOS4 (Cortex-A9) SoC based systems
index a38b16b699233b2b0948250efe32a030388b2e2a..b56de4b8cdf2dde23f7329a03d081c963c568701 100644 (file)
@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
 static void __init imx6ul_enet_phy_init(void)
 {
        if (IS_BUILTIN(CONFIG_PHYLIB))
-               phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
+               phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
                                           ksz8081_phy_fixup);
 }
 
index 5d7fb596bf4afb6d14b7c835cad0cbc38b827869..bf608441b35731e13b99fb7012bdc89ba005c168 100644 (file)
@@ -43,8 +43,8 @@
 #define OTHERS_MASK                    (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
 
 /* IRQ handler register bitmasks */
-#define DEFERRED_FIQ_MASK              (0x1 << (INT_DEFERRED_FIQ % IH2_BASE))
-#define GPIO_BANK1_MASK                (0x1 << INT_GPIO_BANK1)
+#define DEFERRED_FIQ_MASK              OMAP_IRQ_BIT(INT_DEFERRED_FIQ)
+#define GPIO_BANK1_MASK                OMAP_IRQ_BIT(INT_GPIO_BANK1)
 
 /* Driver buffer byte offsets */
 #define BUF_MASK                       (FIQ_MASK * 4)
@@ -110,7 +110,7 @@ ENTRY(qwerty_fiqin_start)
        mov r8, #2                              @ reset FIQ agreement
        str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
 
-       cmp r10, #INT_GPIO_BANK1                @ is it GPIO bank interrupt?
+       cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY)     @ is it GPIO interrupt?
        beq gpio                                @ yes - process it
 
        mov r8, #1
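Both masks and the FIQ handler compare now agree on hardware bit numbers rather than Linux IRQ numbers, which grew by NR_IRQS_LEGACY in the sparse-IRQ conversion. The translation, schematically (a sketch; the real OMAP_IRQ_BIT lives in the mach-omap1 headers):

static inline u32 omap_irq_bit_sketch(int linux_irq)
{
	/* assumes linux_irq >= NR_IRQS_LEGACY, i.e. a non-legacy mapping */
	int hw_irq = linux_irq - NR_IRQS_LEGACY;   /* undo the Linux offset */
	return 1u << (hw_irq % 32);                /* bit within a 32-bit bank */
}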
index d1f12095f3155d37ac72e61bc73d9cf2b84dcc2e..ec760ae2f91794ab58277dbfb8e7ea23255861f1 100644 (file)
@@ -109,7 +109,8 @@ void __init ams_delta_init_fiq(void)
         * Since no set_type() method is provided by OMAP irq chip,
         * switch to edge triggered interrupt type manually.
         */
-       offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4;
+       offset = IRQ_ILR0_REG_OFFSET +
+                       ((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
        val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
        omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
 
@@ -149,7 +150,7 @@ void __init ams_delta_init_fiq(void)
        /*
         * Redirect GPIO interrupts to FIQ
         */
-       offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4;
+       offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
        val = omap_readl(OMAP_IH1_BASE + offset) | 1;
        omap_writel(val, OMAP_IH1_BASE + offset);
 }
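The same correction applied to the ILR (interrupt level register) offsets can be checked numerically: with 4-byte ILR slots, the slot index is the hardware IRQ number within its 32-entry bank. A standalone check (the IRQ number is illustrative; NR_IRQS_LEGACY is 16 in mainline):

#include <assert.h>

int main(void)
{
	int NR_IRQS_LEGACY = 16;
	int INT_DEFERRED_FIQ = 16 + 47;          /* illustrative Linux IRQ */
	int offset = ((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;

	assert(offset == (47 % 32) * 4);         /* 4-byte slot inside the bank */
	return 0;
}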
index adb5e764965991e6b9f15a7570104379b314bac9..6dfc3e1210a34d5771d781322f079117f61f91cf 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef __AMS_DELTA_FIQ_H
 #define __AMS_DELTA_FIQ_H
 
+#include <mach/irqs.h>
+
 /*
  * Interrupt number used for passing control from FIQ to IRQ.
  * IRQ12, described as reserved, has been selected.
index 0517f0c1581a71091539c5c1abe47473f4b789ef..1a648e9dfaa08fb3647414a08c03650c114b0396 100644 (file)
@@ -17,6 +17,7 @@ config ARCH_OMAP3
        select PM_OPP if PM
        select PM if CPU_IDLE
        select SOC_HAS_OMAP2_SDRC
+       select ARM_ERRATA_430973
 
 config ARCH_OMAP4
        bool "TI OMAP4"
@@ -36,6 +37,7 @@ config ARCH_OMAP4
        select PM if CPU_IDLE
        select ARM_ERRATA_754322
        select ARM_ERRATA_775420
+       select OMAP_INTERCONNECT
 
 config SOC_OMAP5
        bool "TI OMAP5"
@@ -67,6 +69,8 @@ config SOC_AM43XX
        select HAVE_ARM_SCU
        select GENERIC_CLOCKEVENTS_BROADCAST
        select HAVE_ARM_TWD
+       select ARM_ERRATA_754322
+       select ARM_ERRATA_775420
 
 config SOC_DRA7XX
        bool "TI DRA7XX"
@@ -240,4 +244,12 @@ endmenu
 
 endif
 
+config OMAP5_ERRATA_801819
+       bool "Errata 801819: An eviction from L1 data cache might stall indefinitely"
+       depends on SOC_OMAP5 || SOC_DRA7XX
+       help
+         A livelock can occur in the L2 cache arbitration that might prevent
+         a snoop from completing. Under certain conditions this can cause the
+         system to deadlock.
+
 endmenu
index af2851fbcdf02e224bb196dc805500b788e0a400..bae263fba640af8c1ff7a3b1b98e71a35486e94c 100644 (file)
@@ -46,6 +46,7 @@
 
 #define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX        0x109
 #define OMAP5_MON_AMBA_IF_INDEX                0x108
+#define OMAP5_DRA7_MON_SET_ACR_INDEX   0x107
 
 /* Secure PPA(Primary Protected Application) APIs */
 #define OMAP4_PPA_L2_POR_INDEX         0x23
index c625cc10d9f98aefc844329c4e730f0682358038..8cd1de914ee492beaaae1934a9ff910f3208c1f3 100644 (file)
@@ -50,6 +50,39 @@ void __iomem *omap4_get_scu_base(void)
        return scu_base;
 }
 
+#ifdef CONFIG_OMAP5_ERRATA_801819
+void omap5_erratum_workaround_801819(void)
+{
+       u32 acr, revidr;
+       u32 acr_mask;
+
+       /* REVIDR[3] indicates erratum fix available on silicon */
+       asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
+       if (revidr & (0x1 << 3))
+               return;
+
+       asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+       /*
+        * BIT(27) - Disables streaming. All write-allocate lines allocate in
+        * the L1 or L2 cache.
+        * BIT(25) - Disables streaming. All write-allocate lines allocate in
+        * the L1 cache.
+        */
+       acr_mask = (0x3 << 25) | (0x3 << 27);
+       /* do we already have it done.. if yes, skip expensive smc */
+       if ((acr & acr_mask) == acr_mask)
+               return;
+
+       acr |= acr_mask;
+       omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+       pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
+                __func__, smp_processor_id());
+}
+#else
+static inline void omap5_erratum_workaround_801819(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
        /*
@@ -64,12 +97,15 @@ static void omap4_secondary_init(unsigned int cpu)
                omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
                                                        4, 0, 0, 0, 0, 0);
 
-       /*
-        * Configure the CNTFRQ register for the secondary cpu's which
-        * indicates the frequency of the cpu local timers.
-        */
-       if (soc_is_omap54xx() || soc_is_dra7xx())
+       if (soc_is_omap54xx() || soc_is_dra7xx()) {
+               /*
+                * Configure the CNTFRQ register for the secondary cpu's which
+                * indicates the frequency of the cpu local timers.
+                */
                set_cntfreq();
+               /* Configure ACR to disable streaming WA for 801819 */
+               omap5_erratum_workaround_801819();
+       }
 
        /*
         * Synchronise with the boot thread.
@@ -218,6 +254,8 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 
        if (cpu_is_omap446x())
                startup_addr = omap4460_secondary_startup;
+       if (soc_is_dra74x() || soc_is_omap54xx())
+               omap5_erratum_workaround_801819();
 
        /*
         * Write the address of secondary startup routine into the
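One arithmetic detail of the 801819 workaround above is worth spelling out: acr_mask ORs 0x3 (two bits) at positions 25 and 27, so the test-and-set actually covers ACR bits 25 through 28, not just the two bits named in the comment. The arithmetic, standalone:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t acr_mask = (0x3u << 25) | (0x3u << 27);

	assert(acr_mask == 0x1e000000u);   /* bits 25, 26, 27 and 28 set */
	return 0;
}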
index 78af6d8cf2e208811f2d6b31e1aa837c3b585565..daf2753de7aa2a4681c690e49fb00d874c856a90 100644 (file)
@@ -186,8 +186,9 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
                        trace_state = (PWRDM_TRACE_STATES_FLAG |
                                       ((next & OMAP_POWERSTATE_MASK) << 8) |
                                       ((prev & OMAP_POWERSTATE_MASK) << 0));
-                       trace_power_domain_target(pwrdm->name, trace_state,
-                                                 smp_processor_id());
+                       trace_power_domain_target_rcuidle(pwrdm->name,
+                                                         trace_state,
+                                                         smp_processor_id());
                }
                break;
        default:
@@ -523,8 +524,8 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
 
        if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
                /* Trace the pwrdm desired target state */
-               trace_power_domain_target(pwrdm->name, pwrst,
-                                         smp_processor_id());
+               trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
+                                                 smp_processor_id());
                /* Program the pwrdm desired target state */
                ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
        }
index 0ec2d00f42371c208ac02f87ffc03b0db5241ace..eb350a67313367ca8c3fe405f0119c7225832180 100644 (file)
@@ -36,14 +36,7 @@ static struct powerdomain iva_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_IVA_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 4,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* hwa_mem */
-               [1] = PWRSTS_OFF_RET,   /* sl2_mem */
-               [2] = PWRSTS_OFF_RET,   /* tcm1_mem */
-               [3] = PWRSTS_OFF_RET,   /* tcm2_mem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* hwa_mem */
                [1] = PWRSTS_ON,        /* sl2_mem */
@@ -76,12 +69,7 @@ static struct powerdomain ipu_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_IPU_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 2,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* aessmem */
-               [1] = PWRSTS_OFF_RET,   /* periphmem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* aessmem */
                [1] = PWRSTS_ON,        /* periphmem */
@@ -95,11 +83,7 @@ static struct powerdomain dss_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_DSS_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* dss_mem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* dss_mem */
        },
@@ -111,13 +95,8 @@ static struct powerdomain l4per_7xx_pwrdm = {
        .name             = "l4per_pwrdm",
        .prcm_offs        = DRA7XX_PRM_L4PER_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
-       .pwrsts           = PWRSTS_RET_ON,
-       .pwrsts_logic_ret = PWRSTS_RET,
+       .pwrsts           = PWRSTS_ON,
        .banks            = 2,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* nonretained_bank */
-               [1] = PWRSTS_OFF_RET,   /* retained_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* nonretained_bank */
                [1] = PWRSTS_ON,        /* retained_bank */
@@ -132,9 +111,6 @@ static struct powerdomain gpu_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* gpu_mem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* gpu_mem */
        },
@@ -148,8 +124,6 @@ static struct powerdomain wkupaon_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* wkup_bank */
        },
@@ -161,15 +135,7 @@ static struct powerdomain core_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_CORE_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_ON,
-       .pwrsts_logic_ret = PWRSTS_RET,
        .banks            = 5,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* core_nret_bank */
-               [1] = PWRSTS_OFF_RET,   /* core_ocmram */
-               [2] = PWRSTS_OFF_RET,   /* core_other_bank */
-               [3] = PWRSTS_OFF_RET,   /* ipu_l2ram */
-               [4] = PWRSTS_OFF_RET,   /* ipu_unicache */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* core_nret_bank */
                [1] = PWRSTS_ON,        /* core_ocmram */
@@ -226,11 +192,7 @@ static struct powerdomain vpe_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_VPE_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* vpe_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* vpe_bank */
        },
@@ -260,14 +222,8 @@ static struct powerdomain l3init_7xx_pwrdm = {
        .name             = "l3init_pwrdm",
        .prcm_offs        = DRA7XX_PRM_L3INIT_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
-       .pwrsts           = PWRSTS_RET_ON,
-       .pwrsts_logic_ret = PWRSTS_RET,
+       .pwrsts           = PWRSTS_ON,
        .banks            = 3,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* gmac_bank */
-               [1] = PWRSTS_OFF_RET,   /* l3init_bank1 */
-               [2] = PWRSTS_OFF_RET,   /* l3init_bank2 */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* gmac_bank */
                [1] = PWRSTS_ON,        /* l3init_bank1 */
@@ -283,9 +239,6 @@ static struct powerdomain eve3_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve3_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve3_bank */
        },
@@ -299,9 +252,6 @@ static struct powerdomain emu_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* emu_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* emu_bank */
        },
@@ -314,11 +264,6 @@ static struct powerdomain dsp2_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 3,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* dsp2_edma */
-               [1] = PWRSTS_OFF_RET,   /* dsp2_l1 */
-               [2] = PWRSTS_OFF_RET,   /* dsp2_l2 */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* dsp2_edma */
                [1] = PWRSTS_ON,        /* dsp2_l1 */
@@ -334,11 +279,6 @@ static struct powerdomain dsp1_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 3,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* dsp1_edma */
-               [1] = PWRSTS_OFF_RET,   /* dsp1_l1 */
-               [2] = PWRSTS_OFF_RET,   /* dsp1_l2 */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* dsp1_edma */
                [1] = PWRSTS_ON,        /* dsp1_l1 */
@@ -354,9 +294,6 @@ static struct powerdomain cam_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* vip_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* vip_bank */
        },
@@ -370,9 +307,6 @@ static struct powerdomain eve4_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve4_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve4_bank */
        },
@@ -386,9 +320,6 @@ static struct powerdomain eve2_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve2_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve2_bank */
        },
@@ -402,9 +333,6 @@ static struct powerdomain eve1_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve1_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve1_bank */
        },
index 5b385bb8aff9673b21b5f18edeab8d8114525180..cb9497a20fb307548cea76a895274c459b534d06 100644 (file)
@@ -496,8 +496,7 @@ void __init omap_init_time(void)
        __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
                        2, "timer_sys_ck", NULL, false);
 
-       if (of_have_populated_dt())
-               clocksource_probe();
+       clocksource_probe();
 }
 
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
@@ -505,6 +504,8 @@ void __init omap3_secure_sync32k_timer_init(void)
 {
        __omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
                        2, "timer_sys_ck", NULL, false);
+
+       clocksource_probe();
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
@@ -513,6 +514,8 @@ void __init omap3_gptimer_timer_init(void)
 {
        __omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
                        1, "timer_sys_ck", "ti,timer-alwon", true);
+
+       clocksource_probe();
 }
 #endif
 
index 5766ce2be32bbd28452c95ea9e43e92de0f4dcb0..8409cab3f760cc798a14cadc5f017feab53d1b65 100644 (file)
@@ -547,7 +547,7 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
 
        init.name = dev_name(cpu_dev);
        init.ops = &clk_spc_ops;
-       init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+       init.flags = CLK_GET_RATE_NOCACHE;
        init.num_parents = 0;
 
        return devm_clk_register(cpu_dev, &spc->hw);
index 84baa16f4c0b674dfe404b54bb9fe09b4fd682bf..e93aa6734147915b17a283aa7530a2176f175be6 100644 (file)
@@ -68,7 +68,7 @@
 #include <linux/platform_data/asoc-s3c.h>
 #include <linux/platform_data/spi-s3c64xx.h>
 
-static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
+#define samsung_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))
 
 /* AC97 */
 #ifdef CONFIG_CPU_S3C2440
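The macro form replaces one shared, writable u64 with a distinct anonymous object per expansion: a file-scope compound literal such as (u64[]){ ... } has static storage duration, and dereferencing the array yields an addressable lvalue, so &samsung_device_dma_mask still works in the device tables. The C semantics in miniature (standalone, with uint64_t standing in for u64):

#include <assert.h>
#include <stdint.h>

#define dma_mask_sketch (*((uint64_t[]) { 0xffffffffu }))

static uint64_t *a = &dma_mask_sketch;   /* two expansions ... */
static uint64_t *b = &dma_mask_sketch;   /* ... two distinct objects */

int main(void)
{
	assert(a != b);            /* no shared symbol between the two users */
	*a = 0xffffffu;            /* writable, but private to this user */
	assert(*b == 0xffffffffu);
	return 0;
}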
index 7085e322dc42675cf9e502553220096a27eae44b..648a32c89541845affc45426efc6e2e64921d92f 100644 (file)
@@ -95,7 +95,7 @@ boot := arch/arm64/boot
 Image: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
-Image.%: vmlinux
+Image.%: Image
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 zinstall install:
index 3a4e9a2ab313010614caf641cce5aca55b0ff6c6..fbafa24cd5335b90de29af9a3f3a731bc616ac10 100644 (file)
@@ -125,7 +125,7 @@ amba {
                #size-cells = <1>;
                #interrupts-cells = <3>;
 
-               compatible = "arm,amba-bus";
+               compatible = "simple-bus";
                interrupt-parent = <&gic>;
                ranges;
 
index 46f325a143b0ff0514409139cd89be34a65ae35f..d7f8e06910bcada5899399b87d3a8a926813eb53 100644 (file)
@@ -163,7 +163,7 @@ xin24m: xin24m {
        };
 
        amba {
-               compatible = "arm,amba-bus";
+               compatible = "simple-bus";
                #address-cells = <2>;
                #size-cells = <2>;
                ranges;
index f69f69c8120c3343b118cb81c533563656de92c9..da84645525b902dc05154a7ba16f88718b1aa353 100644 (file)
@@ -38,25 +38,54 @@ extern int kgdb_fault_expected;
 #endif /* !__ASSEMBLY__ */
 
 /*
- * gdb is expecting the following registers layout.
+ * gdb remote protocol (well most versions of it) expects the following
+ * register layout.
  *
  * General purpose regs:
  *     r0-r30: 64 bit
  *     sp,pc : 64 bit
- *     pstate  : 64 bit
- *     Total: 34
+ *     pstate  : 32 bit
+ *     Total: 33 + 1
  * FPU regs:
  *     f0-f31: 128 bit
- *     Total: 32
- * Extra regs
  *     fpsr & fpcr: 32 bit
- *     Total: 2
+ *     Total: 32 + 2
  *
+ * To expand a little on the "most versions of it"... when the gdb remote
+ * protocol for AArch64 was developed it depended on a statement in the
+ * Architecture Reference Manual that claimed "SPSR_ELx is a 32-bit register"
+ * and, as a result, allocated only 32-bits for the PSTATE in the remote
+ * protocol. In fact this statement is still present in ARM DDI 0487A.i.
+ *
+ * Unfortunately "is a 32-bit register" has a very special meaning for
+ * system registers. It means that "the upper bits, bits[63:32], are
+ * RES0.". RES0 is heavily used in the ARM architecture documents as a
+ * way to leave space for future architecture changes. So to translate a
+ * little for people who don't spend their spare time reading ARM architecture
+ * manuals, what "is a 32-bit register" actually means in this context is
+ * "is a 64-bit register but one with no meaning allocated to any of the
+ * upper 32-bits... *yet*".
+ *
+ * Perhaps then we should not be surprised that this has led to some
+ * confusion. Specifically a patch, influenced by the above translation,
+ * that extended PSTATE to 64-bit was accepted into gdb-7.7 but the patch
+ * was reverted in gdb-7.8.1 and all later releases, when this was
+ * discovered to be an undocumented protocol change.
+ *
+ * So... it is *not* wrong for us to only allocate 32-bits to PSTATE
+ * here even though the kernel itself allocates 64-bits for the same
+ * state. That is because this bit of code tells the kernel how the gdb
+ * remote protocol (well most versions of it) describes the register state.
+ *
+ * Note that if you are using one of the versions of gdb that supports
+ * the gdb-7.7 version of the protocol you cannot use kgdb directly
+ * without providing a custom register description (gdb can load new
+ * protocol descriptions at runtime).
  */
 
-#define _GP_REGS               34
+#define _GP_REGS               33
 #define _FP_REGS               32
-#define _EXTRA_REGS            2
+#define _EXTRA_REGS            3
 /*
  * general purpose registers size in bytes.
  * pstate is only 4 bytes. subtract 4 bytes
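With the new counts, the layout the comment describes adds up to 68 register slots and a 788-byte payload once pstate shrinks to 4 bytes. A standalone check of that arithmetic (the grouping into GP/FP/extra follows the defines above; the byte totals are implied by the comment, not quoted from the header):

#include <assert.h>

int main(void)
{
	int gp = 33, fp = 32, extra = 3;  /* x0-x30,sp,pc | v0-v31 | pstate,fpsr,fpcr */

	assert(gp + fp + extra == 68);    /* total register slots */
	/* 64-bit GP regs + 32-bit pstate + 128-bit V regs + 32-bit fpsr/fpcr */
	assert(gp * 8 + 4 + fp * 16 + 2 * 4 == 788);
	return 0;
}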
index ff98585d085aa5737c9c17478555f22b11261f2f..d25f4f137c2aa1c758fa614af40361bfada50aef 100644 (file)
@@ -26,7 +26,7 @@
 
 #define check_pgt_cache()              do { } while (0)
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #define PGD_SIZE       (PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
index 433e504052741ef752dac96f8be0447b6ed8c1fa..022644704a936dce53bb933f9bf6635835e32530 100644 (file)
@@ -124,6 +124,18 @@ static inline void cpu_panic_kernel(void)
        cpu_park_loop();
 }
 
+/*
+ * If a secondary CPU enters the kernel but fails to come online,
+ * (e.g. due to mismatched features), and cannot exit the kernel,
+ * we increment cpus_stuck_in_kernel and leave the CPU in a
+ * quiescent loop within the kernel text. The memory containing
+ * this loop must not be re-used for anything else as the 'stuck'
+ * core is executing it.
+ *
+ * This function is used to inhibit features like kexec and hibernate.
+ */
+bool cpus_are_stuck_in_kernel(void);
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* ifndef __ASM_SMP_H */
index fc9682bfe0020caebf99412131fb760d5b8b870d..e875a5a551d7debeab3c34ed2d52e9beae7c8f7d 100644 (file)
@@ -30,22 +30,53 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
        unsigned int tmp;
        arch_spinlock_t lockval;
+       u32 owner;
+
+       /*
+        * Ensure prior spin_lock operations to other locks have completed
+        * on this CPU before we test whether "lock" is locked.
+        */
+       smp_mb();
+       owner = READ_ONCE(lock->owner) << 16;
 
        asm volatile(
 "      sevl\n"
 "1:    wfe\n"
 "2:    ldaxr   %w0, %2\n"
+       /* Is the lock free? */
 "      eor     %w1, %w0, %w0, ror #16\n"
-"      cbnz    %w1, 1b\n"
+"      cbz     %w1, 3f\n"
+       /* Lock taken -- has there been a subsequent unlock->lock transition? */
+"      eor     %w1, %w3, %w0, lsl #16\n"
+"      cbz     %w1, 1b\n"
+       /*
+        * The owner has been updated, so there was an unlock->lock
+        * transition that we missed. That means we can rely on the
+        * store-release of the unlock operation paired with the
+        * load-acquire of the lock operation to publish any of our
+        * previous stores to the new lock owner and therefore don't
+        * need to bother with the writeback below.
+        */
+"      b       4f\n"
+"3:\n"
+       /*
+        * Serialise against any concurrent lockers by writing back the
+        * unlocked lock value
+        */
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
 "      stxr    %w1, %w0, %2\n"
-"      cbnz    %w1, 2b\n", /* Serialise against any concurrent lockers */
-       /* LSE atomics */
 "      nop\n"
-"      nop\n")
+"      nop\n",
+       /* LSE atomics */
+"      mov     %w1, %w0\n"
+"      cas     %w0, %w0, %2\n"
+"      eor     %w1, %w1, %w0\n")
+       /* Somebody else wrote to the lock, GOTO 10 and reload the value */
+"      cbnz    %w1, 2b\n"
+"4:"
        : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-       :
+       : "r" (owner)
        : "memory");
 }
 
@@ -148,6 +179,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
+       smp_mb(); /* See arch_spin_unlock_wait */
        return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
index f8df75d740f4891a018aa84e02cb7c2988f3ff84..21ab5df9fa76ddea54d58abf77812593ddbb0f6d 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/sections.h>
+#include <asm/smp.h>
 #include <asm/suspend.h>
 #include <asm/virt.h>
 
@@ -236,6 +237,11 @@ int swsusp_arch_suspend(void)
        unsigned long flags;
        struct sleep_stack_data state;
 
+       if (cpus_are_stuck_in_kernel()) {
+               pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
+               return -EBUSY;
+       }
+
        local_dbg_save(flags);
 
        if (__cpu_suspend_enter(&state)) {
index b67531a13136dd691bba7799af4c1664dc84b3e4..b5f063e5eff7dbe59516dec92b64782a25ff8d16 100644 (file)
@@ -58,7 +58,17 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
        { "x30", 8, offsetof(struct pt_regs, regs[30])},
        { "sp", 8, offsetof(struct pt_regs, sp)},
        { "pc", 8, offsetof(struct pt_regs, pc)},
-       { "pstate", 8, offsetof(struct pt_regs, pstate)},
+       /*
+        * struct pt_regs thinks PSTATE is 64 bits wide but the gdb
+        * remote protocol disagrees. Therefore we must extract only
+        * the lower 32 bits. See the big comment in asm/kgdb.h for more
+        * detail.
+        */
+       { "pstate", 4, offsetof(struct pt_regs, pstate)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+                                                       + 4
+#endif
+       },
        { "v0", 16, -1 },
        { "v1", 16, -1 },
        { "v2", 16, -1 },
@@ -128,6 +138,8 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
        memset((char *)gdb_regs, 0, NUMREGBYTES);
        thread_regs = task_pt_regs(task);
        memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
+       /* Special case for PSTATE (check comments in asm/kgdb.h for details) */
+       dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs);
 }
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
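
The "+ 4" above falls out of byte order: the low 32 bits of a 64-bit field start at byte offset 0 on a little-endian machine but at byte offset 4 on a big-endian one. A standalone illustration (the pstate value is arbitrary):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            uint64_t pstate = 0x00000000600003c5ULL;    /* arbitrary */
            uint32_t low;

            /* Pick the byte offset of the low half by endianness. */
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            memcpy(&low, (char *)&pstate + 4, sizeof(low));
    #else
            memcpy(&low, (char *)&pstate, sizeof(low));
    #endif
            assert(low == (uint32_t)pstate);
            return 0;
    }
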
index 678e0842cb3b72f891684ce502009168fc444627..62ff3c0622e2e6d83395de24fe93dcd0071f0c62 100644 (file)
@@ -909,3 +909,21 @@ int setup_profiling_timer(unsigned int multiplier)
 {
        return -EINVAL;
 }
+
+static bool have_cpu_die(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       int any_cpu = raw_smp_processor_id();
+
+       if (cpu_ops[any_cpu]->cpu_die)
+               return true;
+#endif
+       return false;
+}
+
+bool cpus_are_stuck_in_kernel(void)
+{
+       bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+
+       return !!cpus_stuck_in_kernel || smp_spin_tables;
+}
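
The policy implemented here: a kernel image may only be replaced (kexec) or resumed over (hibernate) if every secondary CPU can be parked, which requires a cpu_die method; spin-table systems have none, and CPUs that failed to come offline count as stuck. A toy restatement, with illustrative parameters standing in for the kernel's globals:

    #include <assert.h>
    #include <stdbool.h>

    static bool stuck_in_kernel(unsigned int possible_cpus,
                                bool have_cpu_die,
                                unsigned int failed_to_die)
    {
            /* Mirrors the logic above: spin-table secondaries can never
             * be offlined, so SMP without cpu_die() inhibits the lot. */
            bool smp_spin_tables = possible_cpus > 1 && !have_cpu_die;

            return failed_to_die > 0 || smp_spin_tables;
    }

    int main(void)
    {
            assert(stuck_in_kernel(4, false, 0));   /* spin-tables: stuck  */
            assert(!stuck_in_kernel(4, true, 0));   /* hotplug: fine       */
            assert(!stuck_in_kernel(1, false, 0));  /* UP: nothing to park */
            return 0;
    }
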
index f7cf463107df7508a6091e403795898ad9e79e07..2a43012616b7c5e31555e0a45323d74a9181ccd7 100644 (file)
@@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 
        /*
         * We need to switch to kernel mode so that we can use __get_user
-        * to safely read from kernel space.  Note that we now dump the
-        * code first, just in case the backtrace kills us.
+        * to safely read from kernel space.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);
@@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
        print_ip_sym(where);
 }
 
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
 {
        unsigned long addr = instruction_pointer(regs);
-       mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;
 
-       /*
-        * We need to switch to kernel mode so that we can use __get_user
-        * to safely read from kernel space.  Note that we now dump the
-        * code first, just in case the backtrace kills us.
-        */
-       fs = get_fs();
-       set_fs(KERNEL_DS);
-
        for (i = -4; i < 1; i++) {
                unsigned int val, bad;
 
@@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
                }
        }
        printk("%sCode: %s\n", lvl, str);
+}
 
-       set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+       if (!user_mode(regs)) {
+               mm_segment_t fs = get_fs();
+               set_fs(KERNEL_DS);
+               __dump_instr(lvl, regs);
+               set_fs(fs);
+       } else {
+               __dump_instr(lvl, regs);
+       }
 }
 
 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
index b7b397802088d1b6c5af50efac9aa49852cbf242..efcf1f7ef1e4c27b4c0c40546dc542122f824933 100644 (file)
@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
                                                 &asid_generation);
        flush_context(cpu);
 
-       /* We have at least 1 ASID per CPU, so this will always succeed */
+       /* We have more ASIDs than CPUs, so this will always succeed */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 
 set_asid:
@@ -227,8 +227,11 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 static int asids_init(void)
 {
        asid_bits = get_cpu_asid_bits();
-       /* If we end up with more CPUs than ASIDs, expect things to crash */
-       WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+       /*
+        * Expect allocation after rollover to fail if we don't have at least
+        * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+        */
+       WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
        atomic64_set(&asid_generation, ASID_FIRST_VERSION);
        asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
                           GFP_KERNEL);
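
The corrected bound is clearest with numbers: ASID #0 is reserved for init_mm, and across a rollover every CPU may hold on to the ASID it is running with, so a fresh allocation only succeeds when usable ASIDs strictly outnumber the CPUs. A worked check, assuming a 16-bit ASID field:

    #include <assert.h>

    #define ASID_BITS       16                  /* assumed width */
    #define NUM_USER_ASIDS  (1ul << ASID_BITS)

    int main(void)
    {
            unsigned long possible_cpus = 8;

            /* The WARN_ON fires when this does not hold. */
            assert(!(NUM_USER_ASIDS - 1 <= possible_cpus));
            return 0;
    }
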
index 5954881a35ac516aed1bfb0844249feb551f40b3..013e2cbe792474eb3777c8f01ff65f2356527260 100644 (file)
@@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
         * PTE_RDONLY is cleared by default in the asm below, so set it
         * back if necessary (read-only or clean PTE).
         */
-       if (!pte_write(entry) || !dirty)
+       if (!pte_write(entry) || !pte_sw_dirty(entry))
                pte_val(entry) |= PTE_RDONLY;
 
        /*
@@ -441,7 +441,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
        return 1;
 }
 
-static struct fault_info {
+static const struct fault_info {
        int     (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
        int     sig;
        int     code;
index dbd12ea8ce684993f1adf5c100a06d223b646073..43a76b07eb32dc9c6324c53ddcc601283c6005b2 100644 (file)
@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
 {
        struct page *page = pte_page(pte);
 
-       /* no flushing needed for anonymous pages */
-       if (!page_mapping(page))
-               return;
-
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                sync_icache_aliases(page_address(page),
                                    PAGE_SIZE << compound_order(page));
index 1aba19d68c5ededf4ce986340bf410bb8e05eb3e..db039cb368bef85edaf7461f2f9f834be79a6bfd 100644 (file)
@@ -43,7 +43,7 @@ static inline void pgd_ctor(void *x)
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
+       return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -63,7 +63,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
        struct page *page;
        void *pg;
 
-       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
        if (!pg)
                return NULL;
 
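This hunk opens a long tree-wide sweep that runs through the rest of this diff: __GFP_REPEAT is dropped from page table allocations. To the best of our reading, the page allocator only ever consulted that hint for costly orders (above PAGE_ALLOC_COSTLY_ORDER), so removing it from order-0 and similarly small allocations changes nothing; note that one genuinely costly order-4 allocation further down keeps the flag. A toy model of the policy:

    #include <assert.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3       /* as in the kernel */

    /* Toy: the retry hint only ever mattered for costly orders. */
    static int repeat_hint_consulted(unsigned int order)
    {
            return order > PAGE_ALLOC_COSTLY_ORDER;
    }

    int main(void)
    {
            assert(!repeat_hint_consulted(0));  /* page tables: a no-op     */
            assert(repeat_hint_consulted(4));   /* the order-4 pgd keeps it */
            return 0;
    }
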
index 235ece437dddc7196be819a8b7b30dc3f26028a5..42f1affb9c2dda555041404be054b9ae5e873f85 100644 (file)
@@ -24,14 +24,14 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *pte;
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+       pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
index 41907d25ed384e2ee42e46d1df461bbd2886732f..c9ed14f6c67de118ac789f7f58e3937df7ad3ddf 100644 (file)
@@ -22,7 +22,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
        if (pte)
                clear_page(pte);
        return pte;
@@ -33,9 +33,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
        struct page *page;
 
 #ifdef CONFIG_HIGHPTE
-       page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+       page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-       page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+       page = alloc_pages(GFP_KERNEL, 0);
 #endif
        if (!page)
                return NULL;
index 77da3b0ae3c2b2c37636f4f0e8dc3ee296e80268..eeebf862c46c6aa16a5a994686f80439b4db6c4b 100644 (file)
@@ -64,7 +64,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
@@ -78,7 +78,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       gfp_t flags =  GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+       gfp_t flags =  GFP_KERNEL | __GFP_ZERO;
        return (pte_t *) __get_free_page(flags);
 }
 
index f80758cb7157df174e930d58306be808e97b248a..e109ee95e9191df66b4d5c167736f4c9d698fffc 100644 (file)
@@ -45,7 +45,7 @@ config IA64
        select GENERIC_SMP_IDLE_THREAD
        select ARCH_INIT_TASK
        select ARCH_TASK_STRUCT_ALLOCATOR
-       select ARCH_THREAD_INFO_ALLOCATOR
+       select ARCH_THREAD_STACK_ALLOCATOR
        select ARCH_CLOCKSOURCE_DATA
        select GENERIC_TIME_VSYSCALL_OLD
        select SYSCTL_ARCH_UNALIGN_NO_WARN
index aa995b67c3f51abe459879648025cdd1fe30724c..d1212b84fb8369636b5d226936b89fa98c0d0115 100644 (file)
@@ -48,15 +48,15 @@ struct thread_info {
 #ifndef ASM_OFFSETS_C
 /* how to get the thread information struct from C */
 #define current_thread_info()  ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info_node(tsk, node)      \
-               ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node)     \
+               ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
 #define task_thread_info(tsk)  ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #else
 #define current_thread_info()  ((struct thread_info *) 0)
-#define alloc_thread_info_node(tsk, node)      ((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node)     ((unsigned long *) 0)
 #define task_thread_info(tsk)  ((struct thread_info *) 0)
 #endif
-#define free_thread_info(ti)   /* nothing */
+#define free_thread_stack(ti)  /* nothing */
 #define task_stack_page(tsk)   ((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
index f9efe9739d3fe5da25621d278e4c7a2e6a8110e5..0eaa89f3defd5d8b8f5abf0b7b1e0d1f35ac8d2d 100644 (file)
@@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * handled. This is done by having a special ".data..init_task" section...
  */
 #define init_thread_info       init_task_mem.s.thread_info
+#define init_stack             init_task_mem.stack
 
 union {
        struct {
index f9924fbcfe42b8f279d83758e4fc987cb32da409..fb95aed5f428110ef54a43ef355c12ab0bf59389 100644 (file)
@@ -14,7 +14,7 @@ extern const char bad_pmd_string[];
 extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
        unsigned long address)
 {
-       unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+       unsigned long page = __get_free_page(GFP_DMA);
 
        if (!page)
                return NULL;
@@ -51,7 +51,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
        unsigned long address)
 {
-       struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+       struct page *page = alloc_pages(GFP_DMA, 0);
        pte_t *pte;
 
        if (!page)
index 24bcba496c7505e94d5c45512c399e773106b029..c895b987202cd07fbc9f38ad727bf79b53587a29 100644 (file)
@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
 {
        pte_t *pte;
 
-       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (pte) {
                __flush_page_to_ram(pte);
                flush_tlb_kernel_page(pte);
@@ -32,7 +32,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
        struct page *page;
        pte_t *pte;
 
-       page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+       page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
        if(!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
index 0931388de47f600d75bf52a4fd8a60b8117fe1b2..1901f61f926f5ec5b855d51c9b3fea7f835b29da 100644 (file)
@@ -37,7 +37,7 @@ do {                                                  \
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       unsigned long page = __get_free_page(GFP_KERNEL);
 
        if (!page)
                return NULL;
@@ -49,7 +49,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                        unsigned long address)
 {
-        struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        struct page *page = alloc_pages(GFP_KERNEL, 0);
 
        if (page == NULL)
                return NULL;
index 3104df0a4822fdf766aa8821e3dc17165e5bbc0c..c2caa1ee436057368753bfea9fb839078d41b636 100644 (file)
@@ -42,8 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
-                                             __GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        return pte;
 }
 
@@ -51,7 +50,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                      unsigned long address)
 {
        struct page *pte;
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+       pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
index 61436d69775ca216096cd95169e2bd89eb34b526..7c89390c0c13dc8bfafd35c30bd9fd7372ffa385 100644 (file)
@@ -116,9 +116,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
        struct page *ptepage;
 
 #ifdef CONFIG_HIGHPTE
-       int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+       int flags = GFP_KERNEL | __GFP_HIGHMEM;
 #else
-       int flags = GFP_KERNEL | __GFP_REPEAT;
+       int flags = GFP_KERNEL;
 #endif
 
        ptepage = alloc_pages(flags, 0);
index 4f4520e779a5bf06ccfc69c7b3671961c1967dff..eb99fcc760884e4ee01f1e6bf73a6802a1277169 100644 (file)
@@ -239,8 +239,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
        if (mem_init_done) {
-               pte = (pte_t *)__get_free_page(GFP_KERNEL |
-                                       __GFP_REPEAT | __GFP_ZERO);
+               pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        } else {
                pte = (pte_t *)early_get_page();
                if (pte)
index 6733ac575da4fd13d7cee0e06e4e45e0c2d9b0a6..36a391d289aa09494b4892f995a3c2f591ed837a 100644 (file)
@@ -74,7 +74,7 @@
 #define KVM_GUEST_KUSEG                        0x00000000UL
 #define KVM_GUEST_KSEG0                        0x40000000UL
 #define KVM_GUEST_KSEG23               0x60000000UL
-#define KVM_GUEST_KSEGX(a)             ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_KSEGX(a)             ((_ACAST32_(a)) & 0xe0000000)
 #define KVM_GUEST_CPHYSADDR(a)         ((_ACAST32_(a)) & 0x1fffffff)
 
 #define KVM_GUEST_CKSEG0ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
@@ -338,6 +338,7 @@ struct kvm_mips_tlb {
 #define KVM_MIPS_GUEST_TLB_SIZE        64
 struct kvm_vcpu_arch {
        void *host_ebase, *guest_ebase;
+       int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        unsigned long host_stack;
        unsigned long host_gp;
 
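The KSEGX mask change in the first hunk above matters because 0x60000000 ignores bit 31: a mapped kseg2 address and a guest KSEG0 address could then classify identically. Keeping bit 31 in 0xe0000000 separates the ranges. A quick check, assuming the usual MIPS segment layout (addresses chosen for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define KSEGX_OLD(a)    ((uint32_t)(a) & 0x60000000u)
    #define KSEGX_NEW(a)    ((uint32_t)(a) & 0xe0000000u)

    int main(void)
    {
            /* 0x40000000 is guest KSEG0; 0xc0000000 lies in kseg2. */
            assert(KSEGX_OLD(0xc0000000u) == KSEGX_OLD(0x40000000u));
            assert(KSEGX_NEW(0xc0000000u) != KSEGX_NEW(0x40000000u));
            return 0;
    }
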
index b336037e8768cfd50e45ff877f759a0039bbeb5e..93c079a1cfc8e1f13366590e939c47eade8db789 100644 (file)
@@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
        return pte;
 }
@@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+       pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
        if (!pte)
                return NULL;
        clear_highpage(pte);
@@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        pmd_t *pmd;
 
-       pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+       pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
        if (pmd)
                pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
        return pmd;
index 396df6eb0a12d9bdb9a886f00be348c4b8698ec2..645c8a1982a7b2ab828f83d1cd34b5bdf2550cb7 100644 (file)
@@ -1636,6 +1636,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                if (index < 0) {
                        vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
                        vcpu->arch.host_cp0_badvaddr = va;
+                       vcpu->arch.pc = curr_pc;
                        er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
                                                         vcpu);
                        preempt_enable();
@@ -1647,6 +1648,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                         * invalid exception to the guest
                         */
                        if (!TLB_IS_VALID(*tlb, va)) {
+                               vcpu->arch.host_cp0_badvaddr = va;
+                               vcpu->arch.pc = curr_pc;
                                er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
                                                                run, vcpu);
                                preempt_enable();
@@ -1666,7 +1669,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                        cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
                preempt_enable();
-               goto dont_update_pc;
+               goto done;
 
        }
 
@@ -1694,16 +1697,20 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
                        cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
-               preempt_enable();
-               goto dont_update_pc;
        }
 
        preempt_enable();
+done:
+       /* Rollback PC only if emulation was unsuccessful */
+       if (er == EMULATE_FAIL)
+               vcpu->arch.pc = curr_pc;
 
 dont_update_pc:
-       /* Rollback PC */
-       vcpu->arch.pc = curr_pc;
-done:
+       /*
+        * This is for exceptions whose emulation updates the PC, so do not
+        * overwrite the PC under any circumstances
+        */
+
        return er;
 }
 
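The relabelled epilogue is the usual speculate-then-roll-back shape: the PC is advanced before emulation, restored only when emulation itself failed, and left alone on paths that have already redirected it (the dont_update_pc exit). A toy with made-up helpers, not the KVM API:

    #include <assert.h>

    enum res { EMULATE_DONE, EMULATE_FAIL };

    static enum res emulate_one(unsigned long *pc, int should_fail)
    {
            unsigned long curr_pc = *pc;    /* snapshot before stepping */

            *pc += 4;                       /* speculatively advance    */
            if (should_fail) {
                    *pc = curr_pc;          /* the "done:" rollback     */
                    return EMULATE_FAIL;
            }
            return EMULATE_DONE;            /* PC stays advanced        */
    }

    int main(void)
    {
            unsigned long pc = 0x1000;

            assert(emulate_one(&pc, 1) == EMULATE_FAIL && pc == 0x1000);
            assert(emulate_one(&pc, 0) == EMULATE_DONE && pc == 0x1004);
            return 0;
    }
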
index 4ab4bdfad703e5f19ed1f2db2c10bbece179f259..2143884709e4761ca440cab2944709f1e85661f5 100644 (file)
@@ -28,6 +28,7 @@
 #define MIPS_EXC_MAX                12
 /* XXXSL More to follow */
 
+extern char __kvm_mips_vcpu_run_end[];
 extern char mips32_exception[], mips32_exceptionEnd[];
 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
 
index 3ef03009de5f65fa4807cac71bb9749baaa63ab7..828fcfc1cd7fe9037dc70c1828d4630ac9839c33 100644 (file)
@@ -202,6 +202,7 @@ FEXPORT(__kvm_mips_load_k0k1)
 
        /* Jump to guest */
        eret
+EXPORT(__kvm_mips_vcpu_run_end)
 
 VECTOR(MIPSX(exception), unknown)
 /* Find out what mode we came from and jump to the proper handler. */
index dc052fb5c7a2f80cdc20b7bf282b0cb09fc30a2e..44da5259f39027e9bd8d0e6584c864997d3388e9 100644 (file)
@@ -315,6 +315,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
        memcpy(gebase + offset, mips32_GuestException,
               mips32_GuestExceptionEnd - mips32_GuestException);
 
+#ifdef MODULE
+       offset += mips32_GuestExceptionEnd - mips32_GuestException;
+       memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+              __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+       vcpu->arch.vcpu_run = gebase + offset;
+#else
+       vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
        /* Invalidate the icache for these ranges */
        local_flush_icache_range((unsigned long)gebase,
                                (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
@@ -404,7 +413,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        /* Disable hardware page table walking while in guest */
        htw_stop();
 
-       r = __kvm_mips_vcpu_run(run, vcpu);
+       r = vcpu->arch.vcpu_run(run, vcpu);
 
        /* Re-enable HTW before enabling interrupts */
        htw_start();
index 4861a78c7160f7b1912804439307b77599317619..f5f90bbf019d6842c0cf2cb2b4ae11fcdc362b07 100644 (file)
@@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
 #endif
 #define get_thread_info(ti)    get_task_struct((ti)->task)
 #define put_thread_info(ti)    put_task_struct((ti)->task)
index 99770823451a7e8b1fc2552c948e8f651951e334..2d7986c386fe5964a51d5eddec5cfac578fbb0e4 100644 (file)
@@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
  * single-step state is cleared.  At this point the breakpoints should have
  * been removed by __switch_to().
  */
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
 {
+       struct thread_info *ti = (void *)stack;
        if (kgdb_sstep_thread == ti) {
                kgdb_sstep_thread = NULL;
 
index e77a7c728081125233ebf569dac465960a997aff..9577cf768875c8bd207b1ceb6f1ba8ddb8d943a3 100644 (file)
@@ -63,7 +63,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
        if (pte)
                clear_page(pte);
        return pte;
@@ -74,9 +74,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
        struct page *pte;
 
 #ifdef CONFIG_HIGHPTE
-       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL, 0);
 #endif
        if (!pte)
                return NULL;
index 6e2985e0a7b90284641d41a839b34df22d8864fd..bb47d08c8ef76f085676fe9ce7d26810a90fffa7 100644 (file)
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-                                       PTE_ORDER);
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
        return pte;
 }
@@ -53,7 +52,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+       pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
        if (pte) {
                if (!pgtable_page_ctor(pte)) {
                        __free_page(pte);
index 21484e5b9e9af1cec95e2d786d87816caccb13e4..87eebd18508953733ddc5cd9ed1f387b7abfb88d 100644 (file)
@@ -77,7 +77,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
                                         unsigned long address)
 {
        struct page *pte;
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL, 0);
        if (!pte)
                return NULL;
        clear_page(page_address(pte));
index 62b08ef392be2c202f5ed73326fe708b9eda8e41..5b2a95116e8fa0e3d3fc95d3094bfdcc682b47d9 100644 (file)
@@ -122,7 +122,7 @@ pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
        pte_t *pte;
 
        if (likely(mem_init_done)) {
-               pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+               pte = (pte_t *) __get_free_page(GFP_KERNEL);
        } else {
                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 #if 0
index f2fd327dce2edc14db5717211fe27943e957fcf0..f08dda3f0995e41841c0555655a51d16bdc91b97 100644 (file)
@@ -63,8 +63,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
-                                              PMD_ORDER);
+       pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
        if (pmd)
                memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
        return pmd;
@@ -124,7 +123,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline pgtable_t
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
@@ -137,7 +136,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
index 01f7464d9fea68033b7ef8876e22e0352e365597..0a9d439bcda6c1ade0025d5e894e14d2db4b24be 100644 (file)
@@ -128,7 +128,7 @@ config PPC
        select IRQ_FORCED_THREADING
        select HAVE_RCU_TABLE_FREE if SMP
        select HAVE_SYSCALL_TRACEPOINTS
-       select HAVE_CBPF_JIT
+       select HAVE_CBPF_JIT if CPU_BIG_ENDIAN
        select HAVE_ARCH_JUMP_LABEL
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_HAS_GCOV_PROFILE_ALL
index a2350194fc7640bf96a3ec42f12085efc63a5711..8e21bb492dca038358f6ca7a10d69a783cd6364c 100644 (file)
@@ -102,7 +102,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       tlb_flush_pgtable(tlb, address);
        pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
index 290157e8d5b2498bce2d6ffb14261124d47a25a7..74839f24f4124614eb23ed8dd2c7098ad5f6eb29 100644 (file)
@@ -88,6 +88,7 @@
 #define HPTE_R_RPN_SHIFT       12
 #define HPTE_R_RPN             ASM_CONST(0x0ffffffffffff000)
 #define HPTE_R_PP              ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP             ASM_CONST(0x8000000000000003)
 #define HPTE_R_N               ASM_CONST(0x0000000000000004)
 #define HPTE_R_G               ASM_CONST(0x0000000000000008)
 #define HPTE_R_M               ASM_CONST(0x0000000000000010)
index 488279edb1f045e00cb8bf2261367d319ad862ea..cd5e7aa8cc3489e9f44540e66cafc852bb67d1b7 100644 (file)
@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
                        pgtable_cache[(shift) - 1];     \
                })
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
        return (pgd_t *)__get_free_page(PGALLOC_GFP);
 #else
        struct page *page;
-       page = alloc_pages(PGALLOC_GFP, 4);
+       page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
        if (!page)
                return NULL;
        return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -110,13 +109,17 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long address)
 {
+       /*
+        * By now all the pud entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
         pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -127,6 +130,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long address)
 {
+       /*
+        * By now all the pmd entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
         return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
 }
 
@@ -151,7 +159,7 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -198,7 +206,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       tlb_flush_pgtable(tlb, address);
+       /*
+        * By now all the pte entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, table, 0);
 }
 
index 937d4e247ac31aeb614de3ba59c7e21900dac1cc..df294224e28020cfd438155c6733132b8c906a07 100644 (file)
@@ -228,5 +228,20 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
 
 extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                                 pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+       unsigned long rts_field;
+       /*
+        * We support 52 bits, hence 52 - 31 = 21 = 0b10101.
+        * RTS encoding details:
+        * bits 0 - 2 of rts -> bits 5 - 7 of the unsigned long
+        * bits 3 - 4 of rts -> bits 61 - 62 of the unsigned long
+        */
+       rts_field = (0x5UL << 5); /* rts bits 0 - 2 */
+       rts_field |= (0x2UL << 61);
+
+       return rts_field;
+}
 #endif /* __ASSEMBLY__ */
 #endif
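
The split performed by radix__get_tree_size() can be checked by hand: RTS = 52 - 31 = 21 = 0b10101, whose low three bits land at bits 7:5 of the entry and whose high two bits at bits 62:61. A standalone check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
            unsigned long rts = 52 - 31;    /* 21 = 0b10101 */
            unsigned long field;

            field  = (rts & 0x7) << 5;      /* 0b101 -> bits 7:5   */
            field |= (rts >> 3) << 61;      /* 0b10  -> bits 62:61 */

            assert(field == ((0x5UL << 5) | (0x2UL << 61)));
            return 0;
    }
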
index 13ef38828dfee2fec57551bf2154aabbac3422a4..3fa94fcac6284e20e51566367b58c027ef2e3716 100644 (file)
@@ -18,16 +18,19 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                                    unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                              unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 #else
 #define radix__flush_tlb_mm(mm)                radix__local_flush_tlb_mm(mm)
 #define radix__flush_tlb_page(vma,addr)        radix__local_flush_tlb_page(vma,addr)
 #define radix___flush_tlb_page(mm,addr,p,i)    radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr)        radix__local_flush_tlb_pwc(tlb, addr)
 #endif
 
 #endif
index d98424ae356c49356c459334468c488b786b9be8..96e5769b18b00fe8fbcd7d788c3c94457d15a98a 100644 (file)
@@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 #define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
 #define flush_tlb_page(vma, addr)      local_flush_tlb_page(vma, addr)
 #endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+       /*
+        * Flush the page table walk cache on freeing a page table. We already
+        * have marked the upper/higher level page table entry none by now.
+        * So it is safe to flush PWC here.
+        */
+       if (!radix_enabled())
+               return;
 
+       radix__flush_tlb_pwc(tlb, address);
+}
 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
index 54f591e9572eeb4ea6c3ad99d4f397034e6acbe9..c0a69ae922567b5fc819069e7e7d150992417866 100644 (file)
@@ -4,11 +4,6 @@
 #include <linux/mm.h>
 
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
-                                    unsigned long address)
-{
-
-}
 
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/pgalloc.h>
index 0c12a3bfe2ab10d49f76e24c4d1dbaa59096b2b0..897d2e1c8a9b84ffa96cc283963e9d322d276ad0 100644 (file)
@@ -57,8 +57,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -88,7 +87,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -172,7 +171,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-       pte_fragment_fre((unsigned long *)pte, 1);
+       pte_fragment_free((unsigned long *)pte, 1);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
@@ -190,8 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
index 2714a3b81d24476ad07d651c64720f42ab96bcec..b5f73cb5eeb6e34b43fd75d5054f867128e792f7 100644 (file)
@@ -642,7 +642,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
                if (pe->type & EEH_PE_VF) {
                        eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
                } else {
-                       eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                        pci_lock_rescan_remove();
                        pci_hp_remove_devices(bus);
                        pci_unlock_rescan_remove();
@@ -692,10 +691,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
                 */
                edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
                eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
-               if (pe->type & EEH_PE_VF)
+               if (pe->type & EEH_PE_VF) {
                        eeh_add_virt_device(edev, NULL);
-               else
+               } else {
+                       eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                        pci_hp_add_devices(bus);
+               }
        } else if (frozen_bus && rmv_data->removed) {
                pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
                ssleep(5);
index 4c9440629128ccd94dbc35d5eb0b6e060d4d970f..8bcc1b457115867e93ace2317f203e1cda100352 100644 (file)
@@ -1399,11 +1399,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
 
        mtlr    r10
-BEGIN_MMU_FTR_SECTION
-       b       2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
        andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
+BEGIN_MMU_FTR_SECTION
        beq-    2f
+FTR_SECTION_ELSE
+       b       2f
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
 
 .machine       push
 .machine       "power4"
index ccd2037c797f9e48c82b545ed4eb5febf03d95f6..6ee4b72cda4201840cf2b85f38661831c5e37233 100644 (file)
@@ -719,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
         * must match by the macro below. Update the definition if
         * the structure layout changes.
         */
-#define IBM_ARCH_VEC_NRCORES_OFFSET    125
+#define IBM_ARCH_VEC_NRCORES_OFFSET    133
        W(NR_CPUS),                     /* number of cores supported */
        0,
        0,
index 30a03c03fe734a8c80828fc4260fe3679ab63e85..060b140f03c69de424e40e4dcdb5064d832ce889 100644 (file)
@@ -377,7 +377,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 
 #else
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-                    offsetof(struct thread_fp_state, fpr[32][0]));
+                    offsetof(struct thread_fp_state, fpr[32]));
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fp_state, 0, -1);
@@ -405,7 +405,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
        return 0;
 #else
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-                    offsetof(struct thread_fp_state, fpr[32][0]));
+                    offsetof(struct thread_fp_state, fpr[32]));
 
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fp_state, 0, -1);
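
The subtlety here is C's one-past-the-end rule: fpr[32] merely names the address just past the 32-entry array, a legal thing to take the offset of, while fpr[32][0] indexes into a row that does not exist, which newer compilers reject inside offsetof. A reduced illustration (the struct is a stand-in, not the kernel's thread_fp_state):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct fp_state {
            uint64_t fpr[32][1];
            uint64_t fpscr;
    };

    int main(void)
    {
            /* One past the end of fpr[] is exactly where fpscr begins. */
            assert(offsetof(struct fp_state, fpscr) ==
                   offsetof(struct fp_state, fpr[32]));
            return 0;
    }
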
index d873f6507f7210fe4b9a7caa774861b9648ba5aa..f8a871a72985c1d758d21c7f406210ce491db3b5 100644 (file)
@@ -316,8 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                        DBG_LOW(" -> hit\n");
                        /* Update the HPTE */
                        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-                                               ~(HPTE_R_PP | HPTE_R_N)) |
-                                              (newpp & (HPTE_R_PP | HPTE_R_N |
+                                               ~(HPTE_R_PPP | HPTE_R_N)) |
+                                              (newpp & (HPTE_R_PPP | HPTE_R_N |
                                                         HPTE_R_C)));
                }
                native_unlock_hpte(hptep);
@@ -385,8 +385,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 
        /* Update the HPTE */
        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-                       ~(HPTE_R_PP | HPTE_R_N)) |
-               (newpp & (HPTE_R_PP | HPTE_R_N)));
+                               ~(HPTE_R_PPP | HPTE_R_N)) |
+                              (newpp & (HPTE_R_PPP | HPTE_R_N)));
        /*
         * Ensure it is out of the TLB too. Bolted entries' base and
         * actual page size will be the same.
@@ -550,7 +550,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                }
        }
        /* This works for all page sizes, and for 256M and 1T segments */
-       *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+       if (cpu_has_feature(CPU_FTR_ARCH_300))
+               *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
+       else
+               *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+
        shift = mmu_psize_defs[size].shift;
 
        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
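
The new HPTE_R_PPP mask used in the updatepp paths above extends HPTE_R_PP with the pp0 bit at the top of the dword, so all three protection bits are now cleared and rewritten instead of leaving the high one stale. The relationship, checked numerically:

    #include <assert.h>

    #define HPTE_R_PP   0x0000000000000003UL
    #define HPTE_R_PPP  0x8000000000000003UL

    int main(void)
    {
            assert(HPTE_R_PPP == (HPTE_R_PP | 0x8000000000000000UL));
            return 0;
    }
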
index b2740c67e1729628045dcc41fe50ecada6bac3ae..5b22ba0b58bc9e5168a4d12022bb6c176ab3a14c 100644 (file)
@@ -201,9 +201,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
        /*
         * We can't allow hardware to update hpte bits. Hence always
         * set 'R' bit and set 'C' if it is a write fault
-        * Memory coherence is always enabled
         */
-       rflags |=  HPTE_R_R | HPTE_R_M;
+       rflags |=  HPTE_R_R;
 
        if (pteflags & _PAGE_DIRTY)
                rflags |= HPTE_R_C;
@@ -213,10 +212,15 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 
        if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
                rflags |= HPTE_R_I;
-       if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
+       else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
                rflags |= (HPTE_R_I | HPTE_R_G);
-       if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
-               rflags |= (HPTE_R_I | HPTE_R_W);
+       else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+               rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
+       else
+               /*
+                * Add memory coherence if cache inhibited is not set
+                */
+               rflags |= HPTE_R_M;
 
        return rflags;
 }
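
The rewritten chain is effectively a four-row decision table for the HPTE cache-control bits; making the arms mutually exclusive stops HPTE_R_M from being ORed into the cache-inhibited variants. A toy restatement with placeholder bit values, not the real HPTE_R_* constants:

    #include <assert.h>

    #define R_W 0x8u
    #define R_I 0x4u
    #define R_M 0x2u
    #define R_G 0x1u

    enum cache_ctl { NORMAL, TOLERANT, NON_IDEMPOTENT, SAO };

    static unsigned int wimg_bits(enum cache_ctl c)
    {
            if (c == TOLERANT)
                    return R_I;
            if (c == NON_IDEMPOTENT)
                    return R_I | R_G;
            if (c == SAO)
                    return R_W | R_I | R_M;
            return R_M;     /* coherence only when not cache-inhibited */
    }

    int main(void)
    {
            assert(wimg_bits(NORMAL) == R_M);
            assert(!(wimg_bits(TOLERANT) & R_M)); /* old code leaked M here */
            return 0;
    }
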
index 5aac1a3f86cde4471ba3460c3b08d543a43fd0e4..119d18611500f4ccde99916b1012bfba0b9f1039 100644 (file)
@@ -73,7 +73,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
        cachep = PGT_CACHE(pdshift - pshift);
 #endif
 
-       new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
+       new = kmem_cache_zalloc(cachep, GFP_KERNEL);
 
        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
index 227b2a6c4544b1739813bf192e3a156bed7e1ffd..196222227e8207ca7b71ae28e2f8e8a0ffb1565e 100644 (file)
@@ -65,7 +65,7 @@ static int radix__init_new_context(struct mm_struct *mm, int index)
        /*
         * set the process table entry,
         */
-       rts_field = 3ull << PPC_BITLSHIFT(2);
+       rts_field = radix__get_tree_size();
        process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
        return 0;
 }
index c939e6e57a9e83eda7918dc7e279d879c7fe432b..e58707deef5c2f90530d088dbc786417d191af04 100644 (file)
@@ -160,9 +160,8 @@ static void __init radix_init_pgtable(void)
        process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
        /*
         * Fill in the process table.
-        * we support 52 bits, hence 52-28 = 24, 11000
         */
-       rts_field = 3ull << PPC_BITLSHIFT(2);
+       rts_field = radix__get_tree_size();
        process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
        /*
         * Fill in the partition table. We are supposed to use effective address
@@ -176,10 +175,8 @@ static void __init radix_init_pgtable(void)
 static void __init radix_init_partition_table(void)
 {
        unsigned long rts_field;
-       /*
-        * we support 52 bits, hence 52-28 = 24, 11000
-        */
-       rts_field = 3ull << PPC_BITLSHIFT(2);
+
+       rts_field = radix__get_tree_size();
 
        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
        partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
index bf7bf32b54f8a7ca025c06ca7e74e3f0d78f1d70..7f922f5579365f2a3b3b9404cc7b22d855f587bf 100644 (file)
@@ -84,7 +84,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
        pte_t *pte;
 
        if (slab_is_available()) {
-               pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+               pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        } else {
                pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
                if (pte)
@@ -97,7 +97,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *ptepage;
 
-       gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+       gfp_t flags = GFP_KERNEL | __GFP_ZERO;
 
        ptepage = alloc_pages(flags, 0);
        if (!ptepage)
index e009e0604a8ab76f7b9b1efd2004c8c45f210c6c..f5e8d4edb808f8748d8eaf630f027f0d5796b344 100644 (file)
@@ -350,8 +350,7 @@ static pte_t *get_from_cache(struct mm_struct *mm)
 static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
 {
        void *ret = NULL;
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-                                      __GFP_REPEAT | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        if (!page)
                return NULL;
        if (!kernel && !pgtable_page_ctor(page)) {
index 0fdaf93a3e091be64d827bd376f651a6c3bab8f0..ab2f60e812e2826badd47b0f03fd0eee12ed0051 100644 (file)
 
 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbiel_pid(unsigned long pid, int set)
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
+static inline void __tlbiel_pid(unsigned long pid, int set,
+                               unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = PPC_BIT(53); /* IS = 1 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 2;  /* invalidate all the caches */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
 /*
  * We use 128 sets in radix mode and 256 sets in hpt mode.
  */
-static inline void _tlbiel_pid(unsigned long pid)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 {
        int set;
 
        for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
-               __tlbiel_pid(pid, set);
+               __tlbiel_pid(pid, set, ric);
        }
        return;
 }
 
-static inline void _tlbie_pid(unsigned long pid)
+static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = PPC_BIT(53); /* IS = 1 */
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 2;  /* invalidate all the caches */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid)
 }
 
 static inline void _tlbiel_va(unsigned long va, unsigned long pid,
-                             unsigned long ap)
+                             unsigned long ap, unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 0;  /* no cluster flush yet */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
-                            unsigned long ap)
+                            unsigned long ap, unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 0;  /* no cluster flush yet */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -117,25 +118,40 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
  */
 void radix__local_flush_tlb_mm(struct mm_struct *mm)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
-               _tlbiel_pid(pid);
+               _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_mm);
 
+void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+       unsigned long pid;
+       struct mm_struct *mm = tlb->mm;
+
+       preempt_disable();
+
+       pid = mm->context.id;
+       if (pid != MMU_NO_CONTEXT)
+               _tlbiel_pid(pid, RIC_FLUSH_PWC);
+
+       preempt_enable();
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
+
 void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                            unsigned long ap, int nid)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (pid != MMU_NO_CONTEXT)
-               _tlbiel_va(vmaddr, pid, ap);
+               _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
        preempt_enable();
 }
 
@@ -160,7 +176,7 @@ static int mm_is_core_local(struct mm_struct *mm)
 
 void radix__flush_tlb_mm(struct mm_struct *mm)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm->context.id;
@@ -172,20 +188,46 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 
                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
-               _tlbie_pid(pid);
+               _tlbie_pid(pid, RIC_FLUSH_ALL);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
-               _tlbiel_pid(pid);
+               _tlbiel_pid(pid, RIC_FLUSH_ALL);
 no_context:
        preempt_enable();
 }
 EXPORT_SYMBOL(radix__flush_tlb_mm);
 
+void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+       unsigned long pid;
+       struct mm_struct *mm = tlb->mm;
+
+       preempt_disable();
+
+       pid = mm->context.id;
+       if (unlikely(pid == MMU_NO_CONTEXT))
+               goto no_context;
+
+       if (!mm_is_core_local(mm)) {
+               int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+               if (lock_tlbie)
+                       raw_spin_lock(&native_tlbie_lock);
+               _tlbie_pid(pid, RIC_FLUSH_PWC);
+               if (lock_tlbie)
+                       raw_spin_unlock(&native_tlbie_lock);
+       } else
+               _tlbiel_pid(pid, RIC_FLUSH_PWC);
+no_context:
+       preempt_enable();
+}
+EXPORT_SYMBOL(radix__flush_tlb_pwc);
+
 void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                       unsigned long ap, int nid)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm ? mm->context.id : 0;
@@ -196,11 +238,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 
                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
-               _tlbie_va(vmaddr, pid, ap);
+               _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
-               _tlbiel_va(vmaddr, pid, ap);
+               _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 bail:
        preempt_enable();
 }
@@ -224,7 +266,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
        if (lock_tlbie)
                raw_spin_lock(&native_tlbie_lock);
-       _tlbie_pid(0);
+       _tlbie_pid(0, RIC_FLUSH_ALL);
        if (lock_tlbie)
                raw_spin_unlock(&native_tlbie_lock);
 }
index c50ea76ba66ceb95c99555f7391b75c0744a1b96..6081fbd75330b8a936a6e590b7fcb9604c4a23cb 100644 (file)
@@ -221,7 +221,7 @@ static bool soc_has_mclk_mux0_canin(void)
 /* convenience wrappers around the common clk API */
 static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
 {
-       return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+       return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
 }
 
 static inline struct clk *mpc512x_clk_factor(
index 84fb984f29c130f89cffbbaaf1aae9b8b1d6c843..85c85eb3e245d7a7a9600a8fce95a592789f38bc 100644 (file)
@@ -172,7 +172,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
        if (rc < 0)
                goto out;
 
-       skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
+       skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
        if (!dump_skip(cprm, skip))
                goto Eio;
 out:
index b7dfc1359d0113d570fe9ea7f702c728814dc66f..3e8865b187de22187a39e1bc66b662e464009f14 100644 (file)
@@ -927,7 +927,7 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
        dn = pci_device_to_OF_node(dev);
        pdn = PCI_DN(dn);
        buid = pdn->phb->buid;
-       cfg_addr = (pdn->busno << 8) | pdn->devfn;
+       cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
 
        ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
                  cfg_addr, BUID_HI(buid), BUID_LO(buid));
@@ -956,7 +956,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
        dn = pci_device_to_OF_node(dev);
        pdn = PCI_DN(dn);
        buid = pdn->phb->buid;
-       cfg_addr = (pdn->busno << 8) | pdn->devfn;
+       cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
 
        do {
                /* extra outputs are LIOBN and dma-addr (hi, lo) */
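
Both hunks in this file fix the same encoding bug: as we understand the RTAS convention, the dynamic-DMA-window calls expect the PCI config address with the bus number in bits 23:16 and devfn in bits 15:8, whereas the old code packed bus<<8|devfn and overlapped the fields. Worked with example values (the device numbers are arbitrary):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t busno = 0x01, devfn = 0x48;

            uint32_t old = (busno << 8) | devfn;            /* 0x00000148 */
            uint32_t new = (busno << 16) | (devfn << 8);    /* 0x00014800 */

            assert(old == 0x148 && new == 0x14800);
            return 0;
    }
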
index 37b9017c6a96b70d5a9460be9923a44663268ff8..ac82e8eb936dc673e8079f4c91e170adf1075fc9 100644 (file)
@@ -245,6 +245,7 @@ struct kvm_vcpu_stat {
        u32 exit_stop_request;
        u32 exit_validity;
        u32 exit_instruction;
+       u32 exit_pei;
        u32 halt_successful_poll;
        u32 halt_attempted_poll;
        u32 halt_poll_invalid;
index 59215c518f379d06479536653b6448a7d556fe83..7ec63b1d920d29aa75420a63ea4b4f8e868975a6 100644 (file)
@@ -649,6 +649,8 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
 
 /* Performance monitoring unit for s390x */
 static struct pmu cpumf_pmu = {
+       .task_ctx_nr  = perf_sw_context,
+       .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
        .pmu_enable   = cpumf_pmu_enable,
        .pmu_disable  = cpumf_pmu_disable,
        .event_init   = cpumf_pmu_event_init,
@@ -708,12 +710,6 @@ static int __init cpumf_pmu_init(void)
                goto out;
        }
 
-       /* The CPU measurement counter facility does not have overflow
-        * interrupts to do sampling.  Sampling must be provided by
-        * external means, for example, by timers.
-        */
-       cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
        cpumf_pmu.attr_groups = cpumf_cf_event_group();
        rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
        if (rc) {
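
The second hunk deletes the run-time `cpumf_pmu.capabilities |=` assignment because the first hunk folds the flag into the static initializer. A generic C sketch of that refactor, with illustrative names:

#include <stdio.h>

#define CAP_NO_INTERRUPT 0x1

struct pmu { unsigned int capabilities; const char *name; };

/* After: the property is part of the object's definition... */
static struct pmu example_pmu = {
        .capabilities = CAP_NO_INTERRUPT,
        .name         = "example",
};

int main(void)
{
        /* ...so no init-time code like
         *     example_pmu.capabilities |= CAP_NO_INTERRUPT;
         * is needed before registration. */
        printf("%s caps=%#x\n", example_pmu.name, example_pmu.capabilities);
        return 0;
}
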
index 2e6b54e4d3f955d1c7971fce4ad02551eeac3846..252157181302e43edb4dac6e675108002009b30a 100644 (file)
@@ -341,6 +341,8 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 
 static int handle_partial_execution(struct kvm_vcpu *vcpu)
 {
+       vcpu->stat.exit_pei++;
+
        if (vcpu->arch.sie_block->ipa == 0xb254)        /* MVPG */
                return handle_mvpg_pei(vcpu);
        if (vcpu->arch.sie_block->ipa >> 8 == 0xae)     /* SIGP */
index 6d8ec3ac9dd8ec675d4d138a83888fa371138bcb..43f2a2b80490ce44d6d425efdeab9fbe7491b5bc 100644 (file)
@@ -61,6 +61,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
+       { "exit_pei", VCPU_STAT(exit_pei) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
@@ -657,7 +658,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
                kvm->arch.model.cpuid = proc->cpuid;
                lowest_ibc = sclp.ibc >> 16 & 0xfff;
                unblocked_ibc = sclp.ibc & 0xfff;
-               if (lowest_ibc) {
+               if (lowest_ibc && proc->ibc) {
                        if (proc->ibc > unblocked_ibc)
                                kvm->arch.model.ibc = unblocked_ibc;
                        else if (proc->ibc < lowest_ibc)
index e8b5962ac12ab8035797829f6940b9c1c7cfe227..e2565d2d0c32edeea01c61de92127c8666e7a305 100644 (file)
@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                        return table;
        }
        /* Allocate a fresh page */
-       page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+       page = alloc_page(GFP_KERNEL);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
index 4324b87f93982f08552ecb86bb9c893431507427..9f0ce0e6eeb484c2fad96f560a3b5ca851188ea8 100644 (file)
@@ -437,7 +437,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
        pgste = pgste_get_lock(ptep);
        pgstev = pgste_val(pgste);
        pte = *ptep;
-       if (pte_swap(pte) &&
+       if (!reset && pte_swap(pte) &&
            ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
             (pgstev & _PGSTE_GPS_ZERO))) {
                ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
index 2e067657db988d66d8fbd01912fbb4729d934ad3..49b012d78c1ac9a6868a1a08f88c54e415c76028 100644 (file)
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-                                       PTE_ORDER);
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
        return pte;
 }
@@ -53,7 +52,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+       pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
        if (!pte)
                return NULL;
        clear_highpage(pte);
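
This and the following hunks across several architectures drop __GFP_REPEAT from page-table allocations; the flag only changed allocator behaviour for high-order requests, which these order-0 allocations never make, so removing it is a no-op in practice. A portable sketch of the semantic difference being removed, with malloc standing in for the page allocator:

#include <stdlib.h>

static void *alloc_once(size_t n)              /* ~ GFP_KERNEL */
{
        return malloc(n);
}

static void *alloc_retry(size_t n, int tries)  /* ~ GFP_KERNEL|__GFP_REPEAT */
{
        void *p = NULL;
        while (tries-- && !(p = malloc(n)))
                ;                              /* retry on failure */
        return p;
}

int main(void)
{
        void *p = alloc_once(4096);            /* the new behaviour above */
        free(p);
        p = alloc_retry(4096, 3);
        free(p);
        return 0;
}
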
index a33673b3687df2fa1e76022635cc3961a60810c5..f3f42c84c40fd3a2b81ea523ee841c99d1e82f4b 100644 (file)
@@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -43,7 +43,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
        struct page *page;
        void *pg;
 
-       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
        if (!pg)
                return NULL;
        page = virt_to_page(pg);
index 26e03a1f7ca4b6d602e9cc80d2b04ff9bb28600f..a62bd869677976bf4759d83b8e5646420d0f6826 100644 (file)
@@ -1,7 +1,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
 
 static struct kmem_cache *pgd_cachep;
 #if PAGETABLE_LEVELS > 2
index 5e3187185b4a8b9c3f22d4a67ea538a12fdcd9e7..3529f1378cd8d6230a0ad3c76cec74b874a94b2d 100644 (file)
@@ -41,8 +41,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(pgtable_cache,
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -52,8 +51,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(pgtable_cache,
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
index 14bb0d5ed3c605cc71680d95102c832295abbbb8..aec508e374906ba0b1860f3669a609407c080125 100644 (file)
@@ -2704,8 +2704,7 @@ void __flush_tlb_all(void)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                            unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-                                      __GFP_REPEAT | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        pte_t *pte = NULL;
 
        if (page)
@@ -2717,8 +2716,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
                        unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-                                      __GFP_REPEAT | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
index 4b7cef9e94e05917476335cb8248de28a6f25696..c1467ac59ce63e150b571708df8b950c8eb4dfc1 100644 (file)
@@ -78,7 +78,7 @@ struct thread_info {
 
 #ifndef __ASSEMBLY__
 
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
 
 /* How to get the thread information struct from C. */
 register unsigned long stack_pointer __asm__("sp");
index 6b705ccc9cc1ec57abc481e0a296d105faf8f250..a465d8372edda17448b8b9fa3c3920da9d1047b2 100644 (file)
@@ -73,8 +73,9 @@ void arch_cpu_idle(void)
 /*
  * Release a thread_info structure
  */
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
 {
+       struct thread_info *info = (void *)stack;
        struct single_step_state *step_state = info->step_state;
 
        if (step_state) {
index 7bf2491a9c1f6ce8b4f2c1fc31b4dc8f46addaab..c4d5bf841a7f19bc727abbff423ed12b20666f29 100644 (file)
@@ -231,7 +231,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
                               int order)
 {
-       gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
+       gfp_t flags = GFP_KERNEL|__GFP_ZERO;
        struct page *p;
        int i;
 
index b2a2dff50b4e1983a89b9257de79417333c1f2b6..e7437ec627108a263f105ad042f6529201d1775c 100644 (file)
@@ -204,7 +204,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
        pte_t *pte;
 
-       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
@@ -212,7 +212,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *pte;
 
-       pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
index 2e02d1356fdfffd531305693399c378df7f1735d..26775793c204e77d09f6ed1e62a3edab97aa76a5 100644 (file)
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)                  get_pgd_slow(mm)
 #define pgd_free(mm, pgd)              free_pgd_slow(mm, pgd)
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /*
  * Allocate one PTE table.
index 0a7b885964baccd540f2c9e600dce014398bd987..d9a94da0c29fd72b048080681ab1660928a7d3f0 100644 (file)
@@ -2439,6 +2439,15 @@ config PCI_CNB20LE_QUIRK
 
 source "drivers/pci/Kconfig"
 
+config ISA_BUS
+       bool "ISA-style bus support on modern systems" if EXPERT
+       select ISA_BUS_API
+       help
+         Enables ISA-style drivers on modern systems. This is necessary to
+         support PC/104 devices on X86_64 platforms.
+
+         If unsure, say N.
+
 # x86_64 has no ISA slots, but can have ISA-style DMA.
 config ISA_DMA_API
        bool "ISA-style DMA support" if (X86_64 && EXPERT)
index 700a9c6e6159362d9b62631d124d6703a6554ba5..be8e688fa0d48b1fe4aa5180c6afa2c2fb4e84f1 100644 (file)
@@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
        for i in lib lib64 share end ; do \
                if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
                        cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+                       if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
+                               cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
+                       fi ; \
                        break ; \
                fi ; \
                if [ $$i = end ] ; then exit 1 ; fi ; \
index 99c4bab123cdae71b14ab92241d81dbf15616fe1..e30eef4f29a6f6034e46beab4a559f9994311c34 100644 (file)
@@ -714,7 +714,7 @@ static void cleanup_rapl_pmus(void)
        int i;
 
        for (i = 0; i < rapl_pmus->maxpkg; i++)
-               kfree(rapl_pmus->pmus + i);
+               kfree(rapl_pmus->pmus[i]);
        kfree(rapl_pmus);
 }
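
The fix above swaps `kfree(rapl_pmus->pmus + i)`, which frees a bogus interior pointer into the array itself, for `kfree(rapl_pmus->pmus[i])`, which frees the i-th allocated element. The same bug and fix in stand-alone C:

#include <stdlib.h>

int main(void)
{
        int **pmus = malloc(4 * sizeof(*pmus));
        for (int i = 0; i < 4; i++)
                pmus[i] = malloc(sizeof(int));

        for (int i = 0; i < 4; i++)
                free(pmus[i]);   /* correct: free the elements */
                /* free(pmus + i) would pass an interior pointer: UB */

        free(pmus);              /* then the array itself */
        return 0;
}
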
 
index b2625867ebd17543401cf6dbc5bf9e76fcd6ee6c..874e8bd64d1d54cdf662c5ba348f8872a0265e24 100644 (file)
@@ -2868,27 +2868,10 @@ static struct intel_uncore_type bdx_uncore_cbox = {
        .format_group           = &hswep_uncore_cbox_format_group,
 };
 
-static struct intel_uncore_type bdx_uncore_sbox = {
-       .name                   = "sbox",
-       .num_counters           = 4,
-       .num_boxes              = 4,
-       .perf_ctr_bits          = 48,
-       .event_ctl              = HSWEP_S0_MSR_PMON_CTL0,
-       .perf_ctr               = HSWEP_S0_MSR_PMON_CTR0,
-       .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
-       .box_ctl                = HSWEP_S0_MSR_PMON_BOX_CTL,
-       .msr_offset             = HSWEP_SBOX_MSR_OFFSET,
-       .ops                    = &hswep_uncore_sbox_msr_ops,
-       .format_group           = &hswep_uncore_sbox_format_group,
-};
-
-#define BDX_MSR_UNCORE_SBOX    3
-
 static struct intel_uncore_type *bdx_msr_uncores[] = {
        &bdx_uncore_ubox,
        &bdx_uncore_cbox,
        &hswep_uncore_pcu,
-       &bdx_uncore_sbox,
        NULL,
 };
 
@@ -2897,10 +2880,6 @@ void bdx_uncore_cpu_init(void)
        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        uncore_msr_uncores = bdx_msr_uncores;
-
-       /* BDX-DE doesn't have SBOX */
-       if (boot_cpu_data.x86_model == 86)
-               uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
 }
 
 static struct intel_uncore_type bdx_uncore_ha = {
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
new file mode 100644 (file)
index 0000000..6999f7d
--- /dev/null
@@ -0,0 +1,68 @@
+#ifndef _ASM_X86_INTEL_FAMILY_H
+#define _ASM_X86_INTEL_FAMILY_H
+
+/*
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+ * "Extreme" ones, like Broadwell-E.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them.  There's no processor called "WESTMERE2".
+ */
+
+#define INTEL_FAM6_CORE_YONAH          0x0E
+#define INTEL_FAM6_CORE2_MEROM         0x0F
+#define INTEL_FAM6_CORE2_MEROM_L       0x16
+#define INTEL_FAM6_CORE2_PENRYN                0x17
+#define INTEL_FAM6_CORE2_DUNNINGTON    0x1D
+
+#define INTEL_FAM6_NEHALEM             0x1E
+#define INTEL_FAM6_NEHALEM_EP          0x1A
+#define INTEL_FAM6_NEHALEM_EX          0x2E
+#define INTEL_FAM6_WESTMERE            0x25
+#define INTEL_FAM6_WESTMERE2           0x1F
+#define INTEL_FAM6_WESTMERE_EP         0x2C
+#define INTEL_FAM6_WESTMERE_EX         0x2F
+
+#define INTEL_FAM6_SANDYBRIDGE         0x2A
+#define INTEL_FAM6_SANDYBRIDGE_X       0x2D
+#define INTEL_FAM6_IVYBRIDGE           0x3A
+#define INTEL_FAM6_IVYBRIDGE_X         0x3E
+
+#define INTEL_FAM6_HASWELL_CORE                0x3C
+#define INTEL_FAM6_HASWELL_X           0x3F
+#define INTEL_FAM6_HASWELL_ULT         0x45
+#define INTEL_FAM6_HASWELL_GT3E                0x46
+
+#define INTEL_FAM6_BROADWELL_CORE      0x3D
+#define INTEL_FAM6_BROADWELL_XEON_D    0x56
+#define INTEL_FAM6_BROADWELL_GT3E      0x47
+#define INTEL_FAM6_BROADWELL_X         0x4F
+
+#define INTEL_FAM6_SKYLAKE_MOBILE      0x4E
+#define INTEL_FAM6_SKYLAKE_DESKTOP     0x5E
+#define INTEL_FAM6_SKYLAKE_X           0x55
+#define INTEL_FAM6_KABYLAKE_MOBILE     0x8E
+#define INTEL_FAM6_KABYLAKE_DESKTOP    0x9E
+
+/* "Small Core" Processors (Atom) */
+
+#define INTEL_FAM6_ATOM_PINEVIEW       0x1C
+#define INTEL_FAM6_ATOM_LINCROFT       0x26
+#define INTEL_FAM6_ATOM_PENWELL                0x27
+#define INTEL_FAM6_ATOM_CLOVERVIEW     0x35
+#define INTEL_FAM6_ATOM_CEDARVIEW      0x36
+#define INTEL_FAM6_ATOM_SILVERMONT1    0x37 /* BayTrail/BYT / Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT2    0x4D /* Avoton/Rangeley */
+#define INTEL_FAM6_ATOM_AIRMONT                0x4C /* CherryTrail / Braswell */
+#define INTEL_FAM6_ATOM_MERRIFIELD1    0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MERRIFIELD2    0x5A /* Anniedale */
+#define INTEL_FAM6_ATOM_GOLDMONT       0x5C
+#define INTEL_FAM6_ATOM_DENVERTON      0x5F /* Goldmont Microserver */
+
+/* Xeon Phi */
+
+#define INTEL_FAM6_XEON_PHI_KNL                0x57 /* Knights Landing */
+
+#endif /* _ASM_X86_INTEL_FAMILY_H */
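
Consumers compare these constants against the model number from CPUID leaf 1, with the extended-model bits spliced in for family 6. A runnable GCC/Clang sketch of that extraction (simplified: it ignores the extended family field, which is fine for family-6 checks):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        unsigned int family = (eax >> 8) & 0xf;
        unsigned int model  = (eax >> 4) & 0xf;

        if (family == 0x6 || family == 0xf)
                model |= ((eax >> 16) & 0xf) << 4;  /* extended model */

        /* "model" can now be compared against INTEL_FAM6_* values */
        printf("family %#x model %#x\n", family, model);
        return 0;
}
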
index 4421b5da409d6424d91eac6fc91231d8e92d3658..d1d1e5094c2844d31ff7d2028c950bd42e396312 100644 (file)
@@ -38,12 +38,11 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
 #define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR)                                          \
-       (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
-                             THREAD_SIZE - (unsigned long)(ADDR)))    \
-        ? (MAX_STACK_SIZE)                                            \
-        : (((unsigned long)current_thread_info()) +                   \
-           THREAD_SIZE - (unsigned long)(ADDR)))
+#define CUR_STACK_SIZE(ADDR) \
+       (current_top_of_stack() - (unsigned long)(ADDR))
+#define MIN_STACK_SIZE(ADDR)                           \
+       (MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ?        \
+        MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
 
 #define flush_insn_slot(p)     do { } while (0)
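
The rewrite expresses MIN_STACK_SIZE() as a plain clamp of the bytes between ADDR and the top of the stack. The kernel macro derives the stack top from current_top_of_stack(); the stand-alone check below takes it as a parameter instead:

#include <assert.h>

#define MAX_STACK_SIZE 64
#define CUR_STACK_SIZE(top, addr) ((top) - (addr))
#define MIN_STACK_SIZE(top, addr)                       \
        (MAX_STACK_SIZE < CUR_STACK_SIZE(top, addr) ?   \
         MAX_STACK_SIZE : CUR_STACK_SIZE(top, addr))

int main(void)
{
        assert(MIN_STACK_SIZE(1000, 100) == 64);  /* plenty of room: capped */
        assert(MIN_STACK_SIZE(1000, 980) == 20);  /* near the top: remainder */
        return 0;
}
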
 
index e0fbe7e70dc19396ddebf638f6b126b3863b4442..69e62862b6222e4f3ec3a90f205cdf176ec983b3 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/irqbypass.h>
 #include <linux/hyperv.h>
 
+#include <asm/apic.h>
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
 #include <asm/mtrr.h>
@@ -1368,4 +1369,14 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
+static inline int kvm_cpu_get_apicid(int mps_cpu)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+       return __default_cpu_present_to_apicid(mps_cpu);
+#else
+       WARN_ON_ONCE(1);
+       return BAD_APICID;
+#endif
+}
+
 #endif /* _ASM_X86_KVM_HOST_H */
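
kvm_cpu_get_apicid() wraps a lookup that only exists when CONFIG_X86_LOCAL_APIC is set, warning and returning a sentinel otherwise so callers need no #ifdefs of their own. The generic shape of that pattern, with invented names (hw_id_table is assumed to be provided by the configured backend):

#include <stdio.h>

#define BAD_ID 0xff

static inline int cpu_to_hw_id(int cpu)
{
#ifdef HAVE_HW_ID_TABLE
        return hw_id_table[cpu];          /* real mapping when configured */
#else
        fprintf(stderr, "cpu_to_hw_id: no backend configured\n");
        return BAD_ID;                    /* callers must handle this */
#endif
}

int main(void)
{
        printf("%d\n", cpu_to_hw_id(0));
        return 0;
}
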
index 7dc1d8fef7fdec6a633f5cd3f7ada4d2ba09e8fe..b5fee97813cdf8d1408aa7ac874b21593eef8121 100644 (file)
@@ -122,7 +122,7 @@ notrace static inline void native_write_msr(unsigned int msr,
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
                     : : "c" (msr), "a"(low), "d" (high) : "memory");
-       if (msr_tracepoint_active(__tracepoint_read_msr))
+       if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
@@ -141,7 +141,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
                     : "c" (msr), "0" (low), "d" (high),
                       [fault] "i" (-EIO)
                     : "memory");
-       if (msr_tracepoint_active(__tracepoint_read_msr))
+       if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), err);
        return err;
 }
index bf7f8b55b0f9d8940d839b695466d32999078c10..574c23cf761ac97b50f6042cd86cc58ef1575add 100644 (file)
@@ -81,7 +81,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
        struct page *page;
-       page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+       page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
        if (!page)
                return NULL;
        if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +125,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+       return (pud_t *)get_zeroed_page(GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
index 7c247e7404be778e06ebff15f020dbd792fb7be0..0944218af9e279d631af9f03506a9672c4443c6d 100644 (file)
@@ -14,7 +14,7 @@ extern int kstack_depth_to_print;
 struct thread_info;
 struct stacktrace_ops;
 
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+typedef unsigned long (*walk_stack_t)(struct task_struct *task,
                                      unsigned long *stack,
                                      unsigned long bp,
                                      const struct stacktrace_ops *ops,
@@ -23,13 +23,13 @@ typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
                                      int *graph);
 
 extern unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
                    unsigned long *stack, unsigned long bp,
                    const struct stacktrace_ops *ops, void *data,
                    unsigned long *end, int *graph);
 
 extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
                       unsigned long *stack, unsigned long bp,
                       const struct stacktrace_ops *ops, void *data,
                       unsigned long *end, int *graph);
index 84e33ff5a6d595693c3718477f168f20d5546316..446702ed99dca3edf5a8a48cccb6c2475ae9e85f 100644 (file)
@@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
                res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
                mem += IOAPIC_RESOURCE_NAME_SIZE;
+               ioapics[i].iomem_res = &res[num];
                num++;
-               ioapics[i].iomem_res = res;
        }
 
        ioapic_resources = res;
index c343a54bed396d2f924d4ed6d8e5ec2188c636b0..f5c69d8974e176e44a995bd6f91512c7d030b001 100644 (file)
@@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
        u64 value;
 
        /* re-enable TopologyExtensions if switched off by BIOS */
-       if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+       if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
                if (msr_set_bit(0xc0011005, 54) > 0) {
                        rdmsrl(0xc0011005, value);
                        if (value & BIT_64(54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-                               pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+                               pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
                        }
                }
        }
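
init_amd_bd() runs for every CPU brought up, so the plain pr_info() printed the re-enable message once per core; pr_info_once() collapses it to a single line. Its semantics in plain C:

#include <stdio.h>

static void topoext_notice_once(void)
{
        static int printed;

        if (!printed) {
                printed = 1;
                printf("CPU: Re-enabling disabled Topology Extensions Support.\n");
        }
}

int main(void)
{
        for (int cpu = 0; cpu < 4; cpu++)
                topoext_notice_once();   /* prints exactly once */
        return 0;
}
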
index 2bb25c3fe2e8e9c6933ab37c99a532f8a36e47fa..ef8017ca5ba9dab6c59e51ae91b8c353ddaa00da 100644 (file)
@@ -42,16 +42,14 @@ void printk_address(unsigned long address)
 static void
 print_ftrace_graph_addr(unsigned long addr, void *data,
                        const struct stacktrace_ops *ops,
-                       struct thread_info *tinfo, int *graph)
+                       struct task_struct *task, int *graph)
 {
-       struct task_struct *task;
        unsigned long ret_addr;
        int index;
 
        if (addr != (unsigned long)return_to_handler)
                return;
 
-       task = tinfo->task;
        index = task->curr_ret_stack;
 
        if (!task->ret_stack || index < *graph)
@@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
 static inline void
 print_ftrace_graph_addr(unsigned long addr, void *data,
                        const struct stacktrace_ops *ops,
-                       struct thread_info *tinfo, int *graph)
+                       struct task_struct *task, int *graph)
 { }
 #endif
 
@@ -79,10 +77,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
                        void *p, unsigned int size, void *end)
 {
-       void *t = tinfo;
+       void *t = task_stack_page(task);
        if (end) {
                if (p < end && p >= (end-THREAD_SIZE))
                        return 1;
@@ -93,14 +91,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
 }
 
 unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data,
                unsigned long *end, int *graph)
 {
        struct stack_frame *frame = (struct stack_frame *)bp;
 
-       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+       while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
                unsigned long addr;
 
                addr = *stack;
@@ -112,7 +110,7 @@ print_context_stack(struct thread_info *tinfo,
                        } else {
                                ops->address(data, addr, 0);
                        }
-                       print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+                       print_ftrace_graph_addr(addr, data, ops, task, graph);
                }
                stack++;
        }
@@ -121,7 +119,7 @@ print_context_stack(struct thread_info *tinfo,
 EXPORT_SYMBOL_GPL(print_context_stack);
 
 unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
                       unsigned long *stack, unsigned long bp,
                       const struct stacktrace_ops *ops, void *data,
                       unsigned long *end, int *graph)
@@ -129,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo,
        struct stack_frame *frame = (struct stack_frame *)bp;
        unsigned long *ret_addr = &frame->return_address;
 
-       while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+       while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
                unsigned long addr = *ret_addr;
 
                if (!__kernel_text_address(addr))
@@ -139,7 +137,7 @@ print_context_stack_bp(struct thread_info *tinfo,
                        break;
                frame = frame->next_frame;
                ret_addr = &frame->return_address;
-               print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+               print_ftrace_graph_addr(addr, data, ops, task, graph);
        }
 
        return (unsigned long)frame;
index 464ffd69b92e9ef376b9c534aec3c12973d6ad7a..fef917e79b9d614eeaca609c9c03e980bae2272f 100644 (file)
@@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                bp = stack_frame(task, regs);
 
        for (;;) {
-               struct thread_info *context;
                void *end_stack;
 
                end_stack = is_hardirq_stack(stack, cpu);
                if (!end_stack)
                        end_stack = is_softirq_stack(stack, cpu);
 
-               context = task_thread_info(task);
-               bp = ops->walk_stack(context, stack, bp, ops, data,
+               bp = ops->walk_stack(task, stack, bp, ops, data,
                                     end_stack, &graph);
 
                /* Stop if not on irq stack */
index 5f1c6266eb3028579f5036161dc240387dcb124f..d558a8a49016b2c6eccb50f3685cc136fdc621ad 100644 (file)
@@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                const struct stacktrace_ops *ops, void *data)
 {
        const unsigned cpu = get_cpu();
-       struct thread_info *tinfo;
        unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
        unsigned long dummy;
        unsigned used = 0;
@@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
         * current stack address. If the stacks consist of nested
         * exceptions
         */
-       tinfo = task_thread_info(task);
        while (!done) {
                unsigned long *stack_end;
                enum stack_type stype;
@@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                        if (ops->stack(data, id) < 0)
                                break;
 
-                       bp = ops->walk_stack(tinfo, stack, bp, ops,
+                       bp = ops->walk_stack(task, stack, bp, ops,
                                             data, stack_end, &graph);
                        ops->stack(data, "<EOE>");
                        /*
@@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
                        if (ops->stack(data, "IRQ") < 0)
                                break;
-                       bp = ops->walk_stack(tinfo, stack, bp,
+                       bp = ops->walk_stack(task, stack, bp,
                                     ops, data, stack_end, &graph);
                        /*
                         * We link to the next stack (which would be
@@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
        /*
         * This handles the process stack:
         */
-       bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+       bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
        put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
index 4d38416e2a7fd0625488d635dc5452ffb651c115..04f89caef9c4926c40092aed725db848adc2379e 100644 (file)
@@ -57,7 +57,7 @@
 # error "Need more than one PGD for the ESPFIX hack"
 #endif
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
index 38da8f29a9c81f075d4b91fc6982e28ae2f28eee..c627bf8d98adf8377a351f1cbbe98aa8e29fe7f1 100644 (file)
@@ -130,11 +130,9 @@ void irq_ctx_init(int cpu)
 
 void do_softirq_own_stack(void)
 {
-       struct thread_info *curstk;
        struct irq_stack *irqstk;
        u32 *isp, *prev_esp;
 
-       curstk = current_stack();
        irqstk = __this_cpu_read(softirq_stack);
 
        /* build the stack frame on the softirq stack */
index 38cf7a7412503f513bb1aae1c1acc5b76e7d2e1d..7847e5c0e0b5d0ce9af2cd47274b52bab7f2597a 100644 (file)
@@ -961,7 +961,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
                 * normal page fault.
                 */
                regs->ip = (unsigned long)cur->addr;
+               /*
+                * The trap flag (TF) was set here because this fault
+                * occurred where single stepping was about to be done,
+                * so clear it by resetting the current kprobe:
+                */
+               regs->flags &= ~X86_EFLAGS_TF;
+
+               /*
+                * If the TF flag was set before the kprobe hit,
+                * don't touch it:
+                */
                regs->flags |= kcb->kprobe_old_flags;
+
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
index d1590486204a1bb52974c27793dc66139f20c4ee..00f03d82e69acfbb7c90a7d8d859516088a43e6d 100644 (file)
@@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
                local_irq_disable();
 }
 
+/*
+ * In IST context, we explicitly disable preemption.  This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
 void ist_enter(struct pt_regs *regs)
 {
        if (user_mode(regs)) {
@@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
                rcu_nmi_enter();
        }
 
-       /*
-        * We are atomic because we're on the IST stack; or we're on
-        * x86_32, in which case we still shouldn't schedule; or we're
-        * on x86_64 and entered from user mode, in which case we're
-        * still atomic unless ist_begin_non_atomic is called.
-        */
-       preempt_count_add(HARDIRQ_OFFSET);
+       preempt_disable();
 
        /* This code is a bit fragile.  Test it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
 
 void ist_exit(struct pt_regs *regs)
 {
-       preempt_count_sub(HARDIRQ_OFFSET);
+       preempt_enable_no_resched();
 
        if (!user_mode(regs))
                rcu_nmi_exit();
@@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
        BUG_ON((unsigned long)(current_top_of_stack() -
                               current_stack_pointer()) >= THREAD_SIZE);
 
-       preempt_count_sub(HARDIRQ_OFFSET);
+       preempt_enable_no_resched();
 }
 
 /**
@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
  */
 void ist_end_non_atomic(void)
 {
-       preempt_count_add(HARDIRQ_OFFSET);
+       preempt_disable();
 }
 
 static nokprobe_inline int
index 1163e8173e5a71b26d77f9ffde3612bddefc76aa..16ef31b874522143e275591eb93c9705df83c873 100644 (file)
@@ -238,7 +238,9 @@ module_param(nested, int, S_IRUGO);
 
 /* enable / disable AVIC */
 static int avic;
+#ifdef CONFIG_X86_LOCAL_APIC
 module_param(avic, int, S_IRUGO);
+#endif
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
@@ -981,11 +983,14 @@ static __init int svm_hardware_setup(void)
        } else
                kvm_disable_tdp();
 
-       if (avic && (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)))
-               avic = false;
-
-       if (avic)
-               pr_info("AVIC enabled\n");
+       if (avic) {
+               if (!npt_enabled ||
+                   !boot_cpu_has(X86_FEATURE_AVIC) ||
+                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC))
+                       avic = false;
+               else
+                       pr_info("AVIC enabled\n");
+       }
 
        return 0;
 
@@ -1324,7 +1329,7 @@ static int avic_vm_init(struct kvm *kvm)
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
 {
        u64 entry;
-       int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
+       int h_physical_id = kvm_cpu_get_apicid(vcpu->cpu);
        struct vcpu_svm *svm = to_svm(vcpu);
 
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -1349,7 +1354,7 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        u64 entry;
        /* ID = 0xff (broadcast), ID > 0xff (reserved) */
-       int h_physical_id = __default_cpu_present_to_apicid(cpu);
+       int h_physical_id = kvm_cpu_get_apicid(cpu);
        struct vcpu_svm *svm = to_svm(vcpu);
 
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -4236,7 +4241,7 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
 
        if (avic_vcpu_is_running(vcpu))
                wrmsrl(SVM_AVIC_DOORBELL,
-                      __default_cpu_present_to_apicid(vcpu->cpu));
+                      kvm_cpu_get_apicid(vcpu->cpu));
        else
                kvm_vcpu_wake_up(vcpu);
 }
index fb93010beaa4df8b61fb4b654d889b1dcaa0990d..003618e324ce4bd584458ffbf3b5de6d32635d4e 100644 (file)
@@ -2072,7 +2072,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
        unsigned int dest;
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP) ||
+               !kvm_vcpu_apicv_active(vcpu))
                return;
 
        do {
@@ -2180,7 +2181,8 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP) ||
+               !kvm_vcpu_apicv_active(vcpu))
                return;
 
        /* Set SN when the vCPU is preempted */
@@ -10714,7 +10716,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP) ||
+               !kvm_vcpu_apicv_active(vcpu))
                return 0;
 
        vcpu->pre_pcpu = vcpu->cpu;
@@ -10780,7 +10783,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
        unsigned long flags;
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP) ||
+               !kvm_vcpu_apicv_active(vcpu))
                return;
 
        do {
@@ -10833,7 +10837,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
        int idx, ret = -EINVAL;
 
        if (!kvm_arch_has_assigned_device(kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP) ||
+               !kvm_vcpu_apicv_active(kvm->vcpus[0]))
                return 0;
 
        idx = srcu_read_lock(&kvm->irq_srcu);
index 4eb287e25043ed17639832a1d6b3f71b528ab69e..aa0ff4b02a96e230ed17052f9ed961950371753b 100644 (file)
@@ -6,7 +6,7 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM
index 6e7242be1c8744b89af4a96587fb79d3a387e77a..b226b3f497f1c75a465fa73f5171a695ea5c9e95 100644 (file)
@@ -139,7 +139,7 @@ int __init efi_alloc_page_tables(void)
        if (efi_enabled(EFI_OLD_MEMMAP))
                return 0;
 
-       gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+       gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
        efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
        if (!efi_pgd)
                return -ENOMEM;
index 478a2de543a59ea797515be2f8145a503277778f..67433714b791d6f49eb954a30b092c2121cc66c5 100644 (file)
@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
 
        /* NOTE: The loop is more greedy than the cleanup_highmap variant.
         * We include the PMD passed in on _both_ boundaries. */
-       for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+       for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
                        pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
@@ -1551,41 +1551,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
-       if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-                              pte_val_ma(pte));
-
-       return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-       unsigned long pfn;
-
-       if (xen_feature(XENFEAT_writable_page_tables) ||
-           xen_feature(XENFEAT_auto_translated_physmap) ||
-           xen_start_info->mfn_list >= __START_KERNEL_map)
-               return pte;
-
-       /*
-        * Pages belonging to the initial p2m list mapped outside the default
-        * address range must be mapped read-only. This region contains the
-        * page tables for mapping the p2m list, too, and page tables MUST be
-        * mapped read-only.
-        */
-       pfn = pte_pfn(pte);
-       if (pfn >= xen_start_info->first_p2m_pfn &&
-           pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
-               pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
-       return pte;
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Init-time set_pte while constructing initial pagetables, which
  * doesn't allow RO page table pages to be remapped RW.
@@ -1600,13 +1565,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
  * so always write the PTE directly and rely on Xen trapping and
  * emulating any updates as necessary.
  */
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+__visible pte_t xen_make_pte_init(pteval_t pte)
 {
-       if (pte_mfn(pte) != INVALID_P2M_ENTRY)
-               pte = mask_rw_pte(ptep, pte);
-       else
-               pte = __pte_ma(0);
+#ifdef CONFIG_X86_64
+       unsigned long pfn;
+
+       /*
+        * Pages belonging to the initial p2m list mapped outside the default
+        * address range must be mapped read-only. This region contains the
+        * page tables for mapping the p2m list, too, and page tables MUST be
+        * mapped read-only.
+        */
+       pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+       if (xen_start_info->mfn_list < __START_KERNEL_map &&
+           pfn >= xen_start_info->first_p2m_pfn &&
+           pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+               pte &= ~_PAGE_RW;
+#endif
+       pte = pte_pfn_to_mfn(pte);
+       return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
 
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+       if (pte_mfn(pte) != INVALID_P2M_ENTRY
+           && pte_val_ma(*ptep) & _PAGE_PRESENT)
+               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+                              pte_val_ma(pte));
+#endif
        native_set_pte(ptep, pte);
 }
 
@@ -2407,6 +2396,7 @@ static void __init xen_post_allocator_init(void)
        pv_mmu_ops.alloc_pud = xen_alloc_pud;
        pv_mmu_ops.release_pud = xen_release_pud;
 #endif
+       pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
 #ifdef CONFIG_X86_64
        pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2445,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
        .pte_val = PV_CALLEE_SAVE(xen_pte_val),
        .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
 
-       .make_pte = PV_CALLEE_SAVE(xen_make_pte),
+       .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
        .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
 #ifdef CONFIG_X86_PAE
index cab9f766bb06b28b8f0f8f35583e70ae50adb30e..dd2a49a8aacc6b1dfaeac32980c94c71af991ed9 100644 (file)
@@ -182,7 +182,7 @@ static void * __ref alloc_p2m_page(void)
        if (unlikely(!slab_is_available()))
                return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
 
-       return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+       return (void *)__get_free_page(GFP_KERNEL);
 }
 
 static void __ref free_p2m_page(void *p)
index d38eb9237e645cf5affd4964d8d289abb1a2ee4f..1065bc8bcae56bba87f8d1a55b2c5155d489777d 100644 (file)
@@ -44,7 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
        pte_t *ptep;
        int i;
 
-       ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       ptep = (pte_t *)__get_free_page(GFP_KERNEL);
        if (!ptep)
                return NULL;
        for (i = 0; i < 1024; i++)
index 23d7f301a1967483ec79a383a1a317881caf3358..9e29dc35169560a223ae7eea9dffc80da15fc7f9 100644 (file)
@@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                ret = submit_bio_wait(type, bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
+               bio_put(bio);
        }
        blk_finish_plug(&plug);
 
@@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                }
        }
 
-       if (bio)
+       if (bio) {
                ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+               bio_put(bio);
+       }
        return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                }
        }
 
-       if (bio)
-               return submit_bio_wait(WRITE, bio);
+       if (bio) {
+               ret = submit_bio_wait(WRITE, bio);
+               bio_put(bio);
+               return ret;
+       }
        return 0;
 }
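
The three block-layer hunks above fix the same leak: submit_bio_wait() waits for completion but does not consume the caller's reference, so the caller must bio_put() afterwards. A generic refcount sketch of that ownership rule (not the block-layer API):

#include <stdlib.h>

struct req { int refs; };

static struct req *req_alloc(void)
{
        struct req *r = malloc(sizeof(*r));
        r->refs = 1;                      /* caller owns one reference */
        return r;
}

static void req_put(struct req *r)
{
        if (--r->refs == 0)
                free(r);
}

static int submit_and_wait(struct req *r)
{
        (void)r;
        return 0;                         /* completes, but takes no ref */
}

int main(void)
{
        struct req *r = req_alloc();
        int ret = submit_and_wait(r);
        req_put(r);                       /* the fix: drop our reference */
        return ret;
}
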
 
index 29cbc1b5fbdba0ee89874c662ab441b2a817e177..f9b9049b1284cc8adf65c1eed611e4c09d2d6584 100644 (file)
@@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q)) {
-               if (blk_attempt_plug_merge(q, bio, &request_count,
-                                          &same_queue_rq))
-                       return BLK_QC_T_NONE;
-       } else
-               request_count = blk_plug_queued_count(q);
+       if (!is_flush_fua && !blk_queue_nomerges(q) &&
+           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+               return BLK_QC_T_NONE;
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
@@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, NULL))
-               return BLK_QC_T_NONE;
+       if (!is_flush_fua && !blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+                       return BLK_QC_T_NONE;
+       } else
+               request_count = blk_plug_queued_count(q);
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
index a1d177d58254cd8e201836481a51b809ec98c628..21932d640a41f6f5b060b8fcb64fbc2d9ac131ab 100644 (file)
@@ -108,7 +108,9 @@ acpi_ex_add_table(u32 table_index,
 
        /* Add the table to the namespace */
 
+       acpi_ex_exit_interpreter();
        status = acpi_ns_load_table(table_index, parent_node);
+       acpi_ex_enter_interpreter();
        if (ACPI_FAILURE(status)) {
                acpi_ut_remove_reference(obj_desc);
                *ddb_handle = NULL;
index daceb80022b023da04354bb36300caf13cc40a2a..3b7fb99362b6945ecc5ebe3f192405718ecd33fb 100644 (file)
@@ -306,12 +306,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
 acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
 {
        u64 address;
-       u8 access_width;
-       u32 bit_width;
-       u8 bit_offset;
-       u64 value64;
-       u32 new_value32, old_value32;
-       u8 index;
        acpi_status status;
 
        ACPI_FUNCTION_NAME(hw_write);
@@ -323,145 +317,23 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
                return (status);
        }
 
-       /* Convert access_width into number of bits based */
-
-       access_width = acpi_hw_get_access_bit_width(reg, 32);
-       bit_width = reg->bit_offset + reg->bit_width;
-       bit_offset = reg->bit_offset;
-
        /*
         * Two address spaces supported: Memory or IO. PCI_Config is
         * not supported here because the GAS structure is insufficient
         */
-       index = 0;
-       while (bit_width) {
-               /*
-                * Use offset style bit reads because "Index * AccessWidth" is
-                * ensured to be less than 32-bits by acpi_hw_validate_register().
-                */
-               new_value32 = ACPI_GET_BITS(&value, index * access_width,
-                                           ACPI_MASK_BITS_ABOVE_32
-                                           (access_width));
-
-               if (bit_offset >= access_width) {
-                       bit_offset -= access_width;
-               } else {
-                       /*
-                        * Use offset style bit masks because access_width is ensured
-                        * to be less than 32-bits by acpi_hw_validate_register() and
-                        * bit_offset/bit_width is less than access_width here.
-                        */
-                       if (bit_offset) {
-                               new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
-                       }
-                       if (bit_width < access_width) {
-                               new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
-                       }
-
-                       if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
-                               if (bit_offset || bit_width < access_width) {
-                                       /*
-                                        * Read old values in order not to modify the bits that
-                                        * are beyond the register bit_width/bit_offset setting.
-                                        */
-                                       status =
-                                           acpi_os_read_memory((acpi_physical_address)
-                                                               address +
-                                                               index *
-                                                               ACPI_DIV_8
-                                                               (access_width),
-                                                               &value64,
-                                                               access_width);
-                                       old_value32 = (u32)value64;
-
-                                       /*
-                                        * Use offset style bit masks because access_width is
-                                        * ensured to be less than 32-bits by
-                                        * acpi_hw_validate_register() and bit_offset/bit_width is
-                                        * less than access_width here.
-                                        */
-                                       if (bit_offset) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_ABOVE
-                                                   (bit_offset);
-                                               bit_offset = 0;
-                                       }
-                                       if (bit_width < access_width) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_BELOW
-                                                   (bit_width);
-                                       }
-
-                                       new_value32 |= old_value32;
-                               }
-
-                               value64 = (u64)new_value32;
-                               status =
-                                   acpi_os_write_memory((acpi_physical_address)
-                                                        address +
-                                                        index *
-                                                        ACPI_DIV_8
-                                                        (access_width),
-                                                        value64, access_width);
-                       } else {        /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
-                               if (bit_offset || bit_width < access_width) {
-                                       /*
-                                        * Read old values in order not to modify the bits that
-                                        * are beyond the register bit_width/bit_offset setting.
-                                        */
-                                       status =
-                                           acpi_hw_read_port((acpi_io_address)
-                                                             address +
-                                                             index *
-                                                             ACPI_DIV_8
-                                                             (access_width),
-                                                             &old_value32,
-                                                             access_width);
-
-                                       /*
-                                        * Use offset style bit masks because access_width is
-                                        * ensured to be less than 32-bits by
-                                        * acpi_hw_validate_register() and bit_offset/bit_width is
-                                        * less than access_width here.
-                                        */
-                                       if (bit_offset) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_ABOVE
-                                                   (bit_offset);
-                                               bit_offset = 0;
-                                       }
-                                       if (bit_width < access_width) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_BELOW
-                                                   (bit_width);
-                                       }
-
-                                       new_value32 |= old_value32;
-                               }
-
-                               status = acpi_hw_write_port((acpi_io_address)
-                                                           address +
-                                                           index *
-                                                           ACPI_DIV_8
-                                                           (access_width),
-                                                           new_value32,
-                                                           access_width);
-                       }
-               }
-
-               /*
-                * Index * access_width is ensured to be less than 32-bits by
-                * acpi_hw_validate_register().
-                */
-               bit_width -=
-                   bit_width > access_width ? access_width : bit_width;
-               index++;
+       if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+               status = acpi_os_write_memory((acpi_physical_address)
+                                             address, (u64)value,
+                                             reg->bit_width);
+       } else {                /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+               status = acpi_hw_write_port((acpi_io_address)
+                                           address, value, reg->bit_width);
        }
 
        ACPI_DEBUG_PRINT((ACPI_DB_IO,
                          "Wrote: %8.8X width %2d   to %8.8X%8.8X (%s)\n",
-                         value, access_width, ACPI_FORMAT_UINT64(address),
+                         value, reg->bit_width, ACPI_FORMAT_UINT64(address),
                          acpi_ut_get_region_name(reg->space_id)));
 
        return (status);
index f631a47724f05332644a54e290d9667b4b326010..1783cd7e14467b8f43e9f087884b3b847e6b5492 100644 (file)
@@ -47,6 +47,7 @@
 #include "acparser.h"
 #include "acdispat.h"
 #include "actables.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsparse")
@@ -170,6 +171,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 
        ACPI_FUNCTION_TRACE(ns_parse_table);
 
+       acpi_ex_enter_interpreter();
+
        /*
         * AML Parse, pass 1
         *
@@ -185,7 +188,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
        status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
                                            table_index, start_node);
        if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
+               goto error_exit;
        }
 
        /*
@@ -201,8 +204,10 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
        status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
                                            table_index, start_node);
        if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
+               goto error_exit;
        }
 
+error_exit:
+       acpi_ex_exit_interpreter();
        return_ACPI_STATUS(status);
 }
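
Once acpi_ns_parse_table() takes the interpreter lock, every exit path has to release it, hence the new error_exit label. The classic goto-unwind shape in stand-alone C, with a pthread mutex standing in for the interpreter lock:

#include <pthread.h>

static pthread_mutex_t interp = PTHREAD_MUTEX_INITIALIZER;

static int pass1(void) { return 0; }
static int pass2(void) { return 0; }

static int parse_table(void)
{
        int status;

        pthread_mutex_lock(&interp);

        status = pass1();
        if (status)
                goto error_exit;          /* no early return past the lock */

        status = pass2();
        if (status)
                goto error_exit;

error_exit:
        pthread_mutex_unlock(&interp);    /* single unwind point */
        return status;
}

int main(void) { return parse_table(); }
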
index 31e8da648fffb9b6db10cda340bcc394f3a95994..262ca31b86d9311c05bd840c7fb8bb0aa3b09b34 100644 (file)
@@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void)
         * Maybe EC region is required at bus_scan/acpi_get_devices. So it
         * is necessary to enable it as early as possible.
         */
-       acpi_boot_ec_enable();
+       acpi_ec_dsdt_probe();
 
        printk(KERN_INFO PREFIX "Interpreter enabled\n");
 
index 0e70181f150c9eae5e00b8f9a24a41382ee41bbc..73c76d646064700dc1bb51905b4dbc70b415aabf 100644 (file)
@@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
        return AE_OK;
 }
 
-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+       {"PNP0C09", 0},
+       {"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
 {
-       if (!boot_ec)
+       acpi_status status;
+
+       if (boot_ec)
                return 0;
+
+       /*
+        * Find the EC from the DSDT if no ECDT EC is available. By the
+        * time this function is invoked, the ACPI tables have been fully
+        * loaded, so the namespace can be walked now.
+        */
+       boot_ec = make_acpi_ec();
+       if (!boot_ec)
+               return -ENOMEM;
+       status = acpi_get_devices(ec_device_ids[0].id,
+                                 ec_parse_device, boot_ec, NULL);
+       if (ACPI_FAILURE(status) || !boot_ec->handle)
+               return -ENODEV;
        if (!ec_install_handlers(boot_ec)) {
                first_ec = boot_ec;
                return 0;
@@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void)
        return -EFAULT;
 }
 
-static const struct acpi_device_id ec_device_ids[] = {
-       {"PNP0C09", 0},
-       {"", 0},
-};
-
 #if 0
 /*
  * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is not
index 9bb0773d39bfd14f87f0e8dbfe5a1ed31b54162d..27cc7feabfe4322dfa578c2e9af8464e4f4feaf7 100644 (file)
@@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);
 
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 void acpi_ec_unblock_transactions_early(void);
index 61dc7a99e89a05e64395d1186913685667c8afdb..c6f0174589581e3ac4e07dd791eb6c1604eca219 100644 (file)
@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
        ata_scsi_port_error_handler(host, ap);
 
        /* finish or retry handled scmd's and clean up */
-       WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+       WARN_ON(!list_empty(&eh_work_q));
 
        DPRINTK("EXIT\n");
 }
index 6b2a84e7f2bece5643c2b68a17254067213b99ce..2609ba20b39622b1268323cd360171a3e545cbc1 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y                  += power/
 obj-$(CONFIG_HAS_DMA)  += dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_ISA)      += isa.o
+obj-$(CONFIG_ISA_BUS_API)      += isa.o
 obj-$(CONFIG_FW_LOADER)        += firmware_class.o
 obj-$(CONFIG_NUMA)     += node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
index 91dba65d72645509b74e289a04402af7696009b4..cd6ccdcf9df0c5ef2a37fabb2b4b7bbfa3c88755 100644 (file)
@@ -180,4 +180,4 @@ static int __init isa_bus_init(void)
        return error;
 }
 
-device_initcall(isa_bus_init);
+postcore_initcall(isa_bus_init);
index db930d3ee31259f6a17b9dd33eb16caee4e12f5c..2a215780eda25c7c33ac44c136c4fadd36238ab8 100644 (file)
@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
 
 static void module_create_drivers_dir(struct module_kobject *mk)
 {
-       if (!mk || mk->drivers_dir)
-               return;
+       static DEFINE_MUTEX(drivers_dir_mutex);
 
-       mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+       mutex_lock(&drivers_dir_mutex);
+       if (mk && !mk->drivers_dir)
+               mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+       mutex_unlock(&drivers_dir_mutex);
 }
 
 void module_add_driver(struct module *mod, struct device_driver *drv)
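
The rewrite above closes a check-then-create race: two concurrent callers could both observe drivers_dir as NULL and both create it. A rough pthreads analogue of the mutex-serialized create-once pattern (all names below are invented for illustration):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t drivers_dir_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *drivers_dir;		/* stands in for mk->drivers_dir */

static void *create_dir(void)		/* stands in for kobject_create_and_add() */
{
	return malloc(1);
}

static void create_drivers_dir_once(void)
{
	/* the check and the create must happen under one lock, or two
	 * threads can both see NULL and both create the directory */
	pthread_mutex_lock(&drivers_dir_mutex);
	if (!drivers_dir)
		drivers_dir = create_dir();
	pthread_mutex_unlock(&drivers_dir_mutex);
}

static void *worker(void *arg)
{
	(void)arg;
	create_drivers_dir_once();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("dir=%p\n", drivers_dir);
	return 0;
}
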
index 83d6e7ba1a343db48a3de678a3ba5b7df973c221..8c3434bdb26dee8c23a637798b18a3963666ee70 100644 (file)
@@ -211,7 +211,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
                }
 
                /* Mark the opp-table as shared, since multiple CPUs are using it now */
-               opp_table->shared_opp = true;
+               opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
        }
 unlock:
        mutex_unlock(&opp_table_lock);
@@ -227,7 +227,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
  *
  * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
  *
- * Returns -ENODEV if OPP table isn't already present.
+ * Returns -ENODEV if the OPP table isn't already present, and -EINVAL if the
+ * OPP table's status is access-unknown.
  *
  * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
@@ -249,9 +250,14 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
                goto unlock;
        }
 
+       if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
        cpumask_clear(cpumask);
 
-       if (opp_table->shared_opp) {
+       if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
                list_for_each_entry(opp_dev, &opp_table->dev_list, node)
                        cpumask_set_cpu(opp_dev->dev->id, cpumask);
        } else {
index 94d2010558e363f7991c89e9c0e3037822b761ae..1dfd3dd926241d9e2c9514c99d78b7bb736b0260 100644 (file)
@@ -34,7 +34,10 @@ static struct opp_table *_managed_opp(const struct device_node *np)
                         * But the OPPs will be considered shared only if the
                         * OPP table contains an "opp-shared" property.
                         */
-                       return opp_table->shared_opp ? opp_table : NULL;
+                       if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
+                               return opp_table;
+
+                       return NULL;
                }
        }
 
@@ -353,7 +356,10 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
        }
 
        opp_table->np = opp_np;
-       opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+       if (of_property_read_bool(opp_np, "opp-shared"))
+               opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+       else
+               opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
 
        mutex_unlock(&opp_table_lock);
 
index 20f3be22e060f38761c9f949d776b33e7de756f0..fabd5ca1a08394b05a1b75cd528ad4f66b58ecf7 100644 (file)
@@ -119,6 +119,12 @@ struct opp_device {
 #endif
 };
 
+enum opp_table_access {
+       OPP_TABLE_ACCESS_UNKNOWN = 0,
+       OPP_TABLE_ACCESS_EXCLUSIVE = 1,
+       OPP_TABLE_ACCESS_SHARED = 2,
+};
+
 /**
  * struct opp_table - Device opp structure
  * @node:      table node - contains the devices with OPPs that
@@ -166,7 +172,7 @@ struct opp_table {
        /* For backward compatibility with v1 bindings */
        unsigned int voltage_tolerance_v1;
 
-       bool shared_opp;
+       enum opp_table_access shared_opp;
        struct dev_pm_opp *suspend_opp;
 
        unsigned int *supported_hw;
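
Widening shared_opp from a bool to this three-state enum lets the code tell "sharing policy never determined" apart from "explicitly exclusive"; the dev_pm_opp_get_sharing_cpus() hunk earlier now returns -EINVAL for the former. A toy illustration of why the tri-state matters (the enum values are taken from the hunk, everything else is invented):

#include <stdio.h>
#include <errno.h>

enum opp_table_access {
	OPP_TABLE_ACCESS_UNKNOWN = 0,	/* zero-initialized tables land here */
	OPP_TABLE_ACCESS_EXCLUSIVE = 1,
	OPP_TABLE_ACCESS_SHARED = 2,
};

struct opp_table { enum opp_table_access shared_opp; };

static int get_sharing(const struct opp_table *t)
{
	if (t->shared_opp == OPP_TABLE_ACCESS_UNKNOWN)
		return -EINVAL;	/* with a bool, this case silently read as "exclusive" */
	return t->shared_opp == OPP_TABLE_ACCESS_SHARED;
}

int main(void)
{
	struct opp_table fresh = { 0 };				/* never probed */
	struct opp_table excl = { OPP_TABLE_ACCESS_EXCLUSIVE };

	printf("fresh: %d, exclusive: %d\n",
	       get_sharing(&fresh), get_sharing(&excl));
	return 0;
}
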
index d597e432e195305906d357e113b90659cbdadbd6..ab19adb07a126ae0cae4f0d37170a2929367b115 100644 (file)
@@ -1750,7 +1750,7 @@ aoecmd_init(void)
        int ret;
 
        /* get_zeroed_page returns a page with a ref count of 1 */
-       p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+       p = (void *) get_zeroed_page(GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        empty_page = virt_to_page(p);
index 31e73a7a40f20eb6de0337c79aade131c4b70df4..6a48ed41963ff9d215665b1423d20567fc50ee06 100644 (file)
@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
        debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
        debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
        debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
-       debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+       debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
 
        return 0;
 }
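
The one-character nbd fix above matters because debugfs stores the private pointer verbatim: passing &nbd hands it the address of the caller's local pointer variable, which is dead as soon as the function returns. The bug class in isolation (nothing below is the real nbd or debugfs API):

#include <stdio.h>

struct dev { int flags; };

static void *saved;	/* stands in for what debugfs keeps as file private data */

static void register_file(void *priv) { saved = priv; }

static void init(struct dev *d)
{
	register_file(&d);	/* BUG: stores the address of the local 'd',
				 * which goes stale when init() returns */
	register_file(d);	/* FIX: store the device pointer itself */
}

int main(void)
{
	struct dev d = { 42 };

	init(&d);
	printf("flags=%d\n", ((struct dev *)saved)->flags);
	return 0;
}
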
index ca13df8546396cf6a7dd72c3a288f27623681baf..2e6d1e9c3345f190b01f9d9caf8d27e6f5f8b9ce 100644 (file)
@@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *qd)
 {
        unsigned long flags;
-       struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+       int qid = hctx->queue_num;
+       struct blkfront_info *info = hctx->queue->queuedata;
+       struct blkfront_ring_info *rinfo = NULL;
 
+       BUG_ON(info->nr_rings <= qid);
+       rinfo = &info->rinfo[qid];
        blk_mq_start_request(qd->rq);
        spin_lock_irqsave(&rinfo->ring_lock, flags);
        if (RING_FULL(&rinfo->ring))
@@ -901,20 +905,9 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
        return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-                           unsigned int index)
-{
-       struct blkfront_info *info = (struct blkfront_info *)data;
-
-       BUG_ON(info->nr_rings <= index);
-       hctx->driver_data = &info->rinfo[index];
-       return 0;
-}
-
 static struct blk_mq_ops blkfront_mq_ops = {
        .queue_rq = blkif_queue_rq,
        .map_queue = blk_mq_map_queue,
-       .init_hctx = blk_mq_init_hctx,
 };
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
                return PTR_ERR(rq);
        }
 
+       rq->queuedata = info;
        queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
        if (info->feature_discard) {
@@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev)
                return err;
 
        err = talk_to_blkback(dev, info);
+       if (!err)
+               blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
 
        /*
         * We have to wait for the backend to switch to
@@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateConnected:
-               if (dev->state != XenbusStateInitialised) {
+               /*
+                * talk_to_blkback sets the state to XenbusStateInitialised
+                * and blkfront_connect sets it to XenbusStateConnected
+                * (if the connection went OK).
+                *
+                * If the backend (or toolstack) decides to poke at the backend
+                * state (and re-trigger the watch by repeatedly setting the
+                * state to XenbusStateConnected (4)), we need to deal with
+                * this. This is allowed, as it is how the backend tells the
+                * guest that the size of the disk has changed!
+                */
+               if ((dev->state != XenbusStateInitialised) &&
+                   (dev->state != XenbusStateConnected)) {
                        if (talk_to_blkback(dev, info))
                                break;
                }
+
                blkfront_connect(info);
                break;
 
index 94fb407d85615057ce95d80ef8c119f858346c48..44b1bd6baa38b20db8e93e058a1312c255482990 100644 (file)
@@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
        while (!list_empty(&intf->waiting_rcv_msgs)) {
                smi_msg = list_entry(intf->waiting_rcv_msgs.next,
                                     struct ipmi_smi_msg, link);
+               list_del(&smi_msg->link);
                if (!run_to_completion)
                        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
                                               flags);
@@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
                if (rv > 0) {
                        /*
                         * To preserve message order, quit if we
-                        * can't handle a message.
+                        * can't handle a message.  Add the message
+                        * back at the head; this is safe because this
+                        * tasklet is the only thing that pulls the
+                        * messages.
                         */
+                       list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
                        break;
                } else {
-                       list_del(&smi_msg->link);
                        if (rv == 0)
                                /* Message handled */
                                ipmi_free_smi_msg(smi_msg);
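
The IPMI hunk above pops each message before dropping the lock and, if the handler cannot take it yet, pushes it back at the head of the list, which preserves ordering because this tasklet is the only consumer. A stand-alone sketch of the pop/try/requeue-at-head discipline on an invented singly linked queue:

#include <stdio.h>
#include <stdlib.h>

struct msg { int id; struct msg *next; };

static struct msg *head, *tail;

static void push_tail(struct msg *m)
{
	m->next = NULL;
	if (tail)
		tail->next = m;
	else
		head = m;
	tail = m;
}

static struct msg *pop_head(void)
{
	struct msg *m = head;

	if (m) {
		head = m->next;
		if (!head)
			tail = NULL;
	}
	return m;
}

static void push_head(struct msg *m)	/* requeue without breaking order */
{
	m->next = head;
	head = m;
	if (!tail)
		tail = m;
}

static int handle(struct msg *m)	/* pretend the handler is busy on id 2 */
{
	return m->id == 2 ? -1 : 0;
}

int main(void)
{
	for (int i = 1; i <= 3; i++) {
		struct msg *m = malloc(sizeof(*m));
		m->id = i;
		push_tail(m);
	}

	struct msg *m;
	while ((m = pop_head())) {
		if (handle(m)) {
			/* could not deliver: put it back at the head and quit,
			 * so message order is preserved for the next run */
			push_head(m);
			break;
		}
		free(m);
	}
	printf("next pending: %d\n", head ? head->id : -1);
	return 0;
}
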
index 53ddba26578ce341eb3f488001a9cc267342eeda..98efbfcdb503e181d32f44a0dd0b44f286e6080a 100644 (file)
@@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE
 config COMMON_CLK_NXP
        def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
        select REGMAP_MMIO if ARCH_LPC32XX
+       select MFD_SYSCON if ARCH_LPC18XX
        ---help---
          Support for clock providers on NXP platforms.
 
index 020a29acc5b079e7c6560745804a6fa91e7d9273..51f54380474b6acbe4f39a6ea1a54c19cb4121f1 100644 (file)
@@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev)
 
        /* register fixed rate clocks */
        clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
-                                               CLK_IS_ROOT, 24000000);
+                                               0, 24000000);
        clks[FRCCLK] =  clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
-                                               CLK_IS_ROOT, 8000000);
+                                               0, 8000000);
        clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
-                                               CLK_IS_ROOT, 8000000);
+                                               0, 8000000);
        clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
-                                               CLK_IS_ROOT, 32000);
+                                               0, 32000);
        clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
-                                               CLK_IS_ROOT, 24000000);
+                                               0, 24000000);
        /* fixed rate (optional) clock */
        if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
                pr_info("pic32-clk: dt requests SOSC.\n");
index 0d159b513469111572c615a2bda79fce5b8ba150..fe9dc17ea8733b6c691502c74f9a10ad88f97c88 100644 (file)
@@ -372,26 +372,9 @@ static bool intel_pstate_get_ppc_enable_status(void)
        return acpi_ppc;
 }
 
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
-       return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu;
-       int turbo_pss_ctl;
        int ret;
        int i;
 
@@ -441,11 +424,10 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
         * max frequency, which will cause reduced performance, as
         * this driver uses the real max turbo frequency as the max
         * frequency. So correct this frequency in the _PSS table to
-        * correct max turbo frequency based on the turbo ratio.
+        * correct max turbo frequency based on the turbo state.
         * Also need to convert to MHz as _PSS freq is in MHz.
         */
-       turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
-       if (turbo_pss_ctl > cpu->pstate.max_pstate)
+       if (!limits->turbo_disabled)
                cpu->acpi_perf_data.states[0].core_frequency =
                                        policy->cpuinfo.max_freq / 1000;
        cpu->valid_pss_table = true;
@@ -1460,6 +1442,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
        intel_pstate_clear_update_util_hook(policy->cpu);
 
+       pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+                policy->cpuinfo.max_freq, policy->max);
+
        cpu = all_cpu_data[0];
        if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
            policy->max < policy->cpuinfo.max_freq &&
@@ -1495,13 +1480,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                                   limits->max_sysfs_pct);
        limits->max_perf_pct = max(limits->min_policy_pct,
                                   limits->max_perf_pct);
-       limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
        /* Make sure min_perf_pct <= max_perf_pct */
        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
        limits->min_perf = div_fp(limits->min_perf_pct, 100);
        limits->max_perf = div_fp(limits->max_perf_pct, 100);
+       limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
  out:
        intel_pstate_set_update_util_hook(policy->cpu);
@@ -1558,8 +1543,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
-       policy->cpuinfo.max_freq =
-               cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       update_turbo_state();
+       policy->cpuinfo.max_freq = limits->turbo_disabled ?
+                       cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+       policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
        intel_pstate_init_acpi_perf_limits(policy);
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);
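
Moving round_up() after the final div_fp() matters because, in the old order, the rounded value was immediately overwritten by the later recomputation, so the rounding never took effect. A small fixed-point demonstration; FRAC_BITS and the helpers mirror the driver's 8.8 format but are assumptions of this sketch:

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8	/* assumed to match intel_pstate's 8.8 fixed point */
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

static int32_t div_fp(int64_t x, int64_t y)
{
	return (int32_t)((x << FRAC_BITS) / y);
}

int main(void)
{
	int32_t max_perf = 0;
	int pct = 95;

	/* order removed by the hunk: the rounded value is overwritten below */
	max_perf = round_up(max_perf, FRAC_BITS);
	max_perf = div_fp(pct, 100);
	printf("old order: %d (never rounded)\n", max_perf);

	/* order added by the hunk: convert first, then round the result */
	max_perf = div_fp(pct, 100);
	max_perf = round_up(max_perf, FRAC_BITS);
	printf("new order: %d\n", max_perf);
	return 0;
}
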
index 808a320e9d5d0cdb0a943de7d9af5c89e9b444d5..a7ecb9a84c159a464665b4ad600b3490aa2b29ed 100644 (file)
@@ -487,7 +487,7 @@ static int __init pcc_cpufreq_probe(void)
        doorbell.space_id = reg_resource->space_id;
        doorbell.bit_width = reg_resource->bit_width;
        doorbell.bit_offset = reg_resource->bit_offset;
-       doorbell.access_width = 64;
+       doorbell.access_width = 4;
        doorbell.address = reg_resource->address;
 
        pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
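
The change from 64 to 4 reflects how the ACPI Generic Address Structure encodes access size: the field holds an enumeration (1 = byte, 2 = word, 3 = dword, 4 = qword), not a width in bits, so 64 was out of range. A quick decode of that encoding:

#include <stdio.h>

/* ACPI GAS access_size encoding: 0 = undefined, 1..4 = 8/16/32/64-bit */
static unsigned int gas_access_bits(unsigned int access_width)
{
	if (access_width < 1 || access_width > 4)
		return 0;	/* undefined / legacy */
	return 8u << (access_width - 1);
}

int main(void)
{
	for (unsigned int w = 0; w <= 5; w++)
		printf("access_width %u -> %u bits\n", w, gas_access_bits(w));
	return 0;
}
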
index 1d6c803804d5f44a3cd5ad4b6b90ee60ae6b407d..e92418facc92ae133f6d00553501c8b1ce8d6292 100644 (file)
@@ -268,8 +268,11 @@ int update_devfreq(struct devfreq *devfreq)
        devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
 
        err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
-       if (err)
+       if (err) {
+               freqs.new = cur_freq;
+               devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
                return err;
+       }
 
        freqs.new = freq;
        devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
@@ -552,6 +555,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
        devfreq->profile = profile;
        strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
        devfreq->previous_freq = profile->initial_freq;
+       devfreq->last_status.current_frequency = profile->initial_freq;
        devfreq->data = data;
        devfreq->nb.notifier_call = devfreq_notifier_call;
 
@@ -561,23 +565,22 @@ struct devfreq *devfreq_add_device(struct device *dev,
                mutex_lock(&devfreq->lock);
        }
 
-       devfreq->trans_table =  devm_kzalloc(dev, sizeof(unsigned int) *
-                                               devfreq->profile->max_state *
-                                               devfreq->profile->max_state,
-                                               GFP_KERNEL);
-       devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
-                                               devfreq->profile->max_state,
-                                               GFP_KERNEL);
-       devfreq->last_stat_updated = jiffies;
-
        dev_set_name(&devfreq->dev, "%s", dev_name(dev));
        err = device_register(&devfreq->dev);
        if (err) {
-               put_device(&devfreq->dev);
                mutex_unlock(&devfreq->lock);
                goto err_out;
        }
 
+       devfreq->trans_table =  devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
+                                               devfreq->profile->max_state *
+                                               devfreq->profile->max_state,
+                                               GFP_KERNEL);
+       devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
+                                               devfreq->profile->max_state,
+                                               GFP_KERNEL);
+       devfreq->last_stat_updated = jiffies;
+
        srcu_init_notifier_head(&devfreq->transition_notifier_list);
 
        mutex_unlock(&devfreq->lock);
@@ -603,7 +606,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
 err_init:
        list_del(&devfreq->node);
        device_unregister(&devfreq->dev);
-       kfree(devfreq);
 err_out:
        return ERR_PTR(err);
 }
@@ -621,7 +623,6 @@ int devfreq_remove_device(struct devfreq *devfreq)
                return -EINVAL;
 
        device_unregister(&devfreq->dev);
-       put_device(&devfreq->dev);
 
        return 0;
 }
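
Moving the devm_kzalloc() calls after device_register() and switching their owner to &devfreq->dev ties the buffers to the devfreq device's own lifetime, which is why the explicit kfree() and put_device() calls can go. A hand-rolled sketch of the managed-allocation idea (not the kernel's devres implementation):

#include <stdio.h>
#include <stdlib.h>

/* Toy "managed allocation": every devm_kzalloc() is recorded on the
 * device and released in one sweep when the device goes away. */
struct devres { void *data; struct devres *next; };
struct device { struct devres *res; };

static void *devm_kzalloc(struct device *dev, size_t size)
{
	struct devres *dr = malloc(sizeof(*dr));

	if (!dr)
		return NULL;
	dr->data = calloc(1, size);
	dr->next = dev->res;
	dev->res = dr;
	return dr->data;
}

static void device_unregister(struct device *dev)
{
	while (dev->res) {
		struct devres *dr = dev->res;

		dev->res = dr->next;
		free(dr->data);		/* no explicit kfree() needed by callers */
		free(dr);
	}
}

int main(void)
{
	struct device dev = { 0 };
	int *table = devm_kzalloc(&dev, 16 * sizeof(int));

	table[0] = 1;
	printf("table[0]=%d\n", table[0]);
	device_unregister(&dev);	/* frees 'table' automatically */
	return 0;
}
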
index 6b6a5f310486feda5c9b323a7129819dc163b7f3..a5841403bde8b64c314c921148b6189d7dcef82f 100644 (file)
@@ -220,9 +220,6 @@ static int exynos_nocp_parse_dt(struct platform_device *pdev,
 
        /* Maps the memory mapped IO to control nocp register */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (IS_ERR(res))
-               return PTR_ERR(res);
-
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
index 8e304b1befc56385b8fc34d7697a529289d8ae3b..75bd6621dc5d7fb068ca6f4dd24d71bfa56ad640 100644 (file)
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
        u32             mbr_dus;        /* Destination Microblock Stride Register */
 };
 
-
+/* 64-bit alignment needed to update the CNDA and CUBC registers atomically. */
 struct at_xdmac_desc {
        struct at_xdmac_lld             lld;
        enum dma_transfer_direction     direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
        unsigned int                    xfer_size;
        struct list_head                descs_list;
        struct list_head                xfer_node;
-};
+} __aligned(sizeof(u64));
 
 static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
 {
@@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        u32                     cur_nda, check_nda, cur_ubc, mask, value;
        u8                      dwidth = 0;
        unsigned long           flags;
+       bool                    initd;
 
        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
@@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        residue = desc->xfer_size;
        /*
         * Flush FIFO: only relevant when the transfer is source peripheral
-        * synchronized.
+        * synchronized. A flush is needed before reading CUBC because data in
+        * the FIFO are not reported by CUBC. Reporting a residue equal to the
+        * transfer length while data are still in the FIFO can cause trouble.
+        * Use case: the Atmel USART has a timeout which fires when characters
+        * have been received but no new character has arrived for a while. On
+        * timeout, it requests the residue. If the data are still in the DMA
+        * FIFO, we would return a residue equal to the transfer length, which
+        * means no data were received. An application waiting for these data
+        * would then hang, since there will be no further USART timeout
+        * without new data arriving.
         */
        mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
        value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        }
 
        /*
-        * When processing the residue, we need to read two registers but we
-        * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
-        * we stand in the descriptor list and AT_XDMAC_CUBC is used
-        * to know how many data are remaining for the current descriptor.
-        * Since the dma channel is not paused to not loose data, between the
-        * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
-        * descriptor.
-        * For that reason, after reading AT_XDMAC_CUBC, we check if we are
-        * still using the same descriptor by reading a second time
-        * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
-        * read again AT_XDMAC_CUBC.
+        * The easiest way to compute the residue would be to pause the DMA,
+        * but doing so can lose data, as some devices don't have a FIFO.
+        * We need to read several registers because:
+        * - the DMA is running, therefore a descriptor change is possible
+        * while reading these registers;
+        * - when a block transfer is done, the CUBC register is reset to its
+        * initial value until the next descriptor is fetched. This value
+        * would corrupt the residue calculation, so we have to skip it.
+        *
+        * INITD --------                    ------------
+        *              |____________________|
+        *       _______________________  _______________
+        * NDA       @desc2             \/   @desc3
+        *       _______________________/\_______________
+        *       __________  ___________  _______________
+        * CUBC       0    \/ MAX desc1 \/  MAX desc2
+        *       __________/\___________/\_______________
+        *
+        * Since descriptors are aligned on 64 bits, we can assume that
+        * the update of NDA and CUBC is atomic.
         * Memory barriers are used to ensure the read order of the registers.
-        * A max number of retries is set because unlikely it can never ends if
-        * we are transferring a lot of data with small buffers.
+        * A maximum number of retries is set because, although unlikely, the
+        * loop could otherwise never end.
         */
-       cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-       rmb();
-       cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
        for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
-               rmb();
                check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
-               if (likely(cur_nda == check_nda))
-                       break;
-
-               cur_nda = check_nda;
+               rmb();
+               initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
                rmb();
                cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+               rmb();
+               cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+               rmb();
+
+               if ((check_nda == cur_nda) && initd)
+                       break;
        }
 
        if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1470,6 +1489,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                goto spin_unlock;
        }
 
+       /*
+        * Flush FIFO: only relevant when the transfer is source peripheral
+        * synchronized. Another flush is needed here because CUBC is updated
+        * when the controller sends the data write command. Without the
+        * flush, CUBC could report data that have not actually been written
+        * to memory or to the device. The FIFO flush ensures the data are
+        * really written.
+        */
+       if ((desc->lld.mbr_cfg & mask) == value) {
+               at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+               while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+                       cpu_relax();
+       }
+
        /*
         * Remove size of all microblocks already transferred and the current
         * one. Then add the remaining size to transfer of the current
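
The retry loop above takes a lock-free consistent snapshot of the residue registers: it reads CNDA, INITD and CUBC with read barriers in between and retries until CNDA reads back unchanged with INITD set. The general shape of the pattern, with C11 atomics standing in for the device registers and for rmb() (a simplified sketch that omits the INITD check, not the driver code):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for the hardware registers; the device (or another thread)
 * may update them at any time. */
static _Atomic unsigned int reg_nda, reg_cubc;

#define MAX_RETRIES 10

/* Returns 0 and fills *nda/*cubc with a consistent pair, or -1. */
static int read_residue_snapshot(unsigned int *nda, unsigned int *cubc)
{
	for (int retry = 0; retry < MAX_RETRIES; retry++) {
		unsigned int check = atomic_load_explicit(&reg_nda, memory_order_acquire);
		unsigned int ubc   = atomic_load_explicit(&reg_cubc, memory_order_acquire);
		unsigned int cur   = atomic_load_explicit(&reg_nda, memory_order_acquire);

		if (check == cur) {	/* no descriptor switch in between */
			*nda = cur;
			*cubc = ubc;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int nda, cubc;

	atomic_store(&reg_nda, 0x1000);
	atomic_store(&reg_cubc, 64);
	if (!read_residue_snapshot(&nda, &cubc))
		printf("nda=%#x cubc=%u\n", nda, cubc);
	return 0;
}
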
index 25d1dadcddd1ddf22cd63e6d91e0b95901d34bbe..d0446a75990aeb046acb8d50a329bfeb7a9999c6 100644 (file)
@@ -703,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
                goto free_resources;
        }
 
-       src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
-                                PAGE_SIZE, DMA_TO_DEVICE);
+       src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
+                              (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+                              DMA_TO_DEVICE);
        unmap->addr[0] = src_dma;
 
        ret = dma_mapping_error(dma_chan->device->dev, src_dma);
@@ -714,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
        }
        unmap->to_cnt = 1;
 
-       dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
-                                 PAGE_SIZE, DMA_FROM_DEVICE);
+       dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
+                               (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+                               DMA_FROM_DEVICE);
        unmap->addr[1] = dest_dma;
 
        ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
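
The mv_xor fix passes the buffer's offset within its page ((size_t)src & ~PAGE_MASK) instead of a hard-coded 0, since a kmalloc'ed buffer need not start on a page boundary. Computing that offset in isolation (PAGE_SIZE and PAGE_MASK are defined locally just for the sketch):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	char *buf = malloc(100);	/* like kmalloc: not page-aligned in general */

	uintptr_t addr = (uintptr_t)buf;
	uintptr_t page_start = addr & PAGE_MASK;	/* what virt_to_page() resolves */
	size_t offset = addr & ~PAGE_MASK;		/* what the hunk now passes */

	printf("addr=%#lx page=%#lx offset=%#zx\n",
	       (unsigned long)addr, (unsigned long)page_start, offset);

	/* mapping page_start with offset 0 would point DMA at the wrong
	 * bytes whenever offset != 0 */
	free(buf);
	return 0;
}
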
index 6aa256b0a1ed34c2e1a4dff091c25dce5309fb34..c3ee3ad98a63834e055b0218465d3033ff127a1c 100644 (file)
@@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);
 
-               edac_mod_work(&mci->work, value);
+               if (mci->op_state == OP_RUNNING_POLL)
+                       edac_mod_work(&mci->work, value);
        }
        mutex_unlock(&mem_ctls_mutex);
 }
index b4d0bf6534cf43732df678eafd956fe39209ce03..6744d88bdea89782c524ede7d843c3d6bcd67ee2 100644 (file)
@@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
        { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 };
 
-#define RIR_RNK_TGT(reg)               GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg)                GET_BITFIELD(reg,  2, 14)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
+       GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+       GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
 
 /* Device 16, functions 2-7 */
 
@@ -326,6 +329,7 @@ struct pci_id_descr {
 struct pci_id_table {
        const struct pci_id_descr       *descr;
        int                             n_devs;
+       enum type                       type;
 };
 
 struct sbridge_dev {
@@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0)          },
 };
 
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) {     \
+       .descr = A,                     \
+       .n_devs = ARRAY_SIZE(A),        \
+       .type = T                       \
+}
+
 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_knl_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
        {0,}
 };
 
@@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
                                pci_read_config_dword(pvt->pci_tad[i],
                                                      rir_offset[j][k],
                                                      &reg);
-                               tmp_mb = RIR_OFFSET(reg) << 6;
+                               tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
 
                                gb = div_u64_rem(tmp_mb, 1024, &mb);
                                edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
                                         i, j, k,
                                         gb, (mb*1000)/1024,
                                         ((u64)tmp_mb) << 20L,
-                                        (u32)RIR_RNK_TGT(reg),
+                                        (u32)RIR_RNK_TGT(pvt->info.type, reg),
                                         reg);
                        }
                }
@@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
                              rir_offset[n_rir][idx],
                              &reg);
-       *rank = RIR_RNK_TGT(reg);
+       *rank = RIR_RNK_TGT(pvt->info.type, reg);
 
        edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
                 n_rir,
@@ -3357,12 +3366,12 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 #define ICPU(model, table) \
        { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
 
-/* Order here must match "enum type" */
 static const struct x86_cpu_id sbridge_cpuids[] = {
        ICPU(0x2d, pci_dev_descr_sbridge_table),        /* SANDY_BRIDGE */
        ICPU(0x3e, pci_dev_descr_ibridge_table),        /* IVY_BRIDGE */
        ICPU(0x3f, pci_dev_descr_haswell_table),        /* HASWELL */
        ICPU(0x4f, pci_dev_descr_broadwell_table),      /* BROADWELL */
+       ICPU(0x56, pci_dev_descr_broadwell_table),      /* BROADWELL-DE */
        ICPU(0x57, pci_dev_descr_knl_table),            /* KNIGHTS_LANDING */
        { }
 };
@@ -3398,7 +3407,7 @@ static int sbridge_probe(const struct x86_cpu_id *id)
                         mc, mc + 1, num_mc);
 
                sbridge_dev->mc = mc++;
-               rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
+               rc = sbridge_register_mci(sbridge_dev, ptable->type);
                if (unlikely(rc < 0))
                        goto fail1;
        }
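
Tagging each pci_id_table entry with its type removes the assumption that the table array's order matches enum type, which stopped holding once BROADWELL and BROADWELL-DE began sharing one table. A reduced before/after sketch of the lookup (enum values and entries invented):

#include <stdio.h>

enum type { SANDY_BRIDGE, IVY_BRIDGE, HASWELL, BROADWELL };

struct pci_id_table {
	const char *descr;
	enum type type;		/* carried explicitly, as in the hunk */
};

#define PCI_ID_TABLE_ENTRY(A, T) { .descr = A, .type = T }

static const struct pci_id_table tables[] = {
	PCI_ID_TABLE_ENTRY("sbridge", SANDY_BRIDGE),
	PCI_ID_TABLE_ENTRY("broadwell", BROADWELL),
	PCI_ID_TABLE_ENTRY("broadwell", BROADWELL),	/* BROADWELL-DE reuses it */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(tables) / sizeof(tables[0]); i++)
		/* the old code derived the type from the index i, which is
		 * wrong for the duplicated entry; reading ->type always works */
		printf("%zu: %s type=%d\n", i, tables[i].descr, tables[i].type);
	return 0;
}
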
index 8b3226dca1d9d25e004bbb2a030a7d77e9ff48d2..caff46c0e214bd3e18fda8ccbd7949d5c09ea00b 100644 (file)
@@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev)
 
        palmas_enable_irq(palmas_usb);
        /* perform initial detection */
+       if (palmas_usb->enable_gpio_vbus_detection)
+               palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
        palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
        device_set_wakeup_capable(&pdev->dev, true);
        return 0;
index a850cbc48d8d17a3de7ba51cb7ea033e57ed4fcd..c49d50e68aeebb0dbe368f8e8f526b8c18bdebb0 100644 (file)
@@ -174,6 +174,7 @@ static __init void reserve_regions(void)
 {
        efi_memory_desc_t *md;
        u64 paddr, npages, size;
+       int resv;
 
        if (efi_enabled(EFI_DBG))
                pr_info("Processing EFI memory map:\n");
@@ -190,12 +191,14 @@ static __init void reserve_regions(void)
                paddr = md->phys_addr;
                npages = md->num_pages;
 
+               resv = is_reserve_region(md);
                if (efi_enabled(EFI_DBG)) {
                        char buf[64];
 
-                       pr_info("  0x%012llx-0x%012llx %s",
+                       pr_info("  0x%012llx-0x%012llx %s%s\n",
                                paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
-                               efi_md_typeattr_format(buf, sizeof(buf), md));
+                               efi_md_typeattr_format(buf, sizeof(buf), md),
+                               resv ? "*" : "");
                }
 
                memrange_efi_to_native(&paddr, &npages);
@@ -204,14 +207,9 @@ static __init void reserve_regions(void)
                if (is_normal_ram(md))
                        early_init_dt_add_memory_arch(paddr, size);
 
-               if (is_reserve_region(md)) {
+               if (resv)
                        memblock_mark_nomap(paddr, size);
-                       if (efi_enabled(EFI_DBG))
-                               pr_cont("*");
-               }
 
-               if (efi_enabled(EFI_DBG))
-                       pr_cont("\n");
        }
 
        set_bit(EFI_MEMMAP, &efi.flags);
index 48da857f4774829d2301559c7e69656bae636307..cebcb405812ec0c8a98f771b94f8a4c5caddabc5 100644 (file)
@@ -33,6 +33,7 @@ config ARCH_REQUIRE_GPIOLIB
 
 menuconfig GPIOLIB
        bool "GPIO Support"
+       select ANON_INODES
        help
          This enables GPIO support through the generic GPIO library.
          You only need to enable this, if you also want to enable
@@ -530,7 +531,7 @@ menu "Port-mapped I/O GPIO drivers"
 
 config GPIO_104_DIO_48E
        tristate "ACCES 104-DIO-48E GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
@@ -540,7 +541,7 @@ config GPIO_104_DIO_48E
 
 config GPIO_104_IDIO_16
        tristate "ACCES 104-IDIO-16 GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16,
@@ -551,7 +552,7 @@ config GPIO_104_IDIO_16
 
 config GPIO_104_IDI_48
        tristate "ACCES 104-IDI-48 GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
@@ -627,7 +628,7 @@ config GPIO_TS5500
 
 config GPIO_WS16C48
        tristate "WinSystems WS16C48 GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the WinSystems WS16C48. The base port
index 1a647c07be677784cbac72912a7701e4c0bb1a18..fcf776971ca92ce5e3f997b471d37fe1b87d338a 100644 (file)
@@ -75,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
        struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
        const unsigned io_port = offset / 8;
-       const unsigned control_port = io_port / 2;
+       const unsigned int control_port = io_port / 3;
        const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
        unsigned long flags;
        unsigned control;
@@ -115,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 {
        struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
        const unsigned io_port = offset / 8;
-       const unsigned control_port = io_port / 2;
+       const unsigned int control_port = io_port / 3;
        const unsigned mask = BIT(offset % 8);
        const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
        const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
index 6c75c83baf5a82a3eb1da0c7a5d5949bcf31e43b..2d2763ea1a68277eedf82f6e9c132be6f8706923 100644 (file)
@@ -247,6 +247,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
        idi48gpio->irq = irq[id];
 
        spin_lock_init(&idi48gpio->lock);
+       spin_lock_init(&idi48gpio->ack_lock);
 
        dev_set_drvdata(dev, idi48gpio);
 
index 9aabc48ff5de0d9302801856b5f8881b11d5c5a6..953e4b829e32b8aa02f0cdb0774f8fe8244ef565 100644 (file)
@@ -547,11 +547,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
        /* disable interrupts and clear status */
        for (i = 0; i < kona_gpio->num_bank; i++) {
                /* Unlock the entire bank first */
-               bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
+               bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
                writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
                writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
                /* Now re-lock the bank */
-               bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
+               bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
        }
 }
 
index 75c6355b018df46a192feaa4f57dedcb5be6f7b7..e72794e463aa3fd5f3adb6e05128a3f01126932b 100644 (file)
@@ -709,7 +709,13 @@ static int zynq_gpio_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "input clock not found.\n");
                return PTR_ERR(gpio->clk);
        }
+       ret = clk_prepare_enable(gpio->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Unable to enable clock.\n");
+               return ret;
+       }
 
+       pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
@@ -747,6 +753,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
        pm_runtime_put(&pdev->dev);
 err_pm_dis:
        pm_runtime_disable(&pdev->dev);
+       clk_disable_unprepare(gpio->clk);
 
        return ret;
 }
index d22dcc38179dba153d3874e39847665a2811f8b9..4aabddb38b593f3bafca1dfd21f44ab75c2fdc4f 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/io-mapping.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
index 24f60d28f0c055dbddfcd2750737534071813d26..570771ed19e6f1f326575fa20cdc621ea2588432 100644 (file)
@@ -449,7 +449,6 @@ static void gpiodevice_release(struct device *dev)
 {
        struct gpio_device *gdev = dev_get_drvdata(dev);
 
-       cdev_del(&gdev->chrdev);
        list_del(&gdev->list);
        ida_simple_remove(&gpio_ida, gdev->id);
        kfree(gdev->label);
@@ -482,7 +481,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
 
        /* From this point, the .release() function cleans up gpio_device */
        gdev->dev.release = gpiodevice_release;
-       get_device(&gdev->dev);
        pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
                 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
                 dev_name(&gdev->dev), gdev->chip->label ? : "generic");
@@ -770,6 +768,8 @@ void gpiochip_remove(struct gpio_chip *chip)
         * be removed, else it will be dangling until the last user is
         * gone.
         */
+       cdev_del(&gdev->chrdev);
+       device_del(&gdev->dev);
        put_device(&gdev->dev);
 }
 EXPORT_SYMBOL_GPL(gpiochip_remove);
@@ -869,7 +869,7 @@ struct gpio_chip *gpiochip_find(void *data,
 
        spin_lock_irqsave(&gpio_lock, flags);
        list_for_each_entry(gdev, &gpio_devices, list)
-               if (match(gdev->chip, data))
+               if (gdev->chip && match(gdev->chip, data))
                        break;
 
        /* No match? */
@@ -1373,8 +1373,12 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
 #define VALIDATE_DESC(desc) do { \
        if (!desc) \
                return 0; \
+       if (IS_ERR(desc)) {                                             \
+               pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
+               return PTR_ERR(desc); \
+       } \
        if (!desc->gdev) { \
-               pr_warn("%s: invalid GPIO\n", __func__); \
+               pr_warn("%s: invalid GPIO (no device)\n", __func__); \
                return -EINVAL; \
        } \
        if ( !desc->gdev->chip ) { \
@@ -1386,8 +1390,12 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
 #define VALIDATE_DESC_VOID(desc) do { \
        if (!desc) \
                return; \
+       if (IS_ERR(desc)) {                                             \
+               pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
+               return; \
+       } \
        if (!desc->gdev) { \
-               pr_warn("%s: invalid GPIO\n", __func__); \
+               pr_warn("%s: invalid GPIO (no device)\n", __func__); \
                return; \
        } \
        if (!desc->gdev->chip) { \
@@ -2056,7 +2064,14 @@ int gpiod_to_irq(const struct gpio_desc *desc)
        struct gpio_chip *chip;
        int offset;
 
-       VALIDATE_DESC(desc);
+       /*
+        * We cannot use VALIDATE_DESC() here, as the gpiod_to_irq() consumer
+        * semantics require this function to return a negative error number,
+        * not zero, on an invalid descriptor.
+        */
+       if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
+               return -EINVAL;
+
        chip = desc->gdev->chip;
        offset = gpio_chip_hwgpio(desc);
        if (chip->to_irq) {
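
The new IS_ERR() checks in VALIDATE_DESC handle descriptors that are error pointers: in the kernel, a pointer value in the top few kilobytes of the address space encodes an errno value, distinguishable from both NULL and a valid object. A freestanding re-implementation of the convention, for illustration only (the real helpers live in include/linux/err.h):

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095
#define EINVAL 22

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct gpio_desc { int dummy; };

static int gpiod_do_something(struct gpio_desc *desc)
{
	if (!desc)
		return 0;		/* NULL is tolerated, as in VALIDATE_DESC */
	if (IS_ERR(desc))
		return (int)PTR_ERR(desc);	/* propagate the encoded errno */
	return 1;			/* a real descriptor */
}

int main(void)
{
	struct gpio_desc d;

	printf("%d %d %d\n",
	       gpiod_do_something(NULL),
	       gpiod_do_something(ERR_PTR(-EINVAL)),
	       gpiod_do_something(&d));
	return 0;
}
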
index da3d02154fa622a09897d9d5d505befae949c9ee..eb09037a7161aa81e9525b55f567ca61f09ab73c 100644 (file)
@@ -85,8 +85,12 @@ extern int amdgpu_vm_debug;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_powerplay;
+extern int amdgpu_powercontainment;
 extern unsigned amdgpu_pcie_gen_cap;
 extern unsigned amdgpu_pcie_lane_cap;
+extern unsigned amdgpu_cg_mask;
+extern unsigned amdgpu_pg_mask;
+extern char *amdgpu_disable_cu;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
 #define AMDGPU_MAX_USEC_TIMEOUT                        100000  /* 100 ms */
@@ -183,6 +187,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
 int amdgpu_set_powergating_state(struct amdgpu_device *adev,
                                  enum amd_ip_block_type block_type,
                                  enum amd_powergating_state state);
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+                        enum amd_ip_block_type block_type);
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+                   enum amd_ip_block_type block_type);
 
 struct amdgpu_ip_block_version {
        enum amd_ip_block_type type;
@@ -594,11 +602,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
                     void *owner);
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
-                            struct fence *fence);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+                                    struct amdgpu_ring *ring);
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);
@@ -754,12 +760,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job);
 
+void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
-void amdgpu_job_free_func(struct kref *refcount);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f);
-void amdgpu_job_timeout_func(struct work_struct *work);
 
 struct amdgpu_ring {
        struct amdgpu_device            *adev;
@@ -771,8 +776,6 @@ struct amdgpu_ring {
        struct amdgpu_bo        *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr_offs;
-       u64                     next_rptr_gpu_addr;
-       volatile u32            *next_rptr_cpu_addr;
        unsigned                wptr;
        unsigned                wptr_old;
        unsigned                ring_size;
@@ -791,7 +794,6 @@ struct amdgpu_ring {
        u32                     doorbell_index;
        bool                    use_doorbell;
        unsigned                wptr_offs;
-       unsigned                next_rptr_offs;
        unsigned                fence_offs;
        uint64_t                current_ctx;
        enum amdgpu_ring_type   type;
@@ -799,6 +801,9 @@ struct amdgpu_ring {
        unsigned                cond_exe_offs;
        u64                             cond_exe_gpu_addr;
        volatile u32    *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+       struct dentry *ent;
+#endif
 };
 
 /*
@@ -861,6 +866,7 @@ struct amdgpu_vm {
        struct amdgpu_bo        *page_directory;
        unsigned                max_pde_used;
        struct fence            *page_directory_fence;
+       uint64_t                last_eviction_counter;
 
        /* array of page tables, one for each page directory entry */
        struct amdgpu_vm_pt     *page_tables;
@@ -883,13 +889,14 @@ struct amdgpu_vm_id {
        struct fence            *first;
        struct amdgpu_sync      active;
        struct fence            *last_flush;
-       struct amdgpu_ring      *last_user;
        atomic64_t              owner;
 
        uint64_t                pd_gpu_addr;
        /* last flushed PD/PT update */
        struct fence            *flushed_updates;
 
+       uint32_t                current_gpu_reset_count;
+
        uint32_t                gds_base;
        uint32_t                gds_size;
        uint32_t                gws_base;
@@ -905,6 +912,10 @@ struct amdgpu_vm_manager {
        struct list_head                        ids_lru;
        struct amdgpu_vm_id                     ids[AMDGPU_NUM_VM];
 
+       /* Handling of VM fences */
+       u64                                     fence_context;
+       unsigned                                seqno[AMDGPU_MAX_RINGS];
+
        uint32_t                                max_pfn;
        /* vram base address for page table entry  */
        u64                                     vram_base_offset;
@@ -926,17 +937,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                         struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync, struct fence *fence,
-                     unsigned *vm_id, uint64_t *vm_pd_addr);
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
-                   unsigned vm_id, uint64_t pd_addr,
-                   uint32_t gds_base, uint32_t gds_size,
-                   uint32_t gws_base, uint32_t gws_size,
-                   uint32_t oa_base, uint32_t oa_size);
+                     struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1142,6 +1150,12 @@ struct amdgpu_cu_info {
        uint32_t bitmap[4][4];
 };
 
+struct amdgpu_gfx_funcs {
+       /* get the gpu clock counter */
+       uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+       void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+};
+
 struct amdgpu_gfx {
        struct mutex                    gpu_clock_mutex;
        struct amdgpu_gca_config        config;
@@ -1178,6 +1192,7 @@ struct amdgpu_gfx {
        /* ce ram size*/
        unsigned                        ce_ram_size;
        struct amdgpu_cu_info           cu_info;
+       const struct amdgpu_gfx_funcs   *funcs;
 };
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1195,10 +1210,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
-                           uint32_t **data);
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
-                       unsigned size, uint32_t *data);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned ring_size, u32 nop, u32 align_mask,
                     struct amdgpu_irq_src *irq_src, unsigned irq_type,
@@ -1250,6 +1261,7 @@ struct amdgpu_job {
        uint32_t                num_ibs;
        void                    *owner;
        uint64_t                ctx;
+       bool                    vm_needs_flush;
        unsigned                vm_id;
        uint64_t                vm_pd_addr;
        uint32_t                gds_base, gds_size;
@@ -1257,8 +1269,7 @@ struct amdgpu_job {
        uint32_t                oa_base, oa_size;
 
        /* user fence handling */
-       struct amdgpu_bo        *uf_bo;
-       uint32_t                uf_offset;
+       uint64_t                uf_addr;
        uint64_t                uf_sequence;
 
 };
@@ -1560,6 +1571,12 @@ struct amdgpu_dpm_funcs {
        u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
        int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
        int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+       int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+       int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+       int (*get_sclk_od)(struct amdgpu_device *adev);
+       int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+       int (*get_mclk_od)(struct amdgpu_device *adev);
+       int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
 };
 
 struct amdgpu_dpm {
@@ -1767,6 +1784,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor);
 void amdgpu_debugfs_cleanup(struct drm_minor *minor);
 #endif
 
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+
 /*
  * amdgpu smumgr functions
  */
@@ -1811,15 +1830,13 @@ struct amdgpu_asic_funcs {
                             u32 sh_num, u32 reg_offset, u32 *value);
        void (*set_vga_state)(struct amdgpu_device *adev, bool state);
        int (*reset)(struct amdgpu_device *adev);
-       /* wait for mc_idle */
-       int (*wait_for_mc_idle)(struct amdgpu_device *adev);
        /* get the reference clock */
        u32 (*get_xclk)(struct amdgpu_device *adev);
-       /* get the gpu clock counter */
-       uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
        /* MM block clocks */
        int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
        int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
+       /* query virtual capabilities */
+       u32 (*get_virtual_caps)(struct amdgpu_device *adev);
 };
 
 /*
@@ -1914,8 +1931,12 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 
 
 /* GPU virtualization */
+#define AMDGPU_VIRT_CAPS_SRIOV_EN       (1 << 0)
+#define AMDGPU_VIRT_CAPS_IS_VF          (1 << 1)
 struct amdgpu_virtualization {
        bool supports_sr_iov;
+       bool is_virtual;
+       u32 caps;
 };
 
 /*
@@ -1997,6 +2018,10 @@ struct amdgpu_device {
        spinlock_t didt_idx_lock;
        amdgpu_rreg_t                   didt_rreg;
        amdgpu_wreg_t                   didt_wreg;
+       /* protects concurrent gc_cac register access */
+       spinlock_t gc_cac_idx_lock;
+       amdgpu_rreg_t                   gc_cac_rreg;
+       amdgpu_wreg_t                   gc_cac_wreg;
        /* protects concurrent ENDPOINT (audio) register access */
        spinlock_t audio_endpt_idx_lock;
        amdgpu_block_rreg_t             audio_endpt_rreg;
@@ -2022,6 +2047,7 @@ struct amdgpu_device {
        atomic64_t                      vram_vis_usage;
        atomic64_t                      gtt_usage;
        atomic64_t                      num_bytes_moved;
+       atomic64_t                      num_evictions;
        atomic_t                        gpu_reset_counter;
 
        /* display */
@@ -2125,6 +2151,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
+#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
+#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
 #define WREG32_P(reg, val, mask)                               \
@@ -2200,11 +2228,10 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
  */
 #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
-#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
+#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
@@ -2257,6 +2284,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
 #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
 
 #define amdgpu_dpm_get_temperature(adev) \
        ((adev)->pp_enabled ?                                           \
@@ -2335,6 +2364,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_force_clock_level(adev, type, level) \
                (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
 
+#define amdgpu_dpm_get_sclk_od(adev) \
+       (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+       (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+       ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+       ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
 #define amdgpu_dpm_dispatch_task(adev, event_id, input, output)                \
        (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
 
@@ -2376,9 +2417,13 @@ bool amdgpu_device_is_px(struct drm_device *dev);
 #if defined(CONFIG_VGA_SWITCHEROO)
 void amdgpu_register_atpx_handler(void);
 void amdgpu_unregister_atpx_handler(void);
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+bool amdgpu_is_atpx_hybrid(void);
 #else
 static inline void amdgpu_register_atpx_handler(void) {}
 static inline void amdgpu_unregister_atpx_handler(void) {}
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
 #endif
 
 /*
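The caps field added to struct amdgpu_virtualization above is a plain bitmask, so ASIC-independent code can combine the two flags in one test. A minimal caller-side sketch (the helper name is hypothetical; only the AMDGPU_VIRT_CAPS_* bits come from this header):

	/* Hypothetical convenience check built on the new caps bits. */
	static inline bool amdgpu_virt_is_sriov_vf(u32 caps)
	{
		return (caps & AMDGPU_VIRT_CAPS_SRIOV_EN) &&
		       (caps & AMDGPU_VIRT_CAPS_IS_VF);
	}
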
index 32809f749903c2196cdd2a2fe2006a1bf3f2a0a9..d080d0807a5bb81fbb19f6e45926c2d8ff06050d 100644 (file)
@@ -240,8 +240,8 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
 {
        struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
 
-       if (rdev->asic_funcs->get_gpu_clock_counter)
-               return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+       if (rdev->gfx.funcs->get_gpu_clock_counter)
+               return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
        return 0;
 }
 
index 35a1248aaa7778a04fa64957d4d2b65a3911ab9c..0494fe7b62c0163abded8b316e5bc2e7d7e77d51 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <linux/pci.h>
+#include <linux/delay.h>
 
 #include "amd_acpi.h"
 
@@ -27,6 +28,7 @@ struct amdgpu_atpx_functions {
 struct amdgpu_atpx {
        acpi_handle handle;
        struct amdgpu_atpx_functions functions;
+       bool is_hybrid;
 };
 
 static struct amdgpu_atpx_priv {
@@ -63,6 +65,14 @@ bool amdgpu_has_atpx(void) {
        return amdgpu_atpx_priv.atpx_detected;
 }
 
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+       return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool amdgpu_is_atpx_hybrid(void) {
+       return amdgpu_atpx_priv.atpx.is_hybrid;
+}
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -142,18 +152,12 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
  */
 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 {
-       /* make sure required functions are enabled */
-       /* dGPU power control is required */
-       if (atpx->functions.power_cntl == false) {
-               printk("ATPX dGPU power cntl not present, forcing\n");
-               atpx->functions.power_cntl = true;
-       }
+       u32 valid_bits = 0;
 
        if (atpx->functions.px_params) {
                union acpi_object *info;
                struct atpx_px_params output;
                size_t size;
-               u32 valid_bits;
 
                info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
                if (!info)
@@ -172,19 +176,43 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
                memcpy(&output, info->buffer.pointer, size);
 
                valid_bits = output.flags & output.valid_flags;
-               /* if separate mux flag is set, mux controls are required */
-               if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
-                       atpx->functions.i2c_mux_cntl = true;
-                       atpx->functions.disp_mux_cntl = true;
-               }
-               /* if any outputs are muxed, mux controls are required */
-               if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
-                                 ATPX_TV_SIGNAL_MUXED |
-                                 ATPX_DFP_SIGNAL_MUXED))
-                       atpx->functions.disp_mux_cntl = true;
 
                kfree(info);
        }
+
+       /* if separate mux flag is set, mux controls are required */
+       if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+               atpx->functions.i2c_mux_cntl = true;
+               atpx->functions.disp_mux_cntl = true;
+       }
+       /* if any outputs are muxed, mux controls are required */
+       if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+                         ATPX_TV_SIGNAL_MUXED |
+                         ATPX_DFP_SIGNAL_MUXED))
+               atpx->functions.disp_mux_cntl = true;
+
+
+       /* some BIOSes set these bits rather than flagging power_cntl as supported */
+       if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+                         ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+               atpx->functions.power_cntl = true;
+
+       atpx->is_hybrid = false;
+       if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+               printk("ATPX Hybrid Graphics\n");
+#if 1
+               /* This is a temporary hack until the D3 cold support
+                * makes it upstream.  The ATPX power_control method seems
+                * to still work even if the system should be using
+                * the new standardized hybrid D3 cold ACPI interface.
+                */
+               atpx->functions.power_cntl = true;
+#else
+               atpx->functions.power_cntl = false;
+#endif
+               atpx->is_hybrid = true;
+       }
+
        return 0;
 }
 
@@ -259,6 +287,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
                if (!info)
                        return -EIO;
                kfree(info);
+
+               /* 200ms delay is required after off */
+               if (state == 0)
+                       msleep(200);
        }
        return 0;
 }
index 823bf5e0b0c841606e8a27cf7d488938113cc4a6..651115dcce12c6ff332eac87de6e9e5712b6d69c 100644 (file)
@@ -94,6 +94,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
        unsigned last_entry = 0, first_userptr = num_entries;
        unsigned i;
        int r;
+       unsigned long total_size = 0;
 
        array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
        if (!array)
@@ -140,6 +141,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
                if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
                        oa_obj = entry->robj;
 
+               total_size += amdgpu_bo_size(entry->robj);
                trace_amdgpu_bo_list_set(list, entry->robj);
        }
 
@@ -155,6 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
        list->array = array;
        list->num_entries = num_entries;
 
+       trace_amdgpu_cs_bo_status(list->num_entries, total_size);
        return 0;
 
 error_free:
index 199f76baf22c3eea20207fec60c1e868d173d1cb..5556ce97919921254a924aa71b5bdeba128b8b09 100644 (file)
@@ -312,6 +312,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
                return RREG32_UVD_CTX(index);
        case CGS_IND_REG__DIDT:
                return RREG32_DIDT(index);
+       case CGS_IND_REG_GC_CAC:
+               return RREG32_GC_CAC(index);
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return 0;
@@ -336,6 +338,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
                return WREG32_UVD_CTX(index, value);
        case CGS_IND_REG__DIDT:
                return WREG32_DIDT(index, value);
+       case CGS_IND_REG_GC_CAC:
+               return WREG32_GC_CAC(index, value);
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return;
@@ -696,6 +700,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
        return result;
 }
 
+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+       CGS_FUNC_ADEV;
+       if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+               release_firmware(adev->pm.fw);
+               return 0;
+       }
+       /* cannot release other firmware because they are not created by cgs */
+       return -EINVAL;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                        enum cgs_ucode_id type,
                                        struct cgs_firmware_info *info)
@@ -776,6 +791,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                }
 
                hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+               amdgpu_ucode_print_smc_hdr(&hdr->header);
                adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
                ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
                ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
@@ -819,6 +835,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
        case CGS_SYSTEM_INFO_GFX_CU_INFO:
                sys_info->value = adev->gfx.cu_info.number;
                break;
+       case CGS_SYSTEM_INFO_GFX_SE_INFO:
+               sys_info->value = adev->gfx.config.max_shader_engines;
+               break;
        default:
                return -ENODEV;
        }
@@ -898,7 +917,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
        struct cgs_acpi_method_argument *argument = NULL;
        uint32_t i, count;
        acpi_status status;
-       int result;
+       int result = 0;
        uint32_t func_no = 0xFFFFFFFF;
 
        handle = ACPI_HANDLE(&adev->pdev->dev);
@@ -961,11 +980,11 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
                                params->integer.value = argument->value;
                                break;
                        case ACPI_TYPE_STRING:
-                               params->string.length = argument->method_length;
+                               params->string.length = argument->data_length;
                                params->string.pointer = argument->pointer;
                                break;
                        case ACPI_TYPE_BUFFER:
-                               params->buffer.length = argument->method_length;
+                               params->buffer.length = argument->data_length;
                                params->buffer.pointer = argument->pointer;
                                break;
                        default:
@@ -1068,17 +1087,14 @@ int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
        struct cgs_acpi_method_info info = {0};
 
        acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
-       acpi_input[0].method_length = sizeof(uint32_t);
        acpi_input[0].data_length = sizeof(uint32_t);
        acpi_input[0].value = acpi_function;
 
        acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
-       acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
        acpi_input[1].data_length = input_size;
        acpi_input[1].pointer = pinput;
 
        acpi_output.type = CGS_ACPI_TYPE_BUFFER;
-       acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
        acpi_output.data_length = output_size;
        acpi_output.pointer = poutput;
 
@@ -1125,6 +1141,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
        amdgpu_cgs_pm_query_clock_limits,
        amdgpu_cgs_set_camera_voltages,
        amdgpu_cgs_get_firmware_info,
+       amdgpu_cgs_rel_firmware,
        amdgpu_cgs_set_powergating_state,
        amdgpu_cgs_set_clockgating_state,
        amdgpu_cgs_get_active_displays_info,
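With CGS_IND_REG_GC_CAC wired into the indirect-register switch above, powerplay code can reach GC_CAC registers without knowing about adev->gc_cac_rreg/wreg or the gc_cac_idx_lock. A minimal sketch, assuming the cgs_read_ind_register()/cgs_write_ind_register() wrappers from cgs_common.h (the rmw helper itself is hypothetical):

	/* Read-modify-write one GC_CAC register through the CGS interface. */
	static void gc_cac_rmw(struct cgs_device *cgs_device, unsigned index,
			       uint32_t clear, uint32_t set)
	{
		uint32_t v = cgs_read_ind_register(cgs_device, CGS_IND_REG_GC_CAC, index);

		v = (v & ~clear) | set;
		cgs_write_ind_register(cgs_device, CGS_IND_REG_GC_CAC, index, v);
	}
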
index cb07da41152b7f8f7ef3229d56832fc1fb881c75..ff0b55a65ca3fcb29d51a2b11bf46b14c4a848a0 100644 (file)
@@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
                                                   DRM_MODE_SCALE_NONE);
                        /* no HPD on analog connectors */
                        amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
                        connector->interlace_allowed = true;
                        connector->doublescan_allowed = true;
                        break;
@@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
        }
 
        if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
-               if (i2c_bus->valid)
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+               if (i2c_bus->valid) {
+                       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                                           DRM_CONNECTOR_POLL_DISCONNECT;
+               }
        } else
                connector->polled = DRM_CONNECTOR_POLL_HPD;
 
index 9bc8f1d99733e6817ba35efe55e179aa8999e9f1..0307ff5887c593a079d8fd63fa3245fb683162f3 100644 (file)
@@ -216,11 +216,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        if (ret)
                goto free_all_kdata;
 
-       if (p->uf_entry.robj) {
-               p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
-               p->job->uf_offset = uf_offset;
-       }
-
+       if (p->uf_entry.robj)
+               p->job->uf_addr = uf_offset;
        kfree(chunk_array);
        return 0;
 
@@ -459,7 +456,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                list_splice(&need_pages, &p->validated);
        }
 
-       amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
+       amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
 
        p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
        p->bytes_moved = 0;
@@ -472,6 +469,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        if (r)
                goto error_validate;
 
+       fpriv->vm.last_eviction_counter =
+               atomic64_read(&p->adev->num_evictions);
+
        if (p->bo_list) {
                struct amdgpu_bo *gds = p->bo_list->gds_obj;
                struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -499,6 +499,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                }
        }
 
+       if (p->uf_entry.robj)
+               p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+
 error_validate:
        if (r) {
                amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
@@ -653,18 +656,21 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 
        /* Only for UVD/VCE VM emulation */
        if (ring->funcs->parse_cs) {
+               p->job->vm = NULL;
                for (i = 0; i < p->job->num_ibs; i++) {
                        r = amdgpu_ring_parse_cs(ring, p, i);
                        if (r)
                                return r;
                }
-       }
+       } else {
+               p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 
-       r = amdgpu_bo_vm_update_pte(p, vm);
-       if (!r)
-               amdgpu_cs_sync_rings(p);
+               r = amdgpu_bo_vm_update_pte(p, vm);
+               if (r)
+                       return r;
+       }
 
-       return r;
+       return amdgpu_cs_sync_rings(p);
 }
 
 static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
@@ -761,7 +767,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
        }
 
        /* UVD & VCE fw doesn't support user fences */
-       if (parser->job->uf_bo && (
+       if (parser->job->uf_addr && (
            parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
            parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
                return -EINVAL;
@@ -830,17 +836,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 {
        struct amdgpu_ring *ring = p->job->ring;
        struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
-       struct fence *fence;
        struct amdgpu_job *job;
        int r;
 
        job = p->job;
        p->job = NULL;
 
-       r = amd_sched_job_init(&job->base, &ring->sched,
-                              entity, amdgpu_job_timeout_func,
-                              amdgpu_job_free_func,
-                              p->filp, &fence);
+       r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
        if (r) {
                amdgpu_job_free(job);
                return r;
@@ -848,9 +850,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        job->owner = p->filp;
        job->ctx = entity->fence_context;
-       p->fence = fence_get(fence);
-       cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
+       p->fence = fence_get(&job->base.s_fence->finished);
+       cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
        job->uf_sequence = cs->out.handle;
+       amdgpu_job_free_resources(job);
 
        trace_amdgpu_cs_ioctl(job);
        amd_sched_entity_push_job(&job->base);
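The uf_bo/uf_offset pair is folded into the single uf_addr here in two steps: amdgpu_cs_parser_init() stores the user-supplied offset, and amdgpu_cs_parser_bos() adds the buffer's GPU offset once the BO has been validated and placed, so by submission time uf_addr is the final address the fence value is written to. Restated as one expression (helper name hypothetical, valid only after validation):

	/* Final user-fence GPU address, combining the two hunks above. */
	static uint64_t user_fence_gpu_addr(struct amdgpu_bo *bo, uint64_t user_offset)
	{
		return amdgpu_bo_gpu_offset(bo) + user_offset;
	}
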
index 2ab5e0b184fb0f8ed613355d83387d6c544c46fd..614fb026436d347f62cd6debe35f3152ebd76df8 100644 (file)
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
@@ -35,6 +36,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 #include "amdgpu_i2c.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
@@ -79,24 +81,27 @@ bool amdgpu_device_is_px(struct drm_device *dev)
 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        bool always_indirect)
 {
+       uint32_t ret;
+
        if ((reg * 4) < adev->rmmio_size && !always_indirect)
-               return readl(((void __iomem *)adev->rmmio) + (reg * 4));
+               ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;
-               uint32_t ret;
 
                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
-               return ret;
        }
+       trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+       return ret;
 }
 
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    bool always_indirect)
 {
+       trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
        if ((reg * 4) < adev->rmmio_size && !always_indirect)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
@@ -827,8 +832,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
  */
 static void amdgpu_atombios_fini(struct amdgpu_device *adev)
 {
-       if (adev->mode_info.atom_context)
+       if (adev->mode_info.atom_context) {
                kfree(adev->mode_info.atom_context->scratch);
+               kfree(adev->mode_info.atom_context->iio);
+       }
        kfree(adev->mode_info.atom_context);
        adev->mode_info.atom_context = NULL;
        kfree(adev->mode_info.atom_card_info);
@@ -1068,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
        int i, r = 0;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
                                                                            state);
                        if (r)
                                return r;
+                       break;
                }
        }
        return r;
@@ -1085,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
        int i, r = 0;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
                                                                            state);
                        if (r)
                                return r;
+                       break;
                }
        }
        return r;
 }
 
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+                        enum amd_ip_block_type block_type)
+{
+       int i, r;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if (adev->ip_blocks[i].type == block_type) {
+                       r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+                       if (r)
+                               return r;
+                       break;
+               }
+       }
+       return 0;
+
+}
+
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+                   enum amd_ip_block_type block_type)
+{
+       int i;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if (adev->ip_blocks[i].type == block_type)
+                       return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+       }
+       return true;
+
+}
+
 const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
                                        struct amdgpu_device *adev,
                                        enum amd_ip_block_type type)
@@ -1207,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
                }
        }
 
+       adev->cg_flags &= amdgpu_cg_mask;
+       adev->pg_flags &= amdgpu_pg_mask;
+
        return 0;
 }
 
@@ -1325,6 +1375,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                adev->ip_block_status[i].valid = false;
        }
 
+       for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+               if (adev->ip_blocks[i].funcs->late_fini)
+                       adev->ip_blocks[i].funcs->late_fini((void *)adev);
+       }
+
        return 0;
 }
 
@@ -1378,6 +1433,15 @@ static int amdgpu_resume(struct amdgpu_device *adev)
        return 0;
 }
 
+static bool amdgpu_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+       return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+       return false;
+#endif
+}
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -1424,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
        adev->didt_rreg = &amdgpu_invalid_rreg;
        adev->didt_wreg = &amdgpu_invalid_wreg;
+       adev->gc_cac_rreg = &amdgpu_invalid_rreg;
+       adev->gc_cac_wreg = &amdgpu_invalid_wreg;
        adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
        adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
 
+
        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
                 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
                 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -1451,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        spin_lock_init(&adev->pcie_idx_lock);
        spin_lock_init(&adev->uvd_ctx_idx_lock);
        spin_lock_init(&adev->didt_idx_lock);
+       spin_lock_init(&adev->gc_cac_idx_lock);
        spin_lock_init(&adev->audio_endpt_idx_lock);
 
        adev->rmmio_base = pci_resource_start(adev->pdev, 5);
@@ -1495,29 +1563,38 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
 
        /* Read BIOS */
-       if (!amdgpu_get_bios(adev))
-               return -EINVAL;
+       if (!amdgpu_get_bios(adev)) {
+               r = -EINVAL;
+               goto failed;
+       }
        /* Must be an ATOMBIOS */
        if (!adev->is_atom_bios) {
                dev_err(adev->dev, "Expecting atombios for GPU\n");
-               return -EINVAL;
+               r = -EINVAL;
+               goto failed;
        }
        r = amdgpu_atombios_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_atombios_init failed\n");
-               return r;
+               goto failed;
        }
 
        /* See if the asic supports SR-IOV */
        adev->virtualization.supports_sr_iov =
                amdgpu_atombios_has_gpu_virtualization_table(adev);
 
+       /* Check if we are executing in a virtualized environment */
+       adev->virtualization.is_virtual = amdgpu_device_is_virtual();
+       adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+
        /* Post card if necessary */
        if (!amdgpu_card_posted(adev) ||
-           adev->virtualization.supports_sr_iov) {
+           (adev->virtualization.is_virtual &&
+            !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
                if (!adev->bios) {
                        dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto failed;
                }
                DRM_INFO("GPU not posted. posting now...\n");
                amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1527,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_atombios_get_clock_info(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
-               return r;
+               goto failed;
        }
        /* init i2c buses */
        amdgpu_atombios_i2c_init(adev);
@@ -1536,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_fence_driver_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
-               return r;
+               goto failed;
        }
 
        /* init the mode config */
@@ -1546,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (r) {
                dev_err(adev->dev, "amdgpu_init failed\n");
                amdgpu_fini(adev);
-               return r;
+               goto failed;
        }
 
        adev->accel_working = true;
@@ -1556,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_ib_pool_init(adev);
        if (r) {
                dev_err(adev->dev, "IB initialization failed (%d).\n", r);
-               return r;
+               goto failed;
        }
 
        r = amdgpu_ib_ring_tests(adev);
@@ -1573,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                DRM_ERROR("registering register debugfs failed (%d).\n", r);
        }
 
+       r = amdgpu_debugfs_firmware_init(adev);
+       if (r) {
+               DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+               goto failed;
+       }
+
        if ((amdgpu_testing & 1)) {
                if (adev->accel_working)
                        amdgpu_test_moves(adev);
@@ -1598,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_late_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_late_init failed\n");
-               return r;
+               goto failed;
        }
 
        return 0;
+
+failed:
+       if (runtime)
+               vga_switcheroo_fini_domain_pm_ops(adev->dev);
+       return r;
 }
 
 static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
@@ -1636,6 +1724,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        kfree(adev->bios);
        adev->bios = NULL;
        vga_switcheroo_unregister_client(adev->pdev);
+       if (adev->flags & AMD_IS_PX)
+               vga_switcheroo_fini_domain_pm_ops(adev->dev);
        vga_client_register(adev->pdev, NULL, NULL, NULL);
        if (adev->rio_mem)
                pci_iounmap(adev->pdev, adev->rio_mem);
@@ -1841,11 +1931,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
  */
 int amdgpu_gpu_reset(struct amdgpu_device *adev)
 {
-       unsigned ring_sizes[AMDGPU_MAX_RINGS];
-       uint32_t *ring_data[AMDGPU_MAX_RINGS];
-
-       bool saved = false;
-
        int i, r;
        int resched;
 
@@ -1854,22 +1939,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-       r = amdgpu_suspend(adev);
-
+       /* block scheduler */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
+
                if (!ring)
                        continue;
-
-               ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
-               if (ring_sizes[i]) {
-                       saved = true;
-                       dev_info(adev->dev, "Saved %d dwords of commands "
-                                "on ring %d.\n", ring_sizes[i], i);
-               }
+               kthread_park(ring->sched.thread);
+               amd_sched_hw_job_reset(&ring->sched);
        }
+       /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+       amdgpu_fence_driver_force_completion(adev);
+
+       /* save scratch */
+       amdgpu_atombios_scratch_regs_save(adev);
+       r = amdgpu_suspend(adev);
 
 retry:
+       /* Disable fb access */
+       if (adev->mode_info.num_crtc) {
+               struct amdgpu_mode_mc_save save;
+               amdgpu_display_stop_mc_access(adev, &save);
+               amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+       }
+
        r = amdgpu_asic_reset(adev);
        /* post card */
        amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1878,32 +1971,29 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
                dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
                r = amdgpu_resume(adev);
        }
-
+       /* restore scratch */
+       amdgpu_atombios_scratch_regs_restore(adev);
        if (!r) {
+               r = amdgpu_ib_ring_tests(adev);
+               if (r) {
+                       dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+                       r = amdgpu_suspend(adev);
+                       goto retry;
+               }
+
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = adev->rings[i];
                        if (!ring)
                                continue;
-
-                       amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
-                       ring_sizes[i] = 0;
-                       ring_data[i] = NULL;
-               }
-
-               r = amdgpu_ib_ring_tests(adev);
-               if (r) {
-                       dev_err(adev->dev, "ib ring test failed (%d).\n", r);
-                       if (saved) {
-                               saved = false;
-                               r = amdgpu_suspend(adev);
-                               goto retry;
-                       }
+                       amd_sched_job_recovery(&ring->sched);
+                       kthread_unpark(ring->sched.thread);
                }
        } else {
-               amdgpu_fence_driver_force_completion(adev);
+               dev_err(adev->dev, "asic resume failed (%d).\n", r);
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-                       if (adev->rings[i])
-                               kfree(ring_data[i]);
+                       if (adev->rings[i]) {
+                               kthread_unpark(adev->rings[i]->sched.thread);
+                       }
                }
        }
 
@@ -1914,13 +2004,11 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
                /* bad news, how to tell it to userspace ? */
                dev_info(adev->dev, "GPU reset failed\n");
        }
+       amdgpu_irq_gpu_reset_resume_helper(adev);
 
        return r;
 }
 
-#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007  /* gen: chipset 1/2, asic 1/2/3 */
-#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
-
 void amdgpu_get_pcie_info(struct amdgpu_device *adev)
 {
        u32 mask;
@@ -2074,20 +2162,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;
+       bool use_bank;
+       unsigned instance_bank, sh_bank, se_bank;
 
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
+       if (*pos & (1ULL << 62)) {
+               se_bank = (*pos >> 24) & 0x3FF;
+               sh_bank = (*pos >> 34) & 0x3FF;
+               instance_bank = (*pos >> 44) & 0x3FF;
+               use_bank = 1;
+               *pos &= 0xFFFFFF;
+       } else {
+               use_bank = 0;
+       }
+
+       if (use_bank) {
+               if (sh_bank >= adev->gfx.config.max_sh_per_se ||
+                   se_bank >= adev->gfx.config.max_shader_engines)
+                       return -EINVAL;
+               mutex_lock(&adev->grbm_idx_mutex);
+               amdgpu_gfx_select_se_sh(adev, se_bank,
+                                       sh_bank, instance_bank);
+       }
+
        while (size) {
                uint32_t value;
 
                if (*pos > adev->rmmio_size)
-                       return result;
+                       goto end;
 
                value = RREG32(*pos >> 2);
                r = put_user(value, (uint32_t *)buf);
-               if (r)
-                       return r;
+               if (r) {
+                       result = r;
+                       goto end;
+               }
 
                result += 4;
                buf += 4;
@@ -2095,6 +2206,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
                size -= 4;
        }
 
+end:
+       if (use_bank) {
+               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+               mutex_unlock(&adev->grbm_idx_mutex);
+       }
+
        return result;
 }
 
@@ -2294,6 +2411,68 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
        return result;
 }
 
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+                                       size_t size, loff_t *pos)
+{
+       struct amdgpu_device *adev = f->f_inode->i_private;
+       ssize_t result = 0;
+       int r;
+       uint32_t *config, no_regs = 0;
+
+       if (size & 0x3 || *pos & 0x3)
+               return -EINVAL;
+
+       config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+       if (!config)
+               return -ENOMEM;
+
+       /* version, increment each time something is added */
+       config[no_regs++] = 0;
+       config[no_regs++] = adev->gfx.config.max_shader_engines;
+       config[no_regs++] = adev->gfx.config.max_tile_pipes;
+       config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+       config[no_regs++] = adev->gfx.config.max_sh_per_se;
+       config[no_regs++] = adev->gfx.config.max_backends_per_se;
+       config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+       config[no_regs++] = adev->gfx.config.max_gprs;
+       config[no_regs++] = adev->gfx.config.max_gs_threads;
+       config[no_regs++] = adev->gfx.config.max_hw_contexts;
+       config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+       config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+       config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+       config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+       config[no_regs++] = adev->gfx.config.num_tile_pipes;
+       config[no_regs++] = adev->gfx.config.backend_enable_mask;
+       config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+       config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+       config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+       config[no_regs++] = adev->gfx.config.num_gpus;
+       config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+       config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+       config[no_regs++] = adev->gfx.config.gb_addr_config;
+       config[no_regs++] = adev->gfx.config.num_rbs;
+
+       while (size && (*pos < no_regs * 4)) {
+               uint32_t value;
+
+               value = config[*pos >> 2];
+               r = put_user(value, (uint32_t *)buf);
+               if (r) {
+                       kfree(config);
+                       return r;
+               }
+
+               result += 4;
+               buf += 4;
+               *pos += 4;
+               size -= 4;
+       }
+
+       kfree(config);
+       return result;
+}
+
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_regs_read,
@@ -2319,11 +2498,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
        .llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+       .owner = THIS_MODULE,
+       .read = amdgpu_debugfs_gca_config_read,
+       .llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
        &amdgpu_debugfs_regs_fops,
        &amdgpu_debugfs_regs_didt_fops,
        &amdgpu_debugfs_regs_pcie_fops,
        &amdgpu_debugfs_regs_smc_fops,
+       &amdgpu_debugfs_gca_config_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -2331,6 +2517,7 @@ static const char *debugfs_regs_names[] = {
        "amdgpu_regs_didt",
        "amdgpu_regs_pcie",
        "amdgpu_regs_smc",
+       "amdgpu_gca_config",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
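The amdgpu_regs debugfs file now overloads the file offset: bit 62 enables banked access, bits 24-33/34-43/44-53 carry the SE/SH/instance bank selects, and the low 24 bits remain the register byte offset. A userspace sketch of how a tool could build such an offset and read one banked register (the debugfs path assumes card 0, and 0x98F4 is only a placeholder register offset):

	#define _FILE_OFFSET_BITS 64
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Mirrors the decoding in amdgpu_debugfs_regs_read() above. */
	static off_t banked_offset(uint32_t byte_off, uint32_t se, uint32_t sh,
				   uint32_t instance)
	{
		return (off_t)(1ULL << 62) |
		       ((off_t)(se & 0x3FF) << 24) |
		       ((off_t)(sh & 0x3FF) << 34) |
		       ((off_t)(instance & 0x3FF) << 44) |
		       (byte_off & 0xFFFFFF);
	}

	int main(void)
	{
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
		uint32_t value;

		if (fd < 0)
			return 1;
		/* one dword from SE 1, SH 0, instance 0 */
		if (pread(fd, &value, 4, banked_offset(0x98F4, 1, 0, 0)) == 4)
			printf("0x%08x\n", value);
		close(fd);
		return 0;
	}
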
index a6eecf6f9065df5e466219f0ce73798643c9097f..7dbe8d02c5a637d42f9ea345b3cfc087966555c6 100644 (file)
@@ -220,19 +220,17 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 
        r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
        if (unlikely(r != 0)) {
-               amdgpu_bo_unreserve(new_rbo);
                r = -EINVAL;
                DRM_ERROR("failed to pin new rbo buffer before flip\n");
-               goto cleanup;
+               goto unreserve;
        }
 
        r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
                                              &work->shared_count,
                                              &work->shared);
        if (unlikely(r != 0)) {
-               amdgpu_bo_unreserve(new_rbo);
                DRM_ERROR("failed to get fences for buffer\n");
-               goto cleanup;
+               goto unpin;
        }
 
        amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
@@ -275,9 +273,11 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
                DRM_ERROR("failed to reserve new rbo in error path\n");
                goto cleanup;
        }
+unpin:
        if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
                DRM_ERROR("failed to unpin new rbo in error path\n");
        }
+unreserve:
        amdgpu_bo_unreserve(new_rbo);
 
 cleanup:
index f888c015f76c2e945e0b5d0fbcb20dbd538eb25c..015f1f4aae53263a66b008aeb49b43c0fdc2ce7c 100644 (file)
@@ -82,8 +82,12 @@ int amdgpu_exp_hw_support = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_powerplay = -1;
+int amdgpu_powercontainment = 1;
 unsigned amdgpu_pcie_gen_cap = 0;
 unsigned amdgpu_pcie_lane_cap = 0;
+unsigned amdgpu_cg_mask = 0xffffffff;
+unsigned amdgpu_pg_mask = 0xffffffff;
+char *amdgpu_disable_cu = NULL;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -160,6 +164,9 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 #ifdef CONFIG_DRM_AMD_POWERPLAY
 MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(powerplay, amdgpu_powerplay, int, 0444);
+
+MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
+module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
 #endif
 
 MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
@@ -168,6 +175,15 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
 MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
 module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
 
+MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
+module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+
+MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
+module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+
+MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
+module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
        /* Kaveri */
@@ -413,7 +429,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_ignore_hotplug(pdev);
-       pci_set_power_state(pdev, PCI_D3cold);
+       if (amdgpu_is_atpx_hybrid())
+               pci_set_power_state(pdev, PCI_D3cold);
+       else if (!amdgpu_has_atpx_dgpu_power_cntl())
+               pci_set_power_state(pdev, PCI_D3hot);
        drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 
        return 0;
@@ -430,7 +449,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 
        drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-       pci_set_power_state(pdev, PCI_D0);
+       if (amdgpu_is_atpx_hybrid() ||
+           !amdgpu_has_atpx_dgpu_power_cntl())
+               pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        ret = pci_enable_device(pdev);
        if (ret)
@@ -515,7 +536,7 @@ static struct drm_driver kms_driver = {
        .driver_features =
            DRIVER_USE_AGP |
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
-           DRIVER_PRIME | DRIVER_RENDER,
+           DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
        .dev_priv_size = 0,
        .load = amdgpu_driver_load_kms,
        .open = amdgpu_driver_open_kms,
@@ -590,7 +611,6 @@ static int __init amdgpu_init(void)
        DRM_INFO("amdgpu kernel modesetting enabled.\n");
        driver = &kms_driver;
        pdriver = &amdgpu_kms_pci_driver;
-       driver->driver_features |= DRIVER_MODESET;
        driver->num_ioctls = amdgpu_max_kms_ioctl;
        amdgpu_register_atpx_handler();
        /* let modprobe override vga console setting */
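Runtime suspend now distinguishes three platform types instead of unconditionally forcing D3cold. Condensed into one decision (the wrapper is hypothetical; the two predicates are the ATPX helpers exported earlier in this merge):

	static void pick_runtime_suspend_state(struct pci_dev *pdev)
	{
		if (amdgpu_is_atpx_hybrid())
			/* hybrid gfx: standardized D3cold flow */
			pci_set_power_state(pdev, PCI_D3cold);
		else if (!amdgpu_has_atpx_dgpu_power_cntl())
			/* no ATPX power control: D3hot is the best available */
			pci_set_power_state(pdev, PCI_D3hot);
		/* else: ATPX power_cntl cuts dGPU power itself, leave the state alone */
	}
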
index 8fab6486064f507c219872851ef4cd6a7fe98e10..88fbed2389c0a181d69c7918d340ec82010dc811 100644 (file)
@@ -503,7 +503,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
        if (r)
                goto error_print;
 
-       amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
+       amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
        list_for_each_entry(entry, &list, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
index 9f95da4f053616f396418d7b794cf033620ff7cb..a074edd95c701c2705e2323bfaf713692a852654 100644 (file)
@@ -70,3 +70,47 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
                }
        }
 }
+
+/**
+ * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
+ *
+ * @mask: array in which the per-shader array disable masks will be stored
+ * @max_se: number of SEs
+ * @max_sh: number of SHs
+ *
+ * The bitmask of CUs to be disabled in the shader array determined by se and
+ * sh is stored in mask[se * max_sh + sh].
+ */
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+{
+       unsigned se, sh, cu;
+       const char *p;
+
+       memset(mask, 0, sizeof(*mask) * max_se * max_sh);
+
+       if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
+               return;
+
+       p = amdgpu_disable_cu;
+       for (;;) {
+               char *next;
+               int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+               if (ret < 3) {
+                       DRM_ERROR("amdgpu: could not parse disable_cu\n");
+                       return;
+               }
+
+               if (se < max_se && sh < max_sh && cu < 16) {
+                       DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
+                       mask[se * max_sh + sh] |= 1u << cu;
+               } else {
+                       DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
+                                 se, sh, cu);
+               }
+
+               next = strchr(p, ',');
+               if (!next)
+                       break;
+               p = next + 1;
+       }
+}
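The expected parameter format is a comma-separated list of se.sh.cu triples, e.g. amdgpu.disable_cu=2.0.3,1.0.7. A standalone userspace sketch of the same parse loop, handy for checking a value before passing it to the module:

	#include <stdio.h>
	#include <string.h>

	int main(int argc, char **argv)
	{
		const char *p = argc > 1 ? argv[1] : "2.0.3,1.0.7";

		for (;;) {
			unsigned se, sh, cu;
			const char *next;

			if (sscanf(p, "%u.%u.%u", &se, &sh, &cu) < 3) {
				fprintf(stderr, "could not parse '%s'\n", p);
				return 1;
			}
			printf("would disable CU %u.%u.%u\n", se, sh, cu);

			next = strchr(p, ',');
			if (!next)
				break;
			p = next + 1;
		}
		return 0;
	}
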
index dc06cbda7be6c4a6c9e67008d7bc69b39044b86d..51321e154c09997037cf51ffa8bad5a0a90f5e67 100644 (file)
@@ -27,4 +27,6 @@
 int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
 void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
 
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+
 #endif
index 34e35423b78e81d3d91102e2acacf889fa9792c0..46c3097c5224780c16868bc86aec5cbe5e79a45c 100644 (file)
@@ -160,10 +160,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                patch_offset = amdgpu_ring_init_cond_exec(ring);
 
        if (vm) {
-               r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
-                                   job->gds_base, job->gds_size,
-                                   job->gws_base, job->gws_size,
-                                   job->oa_base, job->oa_size);
+               r = amdgpu_vm_flush(ring, job);
                if (r) {
                        amdgpu_ring_undo(ring);
                        return r;
@@ -203,11 +200,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        }
 
        /* wrap the last IB with fence */
-       if (job && job->uf_bo) {
-               uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
-
-               addr += job->uf_offset;
-               amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
+       if (job && job->uf_addr) {
+               amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
                                       AMDGPU_FENCE_FLAG_64BIT);
        }
 
index 835a3fa8d8dfde320d73ba3af8f8fa306bd758e1..278708f5a744eebb69f0d719bfcb198efcf2ec11 100644 (file)
@@ -383,6 +383,18 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
        return r;
 }
 
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+{
+       int i, j;
+       for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
+               struct amdgpu_irq_src *src = adev->irq.sources[i];
+               if (!src)
+                       continue;
+               for (j = 0; j < src->num_types; j++)
+                       amdgpu_irq_update(adev, src, j);
+       }
+}
+
 /**
  * amdgpu_irq_get - enable interrupt
  *
index e124b59f39c146dadf7d34effe307d3376349c81..7ef09352e5347c9ce761b07328bb8a3c8c7d9514 100644 (file)
@@ -94,6 +94,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type);
 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                        unsigned type);
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
 
 int amdgpu_irq_add_domain(struct amdgpu_device *adev);
 void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
index f0dafa514fe48f785b5dbf53cc3367e214732414..aaee0c8f6731d568bb1fbffee8c139da0c309274 100644 (file)
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-static void amdgpu_job_free_handler(struct work_struct *ws)
+static void amdgpu_job_timedout(struct amd_sched_job *s_job)
 {
-       struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
-       amd_sched_job_put(&job->base);
-}
+       struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-void amdgpu_job_timeout_func(struct work_struct *work)
-{
-       struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
        DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-                               job->base.sched->name,
-                               (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
-                               job->ring->fence_drv.sync_seq);
-
-       amd_sched_job_put(&job->base);
+                 job->base.sched->name,
+                 atomic_read(&job->ring->fence_drv.last_seq),
+                 job->ring->fence_drv.sync_seq);
+       amdgpu_gpu_reset(job->adev);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;
-       INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
 
        amdgpu_sync_create(&(*job)->sync);
 
@@ -86,27 +79,33 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
        return r;
 }
 
-void amdgpu_job_free(struct amdgpu_job *job)
+void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
-       unsigned i;
        struct fence *f;
+       unsigned i;
+
        /* use sched fence if available */
-       f = (job->base.s_fence)? &job->base.s_fence->base : job->fence;
+       f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
        for (i = 0; i < job->num_ibs; ++i)
-               amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
-       fence_put(job->fence);
+               amdgpu_ib_free(job->adev, &job->ibs[i], f);
+}
 
-       amdgpu_bo_unref(&job->uf_bo);
-       amdgpu_sync_free(&job->sync);
+void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+{
+       struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-       if (!job->base.use_sched)
-               kfree(job);
+       fence_put(job->fence);
+       amdgpu_sync_free(&job->sync);
+       kfree(job);
 }
 
-void amdgpu_job_free_func(struct kref *refcount)
+void amdgpu_job_free(struct amdgpu_job *job)
 {
-       struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
+       amdgpu_job_free_resources(job);
+
+       fence_put(job->fence);
+       amdgpu_sync_free(&job->sync);
        kfree(job);
 }
 
@@ -114,22 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f)
 {
-       struct fence *fence;
        int r;
        job->ring = ring;
 
        if (!f)
                return -EINVAL;
 
-       r = amd_sched_job_init(&job->base, &ring->sched,
-                              entity, amdgpu_job_timeout_func,
-                              amdgpu_job_free_func, owner, &fence);
+       r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
        if (r)
                return r;
 
        job->owner = owner;
        job->ctx = entity->fence_context;
-       *f = fence_get(fence);
+       *f = fence_get(&job->base.s_fence->finished);
+       amdgpu_job_free_resources(job);
        amd_sched_entity_push_job(&job->base);
 
        return 0;
@@ -147,8 +144,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
                int r;
 
                r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-                                     &job->base.s_fence->base,
-                                     &job->vm_id, &job->vm_pd_addr);
+                                     &job->base.s_fence->finished,
+                                     job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);
 
@@ -170,11 +167,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
        }
        job = to_amdgpu_job(sched_job);
 
-       r = amdgpu_sync_wait(&job->sync);
-       if (r) {
-               DRM_ERROR("failed to sync wait (%d)\n", r);
-               return NULL;
-       }
+       BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
        trace_amdgpu_sched_run_job(job);
        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
@@ -185,14 +178,15 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
        }
 
 err:
+       /* if gpu reset, hw fence will be replaced here */
+       fence_put(job->fence);
        job->fence = fence;
-       amdgpu_job_free(job);
        return fence;
 }
 
 const struct amd_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
-       .begin_job = amd_sched_job_begin,
-       .finish_job = amd_sched_job_finish,
+       .timedout_job = amdgpu_job_timedout,
+       .free_job = amdgpu_job_free_cb
 };
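After this rework the scheduler owns the job: amd_sched_job_init() binds it to the entity, amd_sched_entity_push_job() hands it over, and the scheduler later calls run_job, timedout_job on a hang (which now triggers amdgpu_gpu_reset()), and free_job once the fence completes. A condensed restatement of the submit side, mirroring amdgpu_job_submit() above (the wrapper name is hypothetical):

	static int submit_to_scheduler(struct amdgpu_job *job, struct amdgpu_ring *ring,
				       struct amd_sched_entity *entity, void *owner,
				       struct fence **f)
	{
		int r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);

		if (r)
			return r;	/* init failed: driver still owns the job */

		*f = fence_get(&job->base.s_fence->finished);
		amdgpu_job_free_resources(job);		/* IB memory can go early */
		amd_sched_entity_push_job(&job->base);	/* scheduler owns it now */
		return 0;
	}
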
index 38a28d137f1ded6b6955a0ed39b36f8850f2ce92..a8efbb54423f60740fb197f4bcd9a1d28761c9f1 100644 (file)
@@ -148,6 +148,65 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
        return r;
 }
 
+static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
+                               struct drm_amdgpu_query_fw *query_fw,
+                               struct amdgpu_device *adev)
+{
+       switch (query_fw->fw_type) {
+       case AMDGPU_INFO_FW_VCE:
+               fw_info->ver = adev->vce.fw_version;
+               fw_info->feature = adev->vce.fb_version;
+               break;
+       case AMDGPU_INFO_FW_UVD:
+               fw_info->ver = adev->uvd.fw_version;
+               fw_info->feature = 0;
+               break;
+       case AMDGPU_INFO_FW_GMC:
+               fw_info->ver = adev->mc.fw_version;
+               fw_info->feature = 0;
+               break;
+       case AMDGPU_INFO_FW_GFX_ME:
+               fw_info->ver = adev->gfx.me_fw_version;
+               fw_info->feature = adev->gfx.me_feature_version;
+               break;
+       case AMDGPU_INFO_FW_GFX_PFP:
+               fw_info->ver = adev->gfx.pfp_fw_version;
+               fw_info->feature = adev->gfx.pfp_feature_version;
+               break;
+       case AMDGPU_INFO_FW_GFX_CE:
+               fw_info->ver = adev->gfx.ce_fw_version;
+               fw_info->feature = adev->gfx.ce_feature_version;
+               break;
+       case AMDGPU_INFO_FW_GFX_RLC:
+               fw_info->ver = adev->gfx.rlc_fw_version;
+               fw_info->feature = adev->gfx.rlc_feature_version;
+               break;
+       case AMDGPU_INFO_FW_GFX_MEC:
+               if (query_fw->index == 0) {
+                       fw_info->ver = adev->gfx.mec_fw_version;
+                       fw_info->feature = adev->gfx.mec_feature_version;
+               } else if (query_fw->index == 1) {
+                       fw_info->ver = adev->gfx.mec2_fw_version;
+                       fw_info->feature = adev->gfx.mec2_feature_version;
+               } else
+                       return -EINVAL;
+               break;
+       case AMDGPU_INFO_FW_SMC:
+               fw_info->ver = adev->pm.fw_version;
+               fw_info->feature = 0;
+               break;
+       case AMDGPU_INFO_FW_SDMA:
+               if (query_fw->index >= adev->sdma.num_instances)
+                       return -EINVAL;
+               fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
+               fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
 /*
  * Userspace get information ioctl
  */
@@ -294,67 +353,20 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
        }
        case AMDGPU_INFO_TIMESTAMP:
-               ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
+               ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
        case AMDGPU_INFO_FW_VERSION: {
                struct drm_amdgpu_info_firmware fw_info;
+               int ret;
 
                /* We only support one instance of each IP block right now. */
                if (info->query_fw.ip_instance != 0)
                        return -EINVAL;
 
-               switch (info->query_fw.fw_type) {
-               case AMDGPU_INFO_FW_VCE:
-                       fw_info.ver = adev->vce.fw_version;
-                       fw_info.feature = adev->vce.fb_version;
-                       break;
-               case AMDGPU_INFO_FW_UVD:
-                       fw_info.ver = adev->uvd.fw_version;
-                       fw_info.feature = 0;
-                       break;
-               case AMDGPU_INFO_FW_GMC:
-                       fw_info.ver = adev->mc.fw_version;
-                       fw_info.feature = 0;
-                       break;
-               case AMDGPU_INFO_FW_GFX_ME:
-                       fw_info.ver = adev->gfx.me_fw_version;
-                       fw_info.feature = adev->gfx.me_feature_version;
-                       break;
-               case AMDGPU_INFO_FW_GFX_PFP:
-                       fw_info.ver = adev->gfx.pfp_fw_version;
-                       fw_info.feature = adev->gfx.pfp_feature_version;
-                       break;
-               case AMDGPU_INFO_FW_GFX_CE:
-                       fw_info.ver = adev->gfx.ce_fw_version;
-                       fw_info.feature = adev->gfx.ce_feature_version;
-                       break;
-               case AMDGPU_INFO_FW_GFX_RLC:
-                       fw_info.ver = adev->gfx.rlc_fw_version;
-                       fw_info.feature = adev->gfx.rlc_feature_version;
-                       break;
-               case AMDGPU_INFO_FW_GFX_MEC:
-                       if (info->query_fw.index == 0) {
-                               fw_info.ver = adev->gfx.mec_fw_version;
-                               fw_info.feature = adev->gfx.mec_feature_version;
-                       } else if (info->query_fw.index == 1) {
-                               fw_info.ver = adev->gfx.mec2_fw_version;
-                               fw_info.feature = adev->gfx.mec2_feature_version;
-                       } else
-                               return -EINVAL;
-                       break;
-               case AMDGPU_INFO_FW_SMC:
-                       fw_info.ver = adev->pm.fw_version;
-                       fw_info.feature = 0;
-                       break;
-               case AMDGPU_INFO_FW_SDMA:
-                       if (info->query_fw.index >= adev->sdma.num_instances)
-                               return -EINVAL;
-                       fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
-                       fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
-                       break;
-               default:
-                       return -EINVAL;
-               }
+               ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
+               if (ret)
+                       return ret;
+
                return copy_to_user(out, &fw_info,
                                    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
        }
@@ -453,7 +465,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        dev_info.max_memory_clock = adev->pm.default_mclk * 10;
                }
                dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
-               dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
+               dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
+                       adev->gfx.config.max_shader_engines;
                dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
                dev_info._pad = 0;
                dev_info.ids_flags = 0;
@@ -761,3 +774,130 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
        DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct drm_amdgpu_info_firmware fw_info;
+       struct drm_amdgpu_query_fw query_fw;
+       int ret, i;
+
+       /* VCE */
+       query_fw.fw_type = AMDGPU_INFO_FW_VCE;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* UVD */
+       query_fw.fw_type = AMDGPU_INFO_FW_UVD;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* GMC */
+       query_fw.fw_type = AMDGPU_INFO_FW_GMC;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* ME */
+       query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* PFP */
+       query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* CE */
+       query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* RLC */
+       query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* MEC */
+       query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
+       query_fw.index = 0;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* MEC2 */
+       if (adev->asic_type == CHIP_KAVERI ||
+           (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
+               query_fw.index = 1;
+               ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+               if (ret)
+                       return ret;
+               seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
+                          fw_info.feature, fw_info.ver);
+       }
+
+       /* SMC */
+       query_fw.fw_type = AMDGPU_INFO_FW_SMC;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* SDMA */
+       query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               query_fw.index = i;
+               ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+               if (ret)
+                       return ret;
+               seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
+                          i, fw_info.feature, fw_info.ver);
+       }
+
+       return 0;
+}
+
+static const struct drm_info_list amdgpu_firmware_info_list[] = {
+       {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
+};
+#endif
+
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
+                                       ARRAY_SIZE(amdgpu_firmware_info_list));
+#else
+       return 0;
+#endif
+}
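
The new amdgpu_firmware_info() helper backs both the AMDGPU_INFO_FW_VERSION ioctl and the debugfs dump above. For reference, userspace reaches the same data through libdrm's amdgpu wrapper; a sketch assuming libdrm_amdgpu is installed (build with pkg-config --cflags --libs libdrm_amdgpu) and that the render node is /dev/dri/renderD128:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <amdgpu.h>
    #include <amdgpu_drm.h>

    int main(void)
    {
            amdgpu_device_handle dev;
            uint32_t major, minor, ver = 0, feature = 0;
            int fd = open("/dev/dri/renderD128", O_RDWR);

            if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
                    return 1;

            /* Lands in the AMDGPU_INFO_FW_VERSION case, which calls
             * amdgpu_firmware_info() in the kernel. */
            if (!amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_ME,
                                               0 /* instance */, 0 /* index */,
                                               &ver, &feature))
                    printf("ME feature version: %u, firmware version: 0x%08x\n",
                           feature, ver);

            amdgpu_device_deinitialize(dev);
            close(fd);
            return 0;
    }
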
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 7ecea83ce453cae868306b95b1600a01653a6de7..6f0873c75a25650feb9c65330f1a3b133ab64f50 100644 (file)
@@ -589,6 +589,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem)
 {
        struct amdgpu_bo *rbo;
+       struct ttm_mem_reg *old_mem = &bo->mem;
 
        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return;
@@ -602,6 +603,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 
        /* move_notify is called before move happens */
        amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+
+       trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
 }
 
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 589b36e8c5cf4c1ac3373da7352bd57b0fd5758d..ff63b88b0ffaf721a58d63f81bb47ab227ac93b9 100644 (file)
@@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type state = 0;
-       long idx;
+       unsigned long idx;
        int ret;
 
        if (strlen(buf) == 1)
                adev->pp_force_state_enabled = false;
-       else {
-               ret = kstrtol(buf, 0, &idx);
+       else if (adev->pp_enabled) {
+               struct pp_states_info data;
 
-               if (ret) {
+               ret = kstrtoul(buf, 0, &idx);
+               if (ret || idx >= ARRAY_SIZE(data.states)) {
                        count = -EINVAL;
                        goto fail;
                }
 
-               if (adev->pp_enabled) {
-                       struct pp_states_info data;
-                       amdgpu_dpm_get_pp_num_states(adev, &data);
-                       state = data.states[idx];
-                       /* only set user selected power states */
-                       if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
-                               state != POWER_STATE_TYPE_DEFAULT) {
-                               amdgpu_dpm_dispatch_task(adev,
-                                               AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
-                               adev->pp_force_state_enabled = true;
-                       }
+               amdgpu_dpm_get_pp_num_states(adev, &data);
+               state = data.states[idx];
+               /* only set user selected power states */
+               if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+                   state != POWER_STATE_TYPE_DEFAULT) {
+                       amdgpu_dpm_dispatch_task(adev,
+                                       AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+                       adev->pp_force_state_enabled = true;
                }
        }
 fail:
@@ -349,6 +347,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 
        if (adev->pp_enabled)
                size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+       else if (adev->pm.funcs->print_clock_levels)
+               size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
 
        return size;
 }
@@ -365,7 +365,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
        uint32_t i, mask = 0;
        char sub_str[2];
 
-       for (i = 0; i < strlen(buf) - 1; i++) {
+       for (i = 0; i < strlen(buf); i++) {
+               if (*(buf + i) == '\n')
+                       continue;
                sub_str[0] = *(buf + i);
                sub_str[1] = '\0';
                ret = kstrtol(sub_str, 0, &level);
@@ -379,6 +381,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 
        if (adev->pp_enabled)
                amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+       else if (adev->pm.funcs->force_clock_level)
+               adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
 fail:
        return count;
 }
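
The loop change above skips the trailing newline instead of silently dropping the last character, so every digit written to a pp_dpm_* file contributes one bit to the level mask. A standalone sketch of the same parsing (userspace C, error handling omitted), where "13\n" yields mask 0xa:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Mirror of the sysfs parser: one mask bit per digit, '\n' skipped. */
    static unsigned int parse_level_mask(const char *buf)
    {
            char sub_str[2] = { 0 };
            unsigned int mask = 0;
            size_t i;

            for (i = 0; i < strlen(buf); i++) {
                    if (buf[i] == '\n')
                            continue;
                    sub_str[0] = buf[i];
                    mask |= 1u << strtol(sub_str, NULL, 0);
            }
            return mask;
    }

    int main(void)
    {
            printf("mask for \"13\\n\" = 0x%x\n", parse_level_mask("13\n"));
            return 0; /* prints 0xa: bits 1 and 3 set */
    }
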
@@ -393,6 +397,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
 
        if (adev->pp_enabled)
                size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+       else if (adev->pm.funcs->print_clock_levels)
+               size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
 
        return size;
 }
@@ -409,7 +415,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
        uint32_t i, mask = 0;
        char sub_str[2];
 
-       for (i = 0; i < strlen(buf) - 1; i++) {
+       for (i = 0; i < strlen(buf); i++) {
+               if (*(buf + i) == '\n')
+                       continue;
                sub_str[0] = *(buf + i);
                sub_str[1] = '\0';
                ret = kstrtol(sub_str, 0, &level);
@@ -423,6 +431,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 
        if (adev->pp_enabled)
                amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+       else if (adev->pm.funcs->force_clock_level)
+               adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
 fail:
        return count;
 }
@@ -437,6 +447,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 
        if (adev->pp_enabled)
                size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+       else if (adev->pm.funcs->print_clock_levels)
+               size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
 
        return size;
 }
@@ -453,7 +465,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
        uint32_t i, mask = 0;
        char sub_str[2];
 
-       for (i = 0; i < strlen(buf) - 1; i++) {
+       for (i = 0; i < strlen(buf); i++) {
+               if (*(buf + i) == '\n')
+                       continue;
                sub_str[0] = *(buf + i);
                sub_str[1] = '\0';
                ret = kstrtol(sub_str, 0, &level);
@@ -467,6 +481,100 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 
        if (adev->pp_enabled)
                amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+       else if (adev->pm.funcs->force_clock_level)
+               adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+fail:
+       return count;
+}
+
+static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
+               struct device_attribute *attr,
+               char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       uint32_t value = 0;
+
+       if (adev->pp_enabled)
+               value = amdgpu_dpm_get_sclk_od(adev);
+       else if (adev->pm.funcs->get_sclk_od)
+               value = adev->pm.funcs->get_sclk_od(adev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
+               struct device_attribute *attr,
+               const char *buf,
+               size_t count)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       int ret;
+       long int value;
+
+       ret = kstrtol(buf, 0, &value);
+
+       if (ret) {
+               count = -EINVAL;
+               goto fail;
+       }
+
+       if (adev->pp_enabled) {
+               amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+               amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+       } else if (adev->pm.funcs->set_sclk_od) {
+               adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+               adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+               amdgpu_pm_compute_clocks(adev);
+       }
+
+fail:
+       return count;
+}
+
+static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
+               struct device_attribute *attr,
+               char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       uint32_t value = 0;
+
+       if (adev->pp_enabled)
+               value = amdgpu_dpm_get_mclk_od(adev);
+       else if (adev->pm.funcs->get_mclk_od)
+               value = adev->pm.funcs->get_mclk_od(adev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
+               struct device_attribute *attr,
+               const char *buf,
+               size_t count)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       int ret;
+       long int value;
+
+       ret = kstrtol(buf, 0, &value);
+
+       if (ret) {
+               count = -EINVAL;
+               goto fail;
+       }
+
+       if (adev->pp_enabled) {
+               amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+               amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+       } else if (adev->pm.funcs->set_mclk_od) {
+               adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+               adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+               amdgpu_pm_compute_clocks(adev);
+       }
+
 fail:
        return count;
 }
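
pp_sclk_od and pp_mclk_od expose the overdrive value as a plain integer; a write goes to either the powerplay event path or the legacy dpm path shown above. A minimal accessor sketch (the sysfs location under card0 is an assumption, the attribute name comes from the patch):

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/class/drm/card0/device/pp_sclk_od";
            FILE *f = fopen(path, "r");
            int od = -1;

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &od) == 1)  /* amdgpu_get_pp_sclk_od() */
                    printf("current sclk od: %d\n", od);
            fclose(f);

            f = fopen(path, "w");
            if (!f)
                    return 1;
            fprintf(f, "0\n");              /* parsed by kstrtol() in the setter */
            fclose(f);
            return 0;
    }
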
@@ -492,6 +600,12 @@ static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
 static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
                amdgpu_get_pp_dpm_pcie,
                amdgpu_set_pp_dpm_pcie);
+static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
+               amdgpu_get_pp_sclk_od,
+               amdgpu_set_pp_sclk_od);
+static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
+               amdgpu_get_pp_mclk_od,
+               amdgpu_set_pp_mclk_od);
 
 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
@@ -1110,22 +1224,34 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
                        DRM_ERROR("failed to create device file pp_table\n");
                        return ret;
                }
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_sclk\n");
-                       return ret;
-               }
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_mclk\n");
-                       return ret;
-               }
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_pcie\n");
-                       return ret;
-               }
        }
+
+       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
+       if (ret) {
+               DRM_ERROR("failed to create device file pp_dpm_sclk\n");
+               return ret;
+       }
+       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
+       if (ret) {
+               DRM_ERROR("failed to create device file pp_dpm_mclk\n");
+               return ret;
+       }
+       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+       if (ret) {
+               DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+               return ret;
+       }
+       ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
+       if (ret) {
+               DRM_ERROR("failed to create device file pp_sclk_od\n");
+               return ret;
+       }
+       ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
+       if (ret) {
+               DRM_ERROR("failed to create device file pp_mclk_od\n");
+               return ret;
+       }
+
        ret = amdgpu_debugfs_pm_init(adev);
        if (ret) {
                DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1148,10 +1274,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
                device_remove_file(adev->dev, &dev_attr_pp_cur_state);
                device_remove_file(adev->dev, &dev_attr_pp_force_state);
                device_remove_file(adev->dev, &dev_attr_pp_table);
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
        }
+       device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
+       device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+       device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+       device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
+       device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
 }
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 6bd961fb43dcc7c7e3562d994e5fc0e79515ed01..c5738a22b69041356db2ef7e488defce76db6f30 100644 (file)
@@ -52,6 +52,7 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
                pp_init->chip_family = adev->family;
                pp_init->chip_id = adev->asic_type;
                pp_init->device = amdgpu_cgs_create_device(adev);
+               pp_init->powercontainment_enabled = amdgpu_powercontainment;
 
                ret = amd_powerplay_init(pp_init, amd_pp);
                kfree(pp_init);
@@ -183,13 +184,6 @@ static int amdgpu_pp_sw_fini(void *handle)
        if (ret)
                return ret;
 
-#ifdef CONFIG_DRM_AMD_POWERPLAY
-       if (adev->pp_enabled) {
-               amdgpu_pm_sysfs_fini(adev);
-               amd_powerplay_fini(adev->powerplay.pp_handle);
-       }
-#endif
-
        return ret;
 }
 
@@ -223,6 +217,22 @@ static int amdgpu_pp_hw_fini(void *handle)
        return ret;
 }
 
+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->pp_enabled) {
+               amdgpu_pm_sysfs_fini(adev);
+               amd_powerplay_fini(adev->powerplay.pp_handle);
+       }
+
+       if (adev->powerplay.ip_funcs->late_fini)
+               adev->powerplay.ip_funcs->late_fini(
+                         adev->powerplay.pp_handle);
+#endif
+}
+
 static int amdgpu_pp_suspend(void *handle)
 {
        int ret = 0;
@@ -311,6 +321,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
        .sw_fini = amdgpu_pp_sw_fini,
        .hw_init = amdgpu_pp_hw_init,
        .hw_fini = amdgpu_pp_hw_fini,
+       .late_fini = amdgpu_pp_late_fini,
        .suspend = amdgpu_pp_suspend,
        .resume = amdgpu_pp_resume,
        .is_idle = amdgpu_pp_is_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 3b02272db678e173f2caaffd96527aa332326dc0..3b885e3e9b56c006e3881bc966b3f969c8c244cb 100644 (file)
@@ -28,6 +28,7 @@
  */
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/debugfs.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -48,6 +49,7 @@
  */
 static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
                                    struct amdgpu_ring *ring);
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
 
 /**
  * amdgpu_ring_alloc - allocate space on the ring buffer
@@ -139,78 +141,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
        ring->wptr = ring->wptr_old;
 }
 
-/**
- * amdgpu_ring_backup - Back up the content of a ring
- *
- * @ring: the ring we want to back up
- *
- * Saves all unprocessed commits from a ring, returns the number of dwords saved.
- */
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
-                           uint32_t **data)
-{
-       unsigned size, ptr, i;
-
-       *data = NULL;
-
-       if (ring->ring_obj == NULL)
-               return 0;
-
-       /* it doesn't make sense to save anything if all fences are signaled */
-       if (!amdgpu_fence_count_emitted(ring))
-               return 0;
-
-       ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
-       size = ring->wptr + (ring->ring_size / 4);
-       size -= ptr;
-       size &= ring->ptr_mask;
-       if (size == 0)
-               return 0;
-
-       /* and then save the content of the ring */
-       *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
-       if (!*data)
-               return 0;
-       for (i = 0; i < size; ++i) {
-               (*data)[i] = ring->ring[ptr++];
-               ptr &= ring->ptr_mask;
-       }
-
-       return size;
-}
-
-/**
- * amdgpu_ring_restore - append saved commands to the ring again
- *
- * @ring: ring to append commands to
- * @size: number of dwords we want to write
- * @data: saved commands
- *
- * Allocates space on the ring and restore the previously saved commands.
- */
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
-                       unsigned size, uint32_t *data)
-{
-       int i, r;
-
-       if (!size || !data)
-               return 0;
-
-       /* restore the saved ring content */
-       r = amdgpu_ring_alloc(ring, size);
-       if (r)
-               return r;
-
-       for (i = 0; i < size; ++i) {
-               amdgpu_ring_write(ring, data[i]);
-       }
-
-       amdgpu_ring_commit(ring);
-       kfree(data);
-       return 0;
-}
-
 /**
  * amdgpu_ring_init - init driver ring struct.
  *
@@ -260,14 +190,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                return r;
        }
 
-       r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
-       if (r) {
-               dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
-               return r;
-       }
-       ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
-       ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
        r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
@@ -310,6 +232,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                }
                r = amdgpu_bo_kmap(ring->ring_obj,
                                       (void **)&ring->ring);
+
+               memset((void *)ring->ring, 0, ring->ring_size);
+
                amdgpu_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(adev->dev, "(%d) ring map failed\n", r);
@@ -343,10 +268,10 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
        ring->ring = NULL;
        ring->ring_obj = NULL;
 
+       amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
        amdgpu_wb_free(ring->adev, ring->fence_offs);
        amdgpu_wb_free(ring->adev, ring->rptr_offs);
        amdgpu_wb_free(ring->adev, ring->wptr_offs);
-       amdgpu_wb_free(ring->adev, ring->next_rptr_offs);
 
        if (ring_obj) {
                r = amdgpu_bo_reserve(ring_obj, false);
@@ -357,6 +282,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
                }
                amdgpu_bo_unref(&ring_obj);
        }
+       amdgpu_debugfs_ring_fini(ring);
 }
 
 /*
@@ -364,57 +290,62 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
  */
 #if defined(CONFIG_DEBUG_FS)
 
-static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
+/* Layout of file is 12 bytes consisting of
+ * - rptr
+ * - wptr
+ * - driver's copy of wptr
+ *
+ * followed by n-words of ring data
+ */
+static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
+                                       size_t size, loff_t *pos)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct amdgpu_device *adev = dev->dev_private;
-       int roffset = (unsigned long)node->info_ent->data;
-       struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
-       uint32_t rptr, wptr, rptr_next;
-       unsigned i;
-
-       wptr = amdgpu_ring_get_wptr(ring);
-       seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);
-
-       rptr = amdgpu_ring_get_rptr(ring);
-       rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
-       seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);
-
-       seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
-                  ring->wptr, ring->wptr);
-
-       if (!ring->ready)
-               return 0;
-
-       /* print 8 dw before current rptr as often it's the last executed
-        * packet that is the root issue
-        */
-       i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
-       while (i != rptr) {
-               seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
-               if (i == rptr)
-                       seq_puts(m, " *");
-               if (i == rptr_next)
-                       seq_puts(m, " #");
-               seq_puts(m, "\n");
-               i = (i + 1) & ring->ptr_mask;
+       struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
+       int r, i;
+       uint32_t value, result, early[3];
+
+       if (*pos & 3 || size & 3)
+               return -EINVAL;
+
+       result = 0;
+
+       if (*pos < 12) {
+               early[0] = amdgpu_ring_get_rptr(ring);
+               early[1] = amdgpu_ring_get_wptr(ring);
+               early[2] = ring->wptr;
+               for (i = *pos / 4; i < 3 && size; i++) {
+                       r = put_user(early[i], (uint32_t *)buf);
+                       if (r)
+                               return r;
+                       buf += 4;
+                       result += 4;
+                       size -= 4;
+                       *pos += 4;
+               }
        }
-       while (i != wptr) {
-               seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
-               if (i == rptr)
-                       seq_puts(m, " *");
-               if (i == rptr_next)
-                       seq_puts(m, " #");
-               seq_puts(m, "\n");
-               i = (i + 1) & ring->ptr_mask;
+
+       while (size) {
+               if (*pos >= (ring->ring_size + 12))
+                       return result;
+
+               value = ring->ring[(*pos - 12)/4];
+               r = put_user(value, (uint32_t*)buf);
+               if (r)
+                       return r;
+               buf += 4;
+               result += 4;
+               size -= 4;
+               *pos += 4;
        }
-       return 0;
+
+       return result;
 }
 
-static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
-static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];
+static const struct file_operations amdgpu_debugfs_ring_fops = {
+       .owner = THIS_MODULE,
+       .read = amdgpu_debugfs_ring_read,
+       .llseek = default_llseek
+};
 
 #endif
 
@@ -422,28 +353,27 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
                                    struct amdgpu_ring *ring)
 {
 #if defined(CONFIG_DEBUG_FS)
-       unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
-       unsigned i;
-       struct drm_info_list *info;
-       char *name;
-
-       for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
-               info = &amdgpu_debugfs_ring_info_list[i];
-               if (!info->data)
-                       break;
-       }
+       struct drm_minor *minor = adev->ddev->primary;
+       struct dentry *ent, *root = minor->debugfs_root;
+       char name[32];
 
-       if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
-               return -ENOSPC;
-
-       name = &amdgpu_debugfs_ring_names[i][0];
        sprintf(name, "amdgpu_ring_%s", ring->name);
-       info->name = name;
-       info->show = amdgpu_debugfs_ring_info;
-       info->driver_features = 0;
-       info->data = (void*)(uintptr_t)offset;
 
-       return amdgpu_debugfs_add_files(adev, info, 1);
+       ent = debugfs_create_file(name,
+                                 S_IFREG | S_IRUGO, root,
+                                 ring, &amdgpu_debugfs_ring_fops);
+       if (IS_ERR(ent))
+               return PTR_ERR(ent);
+
+       i_size_write(ent->d_inode, ring->ring_size + 12);
+       ring->ent = ent;
 #endif
        return 0;
 }
+
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+       debugfs_remove(ring->ent);
+#endif
+}
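
The per-ring debugfs file switches from a formatted seq_file dump to raw binary data: a 12-byte header holding rptr, wptr and the driver's copy of wptr as u32 values, followed by the ring contents. A reader sketch (the debugfs mount point and the "gfx" ring name are assumptions):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* 12-byte header layout from amdgpu_debugfs_ring_read() above */
            int fd = open("/sys/kernel/debug/dri/0/amdgpu_ring_gfx", O_RDONLY);
            uint32_t hdr[3];

            if (fd < 0 || read(fd, hdr, sizeof(hdr)) != sizeof(hdr))
                    return 1;
            printf("rptr=0x%08x wptr=0x%08x driver wptr=0x%08x\n",
                   hdr[0], hdr[1], hdr[2]);
            close(fd);  /* ring dwords would follow at offset 12 */
            return 0;
    }
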
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index b16366c2b4a0863ce0b40994c62cbc45ba779c7b..d8af37a845f4ee3d80ffdf2b27f00565150817f3 100644 (file)
@@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                return r;
        }
        r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+       memset(sa_manager->cpu_ptr, 0, sa_manager->size);
        amdgpu_bo_unreserve(sa_manager->bo);
        return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 34a92808bbd4dabc7ad7f62e4d5beec259bb6f82..5c8d3022fb8705acaf643519e27b2e80a187f0eb 100644 (file)
@@ -223,13 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_sync_is_idle - test if all fences are signaled
+ * amdgpu_sync_peek_fence - get the next fence not signaled yet
  *
  * @sync: the sync object
+ * @ring: optional ring to use for test
  *
- * Returns true if all fences in the sync object are signaled.
+ * Returns the next fence not signaled yet without removing it from the sync
+ * object.
  */
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+                                    struct amdgpu_ring *ring)
 {
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
@@ -237,6 +240,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
 
        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                struct fence *f = e->fence;
+               struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+               if (ring && s_fence) {
+                       /* For fences from the same ring it is sufficient
+                        * that they have been scheduled.
+                        */
+                       if (s_fence->sched == &ring->sched) {
+                               if (fence_is_signaled(&s_fence->scheduled))
+                                       continue;
+
+                               return &s_fence->scheduled;
+                       }
+               }
 
                if (fence_is_signaled(f)) {
                        hash_del(&e->node);
@@ -245,58 +261,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
                        continue;
                }
 
-               return false;
+               return f;
        }
 
-       return true;
+       return NULL;
 }
 
 /**
- * amdgpu_sync_cycle_fences - move fences from one sync object into another
+ * amdgpu_sync_get_fence - get the next fence from the sync object
  *
- * @dst: the destination sync object
- * @src: the source sync object
- * @fence: fence to add to source
+ * @sync: sync object to use
  *
- * Remove all fences from source and put them into destination and add
- * fence as new one into source.
+ * Gets and removes the next fence from the sync object that is not signaled yet.
  */
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
-                            struct fence *fence)
-{
-       struct amdgpu_sync_entry *e, *newone;
-       struct hlist_node *tmp;
-       int i;
-
-       /* Allocate the new entry before moving the old ones */
-       newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
-       if (!newone)
-               return -ENOMEM;
-
-       hash_for_each_safe(src->fences, i, tmp, e, node) {
-               struct fence *f = e->fence;
-
-               hash_del(&e->node);
-               if (fence_is_signaled(f)) {
-                       fence_put(f);
-                       kmem_cache_free(amdgpu_sync_slab, e);
-                       continue;
-               }
-
-               if (amdgpu_sync_add_later(dst, f)) {
-                       kmem_cache_free(amdgpu_sync_slab, e);
-                       continue;
-               }
-
-               hash_add(dst->fences, &e->node, f->context);
-       }
-
-       hash_add(src->fences, &newone->node, fence->context);
-       newone->fence = fence_get(fence);
-
-       return 0;
-}
-
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 {
        struct amdgpu_sync_entry *e;
@@ -319,25 +296,6 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
        return NULL;
 }
 
-int amdgpu_sync_wait(struct amdgpu_sync *sync)
-{
-       struct amdgpu_sync_entry *e;
-       struct hlist_node *tmp;
-       int i, r;
-
-       hash_for_each_safe(sync->fences, i, tmp, e, node) {
-               r = fence_wait(e->fence, false);
-               if (r)
-                       return r;
-
-               hash_del(&e->node);
-               fence_put(e->fence);
-               kmem_cache_free(amdgpu_sync_slab, e);
-       }
-
-       return 0;
-}
-
 /**
  * amdgpu_sync_free - free the sync object
  *
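
amdgpu_sync_is_idle() and amdgpu_sync_cycle_fences() give way to a peek/get pair: peek returns the first unsignaled fence while leaving it in the sync object (and, for the caller's own ring, already settles for the scheduled fence), get additionally removes the entry. A tiny model of that contract (plain C over an array instead of the kernel hash table):

    #include <stdio.h>

    /* 0 = signaled, nonzero = pending fence */
    static int fences[4] = { 0, 0, 7, 9 };

    static int *sync_peek(void)  /* first pending entry, left in place */
    {
            for (int i = 0; i < 4; i++)
                    if (fences[i])
                            return &fences[i];
            return NULL;
    }

    static int sync_get(void)    /* first pending entry, removed */
    {
            int *f = sync_peek();
            int v = f ? *f : 0;

            if (f)
                    *f = 0;      /* models hash_del() + fence_put() */
            return v;
    }

    int main(void)
    {
            printf("peek=%d\n", *sync_peek());  /* 7, still present */
            printf("get=%d\n", sync_get());     /* 7, now removed   */
            printf("peek=%d\n", *sync_peek());  /* 9                */
            return 0;
    }
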
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 26a5f4acf5840ed6218a49cd654b678005431481..499803f3ce3acd5f4c0f5a43a491c83a27b6f980 100644 (file)
 #define TRACE_SYSTEM amdgpu
 #define TRACE_INCLUDE_FILE amdgpu_trace
 
+TRACE_EVENT(amdgpu_mm_rreg,
+           TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+           TP_ARGS(did, reg, value),
+           TP_STRUCT__entry(
+                               __field(unsigned, did)
+                               __field(uint32_t, reg)
+                               __field(uint32_t, value)
+                           ),
+           TP_fast_assign(
+                          __entry->did = did;
+                          __entry->reg = reg;
+                          __entry->value = value;
+                          ),
+           TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+                     (unsigned long)__entry->did,
+                     (unsigned long)__entry->reg,
+                     (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_mm_wreg,
+           TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+           TP_ARGS(did, reg, value),
+           TP_STRUCT__entry(
+                               __field(unsigned, did)
+                               __field(uint32_t, reg)
+                               __field(uint32_t, value)
+                           ),
+           TP_fast_assign(
+                          __entry->did = did;
+                          __entry->reg = reg;
+                          __entry->value = value;
+                          ),
+           TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+                     (unsigned long)__entry->did,
+                     (unsigned long)__entry->reg,
+                     (unsigned long)__entry->value)
+);
+
 TRACE_EVENT(amdgpu_bo_create,
            TP_PROTO(struct amdgpu_bo *bo),
            TP_ARGS(bo),
            TP_STRUCT__entry(
                             __field(struct amdgpu_bo *, bo)
                             __field(u32, pages)
+                            __field(u32, type)
+                            __field(u32, prefer)
+                            __field(u32, allow)
+                            __field(u32, visible)
                             ),
 
            TP_fast_assign(
                           __entry->bo = bo;
                           __entry->pages = bo->tbo.num_pages;
+                          __entry->type = bo->tbo.mem.mem_type;
+                          __entry->prefer = bo->prefered_domains;
+                          __entry->allow = bo->allowed_domains;
+                          __entry->visible = bo->flags;
                           ),
-           TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+
+           TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d",
+                      __entry->bo, __entry->pages, __entry->type,
+                      __entry->prefer, __entry->allow, __entry->visible)
 );
 
 TRACE_EVENT(amdgpu_cs,
@@ -64,7 +113,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
                           __entry->adev = job->adev;
                           __entry->sched_job = &job->base;
                           __entry->ib = job->ibs;
-                          __entry->fence = &job->base.s_fence->base;
+                          __entry->fence = &job->base.s_fence->finished;
                           __entry->ring_name = job->ring->name;
                           __entry->num_ibs = job->num_ibs;
                           ),
@@ -89,7 +138,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
                           __entry->adev = job->adev;
                           __entry->sched_job = &job->base;
                           __entry->ib = job->ibs;
-                          __entry->fence = &job->base.s_fence->base;
+                          __entry->fence = &job->base.s_fence->finished;
                           __entry->ring_name = job->ring->name;
                           __entry->num_ibs = job->num_ibs;
                           ),
@@ -244,13 +293,55 @@ TRACE_EVENT(amdgpu_bo_list_set,
            TP_STRUCT__entry(
                             __field(struct amdgpu_bo_list *, list)
                             __field(struct amdgpu_bo *, bo)
+                            __field(u64, bo_size)
                             ),
 
            TP_fast_assign(
                           __entry->list = list;
                           __entry->bo = bo;
+                          __entry->bo_size = amdgpu_bo_size(bo);
                           ),
-           TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
+           TP_printk("list=%p, bo=%p, bo_size = %Ld",
+                     __entry->list,
+                     __entry->bo,
+                     __entry->bo_size)
+);
+
+TRACE_EVENT(amdgpu_cs_bo_status,
+           TP_PROTO(uint64_t total_bo, uint64_t total_size),
+           TP_ARGS(total_bo, total_size),
+           TP_STRUCT__entry(
+                       __field(u64, total_bo)
+                       __field(u64, total_size)
+                       ),
+
+           TP_fast_assign(
+                       __entry->total_bo = total_bo;
+                       __entry->total_size = total_size;
+                       ),
+           TP_printk("total bo count = %Ld, total bo size = %Ld",
+                       __entry->total_bo, __entry->total_size)
+);
+
+TRACE_EVENT(amdgpu_ttm_bo_move,
+           TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
+           TP_ARGS(bo, new_placement, old_placement),
+           TP_STRUCT__entry(
+                       __field(struct amdgpu_bo *, bo)
+                       __field(u64, bo_size)
+                       __field(u32, new_placement)
+                       __field(u32, old_placement)
+                       ),
+
+           TP_fast_assign(
+                       __entry->bo      = bo;
+                       __entry->bo_size = amdgpu_bo_size(bo);
+                       __entry->new_placement = new_placement;
+                       __entry->old_placement = old_placement;
+                       ),
+           TP_printk("bo=%p from:%d to %d with size = %Ld",
+                       __entry->bo, __entry->old_placement,
+                       __entry->new_placement, __entry->bo_size)
 );
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3b9053af47629617544f8f55d6ec5b714063146c..b7742e62972a61aae553dca11477e89ab8be6e2e 100644 (file)
@@ -286,9 +286,10 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        r = amdgpu_copy_buffer(ring, old_start, new_start,
                               new_mem->num_pages * PAGE_SIZE, /* bytes */
                               bo->resv, &fence);
-       /* FIXME: handle copy error */
-       r = ttm_bo_move_accel_cleanup(bo, fence,
-                                     evict, no_wait_gpu, new_mem);
+       if (r)
+               return r;
+
+       r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
        fence_put(fence);
        return r;
 }
@@ -396,6 +397,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
                return -EINVAL;
 
        adev = amdgpu_get_adev(bo->bdev);
+
+       /* remember the eviction */
+       if (evict)
+               atomic64_inc(&adev->num_evictions);
+
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                amdgpu_move_null(bo, new_mem);
                return 0;
@@ -429,7 +435,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 
        if (r) {
 memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+               r = ttm_bo_move_memcpy(bo, evict, interruptible,
+                                      no_wait_gpu, new_mem);
                if (r) {
                        return r;
                }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 01abfc21b4a2249c43b5b0ce747b36ac111940c2..e19520c4b4b6e3cd6bd4285c008885998e87187f 100644 (file)
@@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
        int r;
 
-       if (adev->uvd.vcpu_bo == NULL)
-               return 0;
+       kfree(adev->uvd.saved_bo);
 
        amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
 
-       r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-       if (!r) {
-               amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
-               amdgpu_bo_unpin(adev->uvd.vcpu_bo);
-               amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-       }
+       if (adev->uvd.vcpu_bo) {
+               r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+               if (!r) {
+                       amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+                       amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+                       amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+               }
 
-       amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+               amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+       }
 
        amdgpu_ring_fini(&adev->uvd.ring);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9f36ed30ba115b9be28ee3dff2eea75c2fe796f4..2f8496d48c94d9d8a632795c7cca78b5517fd16d 100644 (file)
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/fence-array.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -114,16 +115,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 /**
  * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
  *
+ * @adev: amdgpu device pointer
  * @vm: vm providing the BOs
  * @duplicates: head of duplicates list
  *
  * Add the page directory to the BO duplicates list
  * for command submission.
  */
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                         struct list_head *duplicates)
 {
+       uint64_t num_evictions;
        unsigned i;
 
+       /* We only need to validate the page tables
+        * if they aren't already valid.
+        */
+       num_evictions = atomic64_read(&adev->num_evictions);
+       if (num_evictions == vm->last_eviction_counter)
+               return;
+
        /* add the vm page table to the list */
        for (i = 0; i <= vm->max_pde_used; ++i) {
                struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
@@ -162,6 +173,13 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
        spin_unlock(&glob->lru_lock);
 }
 
+static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
+                             struct amdgpu_vm_id *id)
+{
+       return id->current_gpu_reset_count !=
+               atomic_read(&adev->gpu_reset_counter) ? true : false;
+}
+
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
@@ -174,20 +192,69 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync, struct fence *fence,
-                     unsigned *vm_id, uint64_t *vm_pd_addr)
+                     struct amdgpu_job *job)
 {
-       uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
        struct amdgpu_device *adev = ring->adev;
        struct fence *updates = sync->last_vm_update;
-       struct amdgpu_vm_id *id;
-       unsigned i = ring->idx;
-       int r;
+       struct amdgpu_vm_id *id, *idle;
+       struct fence **fences;
+       unsigned i;
+       int r = 0;
+
+       fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
+                              GFP_KERNEL);
+       if (!fences)
+               return -ENOMEM;
 
        mutex_lock(&adev->vm_manager.lock);
 
+       /* Check if we have an idle VMID */
+       i = 0;
+       list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+               fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+               if (!fences[i])
+                       break;
+               ++i;
+       }
+
+       /* If we can't find an idle VMID to use, wait till one becomes available */
+       if (&idle->list == &adev->vm_manager.ids_lru) {
+               u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+               unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+               struct fence_array *array;
+               unsigned j;
+
+               for (j = 0; j < i; ++j)
+                       fence_get(fences[j]);
+
+               array = fence_array_create(i, fences, fence_context,
+                                          seqno, true);
+               if (!array) {
+                       for (j = 0; j < i; ++j)
+                               fence_put(fences[j]);
+                       kfree(fences);
+                       r = -ENOMEM;
+                       goto error;
+               }
+
+               r = amdgpu_sync_fence(ring->adev, sync, &array->base);
+               fence_put(&array->base);
+               if (r)
+                       goto error;
+
+               mutex_unlock(&adev->vm_manager.lock);
+               return 0;
+
+       }
+       kfree(fences);
+
+       job->vm_needs_flush = true;
        /* Check if we can use a VMID already assigned to this VM */
+       i = ring->idx;
        do {
                struct fence *flushed;
+               bool same_ring = ring->idx == i;
 
                id = vm->ids[i++];
                if (i == AMDGPU_MAX_RINGS)
@@ -196,67 +263,49 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                /* Check all the prerequisites to using this VMID */
                if (!id)
                        continue;
+               if (amdgpu_vm_is_gpu_reset(adev, id))
+                       continue;
 
                if (atomic64_read(&id->owner) != vm->client_id)
                        continue;
 
-               if (pd_addr != id->pd_gpu_addr)
+               if (job->vm_pd_addr != id->pd_gpu_addr)
                        continue;
 
-               if (id->last_user != ring &&
+               if (!same_ring &&
                    (!id->last_flush || !fence_is_signaled(id->last_flush)))
                        continue;
 
                flushed  = id->flushed_updates;
-               if (updates && (!flushed || fence_is_later(updates, flushed)))
+               if (updates &&
+                   (!flushed || fence_is_later(updates, flushed)))
                        continue;
 
-               /* Good we can use this VMID */
-               if (id->last_user == ring) {
-                       r = amdgpu_sync_fence(ring->adev, sync,
-                                             id->first);
-                       if (r)
-                               goto error;
-               }
-
-               /* And remember this submission as user of the VMID */
+               /* Good we can use this VMID. Remember this submission as
+                * user of the VMID.
+                */
                r = amdgpu_sync_fence(ring->adev, &id->active, fence);
                if (r)
                        goto error;
 
+               id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
                list_move_tail(&id->list, &adev->vm_manager.ids_lru);
                vm->ids[ring->idx] = id;
 
-               *vm_id = id - adev->vm_manager.ids;
-               *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
-               trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+               job->vm_id = id - adev->vm_manager.ids;
+               job->vm_needs_flush = false;
+               trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
 
                mutex_unlock(&adev->vm_manager.lock);
                return 0;
 
        } while (i != ring->idx);
 
-       id = list_first_entry(&adev->vm_manager.ids_lru,
-                             struct amdgpu_vm_id,
-                             list);
+       /* Still no ID to use? Then use the idle one found earlier */
+       id = idle;
 
-       if (!amdgpu_sync_is_idle(&id->active)) {
-               struct list_head *head = &adev->vm_manager.ids_lru;
-               struct amdgpu_vm_id *tmp;
-
-               list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
-                                        list) {
-                       if (amdgpu_sync_is_idle(&id->active)) {
-                               list_move(&id->list, head);
-                               head = &id->list;
-                       }
-               }
-               id = list_first_entry(&adev->vm_manager.ids_lru,
-                                     struct amdgpu_vm_id,
-                                     list);
-       }
-
-       r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
+       /* Remember this submission as user of the VMID */
+       r = amdgpu_sync_fence(ring->adev, &id->active, fence);
        if (r)
                goto error;
 
@@ -269,22 +318,46 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        fence_put(id->flushed_updates);
        id->flushed_updates = fence_get(updates);
 
-       id->pd_gpu_addr = pd_addr;
-
+       id->pd_gpu_addr = job->vm_pd_addr;
+       id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-       id->last_user = ring;
        atomic64_set(&id->owner, vm->client_id);
        vm->ids[ring->idx] = id;
 
-       *vm_id = id - adev->vm_manager.ids;
-       *vm_pd_addr = pd_addr;
-       trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+       job->vm_id = id - adev->vm_manager.ids;
+       trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
 
 error:
        mutex_unlock(&adev->vm_manager.lock);
        return r;
 }
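
The reuse test that the loop above applies can be modeled in isolation. A standalone sketch with fences reduced to sequence numbers (0 meaning "none"); the struct and function names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vm_id_model {
        bool     last_flush_signaled; /* previous flush fence has signaled */
        uint64_t flushed_updates;     /* seqno of last flushed PT update, 0 = none */
};

/* mirrors the two continue-checks in the hunk: reuse only if the old flush
 * is done and the pending updates are already covered by flushed_updates */
static bool can_reuse_without_flush(const struct vm_id_model *id,
                                    uint64_t updates)
{
        if (!id->last_flush_signaled)
                return false;
        if (updates && (!id->flushed_updates ||
                        updates > id->flushed_updates))
                return false;
        return true;
}

int main(void)
{
        struct vm_id_model id = { .last_flush_signaled = true,
                                  .flushed_updates = 42 };

        printf("%d %d\n", can_reuse_without_flush(&id, 40),
               can_reuse_without_flush(&id, 50)); /* prints: 1 0 */
        return 0;
}
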
 
+static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       const struct amdgpu_ip_block_version *ip_block;
+
+       if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+               /* only compute rings */
+               return false;
+
+       ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+       if (!ip_block)
+               return false;
+
+       if (ip_block->major <= 7) {
+               /* gfx7 has no workaround */
+               return true;
+       } else if (ip_block->major == 8) {
+               if (adev->gfx.mec_fw_version >= 673)
+                       /* gfx8 is fixed in MEC firmware 673 */
+                       return false;
+               else
+                       return true;
+       }
+       return false;
+}
+
 /**
  * amdgpu_vm_flush - hardware flush the vm
  *
@@ -294,59 +367,52 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
  *
  * Emit a VM flush when it is necessary.
  */
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
-                   unsigned vm_id, uint64_t pd_addr,
-                   uint32_t gds_base, uint32_t gds_size,
-                   uint32_t gws_base, uint32_t gws_size,
-                   uint32_t oa_base, uint32_t oa_size)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+       struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
        bool gds_switch_needed = ring->funcs->emit_gds_switch && (
-               id->gds_base != gds_base ||
-               id->gds_size != gds_size ||
-               id->gws_base != gws_base ||
-               id->gws_size != gws_size ||
-               id->oa_base != oa_base ||
-               id->oa_size != oa_size);
+               id->gds_base != job->gds_base ||
+               id->gds_size != job->gds_size ||
+               id->gws_base != job->gws_base ||
+               id->gws_size != job->gws_size ||
+               id->oa_base != job->oa_base ||
+               id->oa_size != job->oa_size);
        int r;
 
        if (ring->funcs->emit_pipeline_sync && (
-           pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
-                   ring->type == AMDGPU_RING_TYPE_COMPUTE))
+           job->vm_needs_flush || gds_switch_needed ||
+           amdgpu_vm_ring_has_compute_vm_bug(ring)))
                amdgpu_ring_emit_pipeline_sync(ring);
 
-       if (ring->funcs->emit_vm_flush &&
-           pd_addr != AMDGPU_VM_NO_FLUSH) {
+       if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
+           amdgpu_vm_is_gpu_reset(adev, id))) {
                struct fence *fence;
 
-               trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
-               amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+               trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
+               amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+
+               r = amdgpu_fence_emit(ring, &fence);
+               if (r)
+                       return r;
 
                mutex_lock(&adev->vm_manager.lock);
-               if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
-                       r = amdgpu_fence_emit(ring, &fence);
-                       if (r) {
-                               mutex_unlock(&adev->vm_manager.lock);
-                               return r;
-                       }
-                       fence_put(id->last_flush);
-                       id->last_flush = fence;
-               }
+               fence_put(id->last_flush);
+               id->last_flush = fence;
                mutex_unlock(&adev->vm_manager.lock);
        }
 
        if (gds_switch_needed) {
-               id->gds_base = gds_base;
-               id->gds_size = gds_size;
-               id->gws_base = gws_base;
-               id->gws_size = gws_size;
-               id->oa_base = oa_base;
-               id->oa_size = oa_size;
-               amdgpu_ring_emit_gds_switch(ring, vm_id,
-                                           gds_base, gds_size,
-                                           gws_base, gws_size,
-                                           oa_base, oa_size);
+               id->gds_base = job->gds_base;
+               id->gds_size = job->gds_size;
+               id->gws_base = job->gws_base;
+               id->gws_size = job->gws_size;
+               id->oa_base = job->oa_base;
+               id->oa_size = job->oa_size;
+               amdgpu_ring_emit_gds_switch(ring, job->vm_id,
+                                           job->gds_base, job->gds_size,
+                                           job->gws_base, job->gws_size,
+                                           job->oa_base, job->oa_size);
        }
 
        return 0;
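
Both paths in amdgpu_vm_grab_id() now stamp the id with the device's gpu_reset_counter, and the flush above fires when amdgpu_vm_is_gpu_reset() (whose body is not part of this hunk) sees that the counter has moved on since the stamp. A minimal model of that handshake, assuming the counter only ever increments; all names are illustrative:

#include <stdbool.h>
#include <stdint.h>

struct dev_model  { uint32_t gpu_reset_counter; };
struct vmid_model { uint32_t grabbed_reset_count; };

/* grab time: remember which "reset epoch" the VMID was handed out in */
static void vmid_stamp(struct vmid_model *id, const struct dev_model *dev)
{
        id->grabbed_reset_count = dev->gpu_reset_counter;
}

/* flush time: a reset happened since the grab iff the counter moved */
static bool gpu_was_reset(const struct vmid_model *id,
                          const struct dev_model *dev)
{
        return id->grabbed_reset_count != dev->gpu_reset_counter;
}

int main(void)
{
        struct dev_model dev = { .gpu_reset_counter = 7 };
        struct vmid_model id;

        vmid_stamp(&id, &dev);
        dev.gpu_reset_counter++;        /* a reset happens in between */
        return gpu_was_reset(&id, &dev) ? 0 : 1;
}
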
@@ -723,7 +789,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
- * @dst: destination address to map to
+ * @dst: destination address to map to; advanced to the next dst as the range is walked inside the function
  * @flags: mapping flags
  *
  * Update the page tables in the range @start - @end.
@@ -737,49 +803,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 {
        const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 
-       uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
-       uint64_t addr;
+       uint64_t cur_pe_start, cur_pe_end, cur_dst;
+       uint64_t addr; /* next GPU address to be updated */
+       uint64_t pt_idx;
+       struct amdgpu_bo *pt;
+       unsigned nptes; /* number of ptes to update next */
+       uint64_t next_pe_start;
+
+       /* initialize the variables */
+       addr = start;
+       pt_idx = addr >> amdgpu_vm_block_size;
+       pt = vm->page_tables[pt_idx].entry.robj;
+
+       if ((addr & ~mask) == (end & ~mask))
+               nptes = end - addr;
+       else
+               nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+       cur_pe_start = amdgpu_bo_gpu_offset(pt);
+       cur_pe_start += (addr & mask) * 8;
+       cur_pe_end = cur_pe_start + 8 * nptes;
+       cur_dst = dst;
+
+       /* advance to the next ptb */
+       addr += nptes;
+       dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 
        /* walk over the address space and update the page tables */
-       for (addr = start; addr < end; ) {
-               uint64_t pt_idx = addr >> amdgpu_vm_block_size;
-               struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
-               unsigned nptes;
-               uint64_t pe_start;
+       while (addr < end) {
+               pt_idx = addr >> amdgpu_vm_block_size;
+               pt = vm->page_tables[pt_idx].entry.robj;
 
                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
                else
                        nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
 
-               pe_start = amdgpu_bo_gpu_offset(pt);
-               pe_start += (addr & mask) * 8;
-
-               if (last_pe_end != pe_start) {
+               next_pe_start = amdgpu_bo_gpu_offset(pt);
+               next_pe_start += (addr & mask) * 8;
 
+               if (cur_pe_end == next_pe_start) {
+                       /* The next ptb is consecutive to the current one.
+                        * Don't call amdgpu_vm_frag_ptes yet; the two ptbs
+                        * will be updated together later.
+                        */
+                       cur_pe_end += 8 * nptes;
+               } else {
                        amdgpu_vm_frag_ptes(adev, vm_update_params,
-                                           last_pe_start, last_pe_end,
-                                           last_dst, flags);
+                                           cur_pe_start, cur_pe_end,
+                                           cur_dst, flags);
 
-                       last_pe_start = pe_start;
-                       last_pe_end = pe_start + 8 * nptes;
-                       last_dst = dst;
-               } else {
-                       last_pe_end += 8 * nptes;
+                       cur_pe_start = next_pe_start;
+                       cur_pe_end = next_pe_start + 8 * nptes;
+                       cur_dst = dst;
                }
 
+               /* advance to the next ptb */
                addr += nptes;
                dst += nptes * AMDGPU_GPU_PAGE_SIZE;
        }
 
-       amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
-                           last_pe_end, last_dst, flags);
+       amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
+                           cur_pe_end, cur_dst, flags);
 }
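
The rewrite turns the walk into explicit coalescing: for each page-table block (ptb) it computes how many PTEs fall inside it (everything up to end when start and end share a block, otherwise up to the block boundary), and merges runs whose PE windows are back to back, so amdgpu_vm_frag_ptes runs once per contiguous span rather than once per block. A self-contained model of that logic with the BO lookup replaced by a fake per-block base address; every constant and name here is illustrative:

#include <stdint.h>
#include <stdio.h>

#define PTE_COUNT  512u                 /* fake PTEs per page-table block */
#define PTE_SIZE   8u                   /* bytes per PTE */

/* fake "BO offset" of a block's PTE array; consecutive blocks happen to
 * be adjacent here, so everything coalesces into a single span */
static uint64_t block_base(uint64_t pt_idx)
{
        return 0x100000ull + pt_idx * PTE_COUNT * PTE_SIZE;
}

static void flush_span(uint64_t pe_start, uint64_t pe_end, uint64_t dst)
{
        printf("update PEs [%#llx, %#llx) -> dst %#llx\n",
               (unsigned long long)pe_start, (unsigned long long)pe_end,
               (unsigned long long)dst);
}

static void update_ptes(uint64_t start, uint64_t end, uint64_t dst)
{
        const uint64_t mask = PTE_COUNT - 1;
        uint64_t addr = start;
        uint64_t cur_pe_start, cur_pe_end, cur_dst;
        unsigned nptes;

        /* prime the current span with the first block */
        nptes = ((addr & ~mask) == (end & ~mask)) ? (unsigned)(end - addr)
                                                  : PTE_COUNT - (addr & mask);
        cur_pe_start = block_base(addr / PTE_COUNT) + (addr & mask) * PTE_SIZE;
        cur_pe_end = cur_pe_start + nptes * PTE_SIZE;
        cur_dst = dst;
        addr += nptes;
        dst += nptes * 4096;            /* fake GPU page size */

        while (addr < end) {
                uint64_t next_pe_start;

                nptes = ((addr & ~mask) == (end & ~mask)) ? (unsigned)(end - addr)
                                                          : PTE_COUNT - (addr & mask);
                next_pe_start = block_base(addr / PTE_COUNT) +
                                (addr & mask) * PTE_SIZE;

                if (cur_pe_end == next_pe_start) {
                        cur_pe_end += nptes * PTE_SIZE; /* extend the span */
                } else {
                        flush_span(cur_pe_start, cur_pe_end, cur_dst);
                        cur_pe_start = next_pe_start;
                        cur_pe_end = next_pe_start + nptes * PTE_SIZE;
                        cur_dst = dst;
                }
                addr += nptes;
                dst += nptes * 4096;
        }
        flush_span(cur_pe_start, cur_pe_end, cur_dst);
}

int main(void)
{
        update_ptes(100, 1500, 0x20000); /* spans three blocks, one flush */
        return 0;
}
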
 
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
  * @src: address where to copy page table entries from
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
@@ -793,6 +885,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+                                      struct fence *exclusive,
                                       uint64_t src,
                                       dma_addr_t *pages_addr,
                                       struct amdgpu_vm *vm,
@@ -853,6 +946,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        vm_update_params.ib = &job->ibs[0];
 
+       r = amdgpu_sync_fence(adev, &job->sync, exclusive);
+       if (r)
+               goto error_free;
+
        r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
                             owner);
        if (r)
@@ -889,6 +986,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
  *
  * @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
  * @gtt_flags: flags as they are used for GTT
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
@@ -902,6 +1000,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+                                     struct fence *exclusive,
                                      uint32_t gtt_flags,
                                      dma_addr_t *pages_addr,
                                      struct amdgpu_vm *vm,
@@ -932,7 +1031,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
        addr += mapping->offset;
 
        if (!pages_addr || src)
-               return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+               return amdgpu_vm_bo_update_mapping(adev, exclusive,
+                                                  src, pages_addr, vm,
                                                   start, mapping->it.last,
                                                   flags, addr, fence);
 
@@ -940,7 +1040,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                uint64_t last;
 
                last = min((uint64_t)mapping->it.last, start + max_size - 1);
-               r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+               r = amdgpu_vm_bo_update_mapping(adev, exclusive,
+                                               src, pages_addr, vm,
                                                start, last, flags, addr,
                                                fence);
                if (r)
@@ -973,6 +1074,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
        uint32_t gtt_flags, flags;
+       struct fence *exclusive;
        uint64_t addr;
        int r;
 
@@ -994,8 +1096,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                default:
                        break;
                }
+
+               exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
        } else {
                addr = 0;
+               exclusive = NULL;
        }
 
        flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
@@ -1007,7 +1112,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        spin_unlock(&vm->status_lock);
 
        list_for_each_entry(mapping, &bo_va->invalids, list) {
-               r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
+               r = amdgpu_vm_bo_split_mapping(adev, exclusive,
+                                              gtt_flags, pages_addr, vm,
                                               mapping, flags, addr,
                                               &bo_va->last_pt_update);
                if (r)
@@ -1054,7 +1160,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                        struct amdgpu_bo_va_mapping, list);
                list_del(&mapping->list);
 
-               r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
+               r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
                                               0, 0, NULL);
                kfree(mapping);
                if (r)
@@ -1445,6 +1551,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        amdgpu_bo_unreserve(vm->page_directory);
        if (r)
                goto error_free_page_directory;
+       vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
 
        return 0;
 
@@ -1516,6 +1623,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
                              &adev->vm_manager.ids_lru);
        }
 
+       adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+               adev->vm_manager.seqno[i] = 0;
+
        atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
        atomic64_set(&adev->vm_manager.client_counter, 0);
 }
index ea407db1fbcfaaa54c844e7f41e06eb3a04bb1a2..5c33ed862695074112f6e9621917c2d598e3a620 100644 (file)
@@ -50,7 +50,9 @@
 #include "gmc/gmc_7_1_sh_mask.h"
 
 MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
 MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
 
 #define MC_CG_ARB_FREQ_F0           0x0a
 #define MC_CG_ARB_FREQ_F1           0x0b
@@ -736,19 +738,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
 
        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
            pi->caps_td_ramping || pi->caps_tcp_ramping) {
-               gfx_v7_0_enter_rlc_safe_mode(adev);
+               adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
                if (enable) {
                        ret = ci_program_pt_config_registers(adev, didt_config_ci);
                        if (ret) {
-                               gfx_v7_0_exit_rlc_safe_mode(adev);
+                               adev->gfx.rlc.funcs->exit_safe_mode(adev);
                                return ret;
                        }
                }
 
                ci_do_enable_didt(adev, enable);
 
-               gfx_v7_0_exit_rlc_safe_mode(adev);
+               adev->gfx.rlc.funcs->exit_safe_mode(adev);
        }
 
        return 0;
@@ -3636,6 +3638,10 @@ static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
 
        ci_setup_default_pcie_tables(adev);
 
+       /* save a copy of the default DPM table */
+       memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
+                       sizeof(struct ci_dpm_table));
+
        return 0;
 }
 
@@ -5754,10 +5760,18 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
 
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
-               chip_name = "bonaire";
+               if ((adev->pdev->revision == 0x80) ||
+                   (adev->pdev->revision == 0x81) ||
+                   (adev->pdev->device == 0x665f))
+                       chip_name = "bonaire_k";
+               else
+                       chip_name = "bonaire";
                break;
        case CHIP_HAWAII:
-               chip_name = "hawaii";
+               if (adev->pdev->revision == 0x80)
+                       chip_name = "hawaii_k";
+               else
+                       chip_name = "hawaii";
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
@@ -6221,6 +6235,9 @@ static int ci_dpm_sw_fini(void *handle)
        ci_dpm_fini(adev);
        mutex_unlock(&adev->pm.mutex);
 
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
 
@@ -6401,6 +6418,186 @@ static int ci_dpm_set_powergating_state(void *handle,
        return 0;
 }
 
+static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+               enum pp_clock_type type, char *buf)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+       struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+       struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+       struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+
+       int i, now, size = 0;
+       uint32_t clock, pcie_speed;
+
+       switch (type) {
+       case PP_SCLK:
+               amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
+               clock = RREG32(mmSMC_MSG_ARG_0);
+
+               for (i = 0; i < sclk_table->count; i++) {
+                       if (clock > sclk_table->dpm_levels[i].value)
+                               continue;
+                       break;
+               }
+               now = i;
+
+               for (i = 0; i < sclk_table->count; i++)
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                                       i, sclk_table->dpm_levels[i].value / 100,
+                                       (i == now) ? "*" : "");
+               break;
+       case PP_MCLK:
+               amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
+               clock = RREG32(mmSMC_MSG_ARG_0);
+
+               for (i = 0; i < mclk_table->count; i++) {
+                       if (clock > mclk_table->dpm_levels[i].value)
+                               continue;
+                       break;
+               }
+               now = i;
+
+               for (i = 0; i < mclk_table->count; i++)
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                                       i, mclk_table->dpm_levels[i].value / 100,
+                                       (i == now) ? "*" : "");
+               break;
+       case PP_PCIE:
+               pcie_speed = ci_get_current_pcie_speed(adev);
+               for (i = 0; i < pcie_table->count; i++) {
+                       if (pcie_speed != pcie_table->dpm_levels[i].value)
+                               continue;
+                       break;
+               }
+               now = i;
+
+               for (i = 0; i < pcie_table->count; i++)
+                       size += sprintf(buf + size, "%d: %s %s\n", i,
+                                       (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
+                                       (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+                                       (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+                                       (i == now) ? "*" : "");
+               break;
+       default:
+               break;
+       }
+
+       return size;
+}
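
The three cases above share one shape: read the current value back, find the first DPM level at or above it, then append one line per level into the caller's buffer with the active level starred. A userspace model of the buffer-building part; the /100 scaling matches the code above under the assumption that the table stores 10 kHz units, and the table contents are made up:

#include <stdio.h>

/* fake DPM table, same units assumed as the driver's sclk table */
static const unsigned levels[] = { 30000, 60000, 100000 }; /* 300/600/1000 MHz */
#define NLEVELS ((int)(sizeof(levels) / sizeof(levels[0])))

static int print_clock_levels(char *buf, unsigned cur_clock)
{
        int i, now, size = 0;

        /* the first level at or above the current clock is the active one */
        for (i = 0; i < NLEVELS; i++)
                if (cur_clock <= levels[i])
                        break;
        now = i;

        for (i = 0; i < NLEVELS; i++)
                size += sprintf(buf + size, "%d: %uMhz %s\n",
                                i, levels[i] / 100, (i == now) ? "*" : "");
        return size;
}

int main(void)
{
        char buf[256];

        print_clock_levels(buf, 60000);
        fputs(buf, stdout);             /* the "1: 600Mhz" line is starred */
        return 0;
}
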
+
+static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+               enum pp_clock_type type, uint32_t mask)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+
+       if (adev->pm.dpm.forced_level
+                       != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
+               return -EINVAL;
+
+       switch (type) {
+       case PP_SCLK:
+               if (!pi->sclk_dpm_key_disabled)
+                       amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+                                       PPSMC_MSG_SCLKDPM_SetEnabledMask,
+                                       pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+               break;
+
+       case PP_MCLK:
+               if (!pi->mclk_dpm_key_disabled)
+                       amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+                                       PPSMC_MSG_MCLKDPM_SetEnabledMask,
+                                       pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+               break;
+
+       case PP_PCIE:
+       {
+               uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+               uint32_t level = 0;
+
+               while (tmp >>= 1)
+                       level++;
+
+               if (!pi->pcie_dpm_key_disabled)
+                       amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+                                       PPSMC_MSG_PCIeDPM_ForceLevel,
+                                       level);
+               break;
+       }
+       default:
+               break;
+       }
+
+       return 0;
+}
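
The PP_PCIE branch collapses the request mask to a single forced level: after ANDing with the enabled-level mask, the while (tmp >>= 1) loop counts shifts, which yields the index of the highest set bit (0 for an empty mask). The conversion in isolation:

#include <stdint.h>
#include <stdio.h>

/* index of the highest set bit; returns 0 for an empty mask, exactly
 * like the shift-counting loop in the PP_PCIE case above */
static uint32_t mask_to_level(uint32_t mask)
{
        uint32_t level = 0;

        while (mask >>= 1)
                level++;
        return level;
}

int main(void)
{
        printf("%u %u %u\n", mask_to_level(0x1), mask_to_level(0x6),
               mask_to_level(0));       /* prints: 0 2 0 */
        return 0;
}
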
+
+static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+       struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
+       struct ci_single_dpm_table *golden_sclk_table =
+                       &(pi->golden_dpm_table.sclk_table);
+       int value;
+
+       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+                       100 /
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return value;
+}
+
+static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+       struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+       struct ci_single_dpm_table *golden_sclk_table =
+                       &(pi->golden_dpm_table.sclk_table);
+
+       if (value > 20)
+               value = 20;
+
+       ps->performance_levels[ps->performance_level_count - 1].sclk =
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+                       value / 100 +
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return 0;
+}
+
+static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+       struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
+       struct ci_single_dpm_table *golden_mclk_table =
+                       &(pi->golden_dpm_table.mclk_table);
+       int value;
+
+       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+                       100 /
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return value;
+}
+
+static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+       struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+       struct ci_single_dpm_table *golden_mclk_table =
+                       &(pi->golden_dpm_table.mclk_table);
+
+       if (value > 20)
+               value = 20;
+
+       ps->performance_levels[ps->performance_level_count - 1].mclk =
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+                       value / 100 +
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return 0;
+}
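
The four OD helpers implement a simple percentage overdrive against the saved golden table: get reports (current_top - golden_top) * 100 / golden_top, and set clamps the request to 20% and scales the golden top clock back up. A worked model of that round trip; the 20% cap comes from the code above, while the table contents are illustrative:

#include <stdint.h>
#include <stdio.h>

static const uint32_t golden_top = 100000; /* golden table's highest level */
static uint32_t top = 100000;              /* current highest level */

static int get_od(void)
{
        /* overdrive percentage relative to the golden top clock */
        return (int)((top - golden_top) * 100 / golden_top);
}

static void set_od(uint32_t value)
{
        if (value > 20)                    /* same clamp as the driver */
                value = 20;
        top = golden_top * value / 100 + golden_top;
}

int main(void)
{
        set_od(15);
        printf("od = %d%%\n", get_od());   /* prints: od = 15% */
        set_od(50);
        printf("od = %d%%\n", get_od());   /* clamped: od = 20% */
        return 0;
}
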
+
 const struct amd_ip_funcs ci_dpm_ip_funcs = {
        .name = "ci_dpm",
        .early_init = ci_dpm_early_init,
@@ -6435,6 +6632,12 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
        .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
        .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
        .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
+       .print_clock_levels = ci_dpm_print_clock_levels,
+       .force_clock_level = ci_dpm_force_clock_level,
+       .get_sclk_od = ci_dpm_get_sclk_od,
+       .set_sclk_od = ci_dpm_set_sclk_od,
+       .get_mclk_od = ci_dpm_get_mclk_od,
+       .set_mclk_od = ci_dpm_set_mclk_od,
 };
 
 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
index faccc30c93bf9a023546cbdc97733688e07f6a45..91be2996ae7cee447703cd47e82568cac241a160 100644 (file)
@@ -193,6 +193,7 @@ struct ci_pt_defaults {
 
 struct ci_power_info {
        struct ci_dpm_table dpm_table;
+       struct ci_dpm_table golden_dpm_table;
        u32 voltage_control;
        u32 mvdd_control;
        u32 vddci_control;
index 07bc795a4ca9f87662bdaaa6ebd34f6846bb932b..a7de4d18ac946c2f8da67ba16664ed6f956c985f 100644 (file)
@@ -962,6 +962,12 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
+static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
+{
+       /* CIK does not support SR-IOV */
+       return 0;
+}
+
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
        {mmGRBM_STATUS, false},
        {mmGB_ADDR_CONFIG, false},
@@ -1029,12 +1035,12 @@ static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
 
        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               gfx_v7_0_select_se_sh(adev, se_num, sh_num);
+               amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
        val = RREG32(reg_offset);
 
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
 }
@@ -1152,10 +1158,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
        WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
 }
 
-static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 {
        struct kv_reset_save_regs kv_save = { 0 };
        u32 i;
+       int r = -EINVAL;
 
        dev_info(adev->dev, "GPU pci config reset\n");
 
@@ -1171,14 +1178,20 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
-               if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
+               if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+                       /* enable BM */
+                       pci_set_master(adev->pdev);
+                       r = 0;
                        break;
+               }
                udelay(1);
        }
 
        /* does asic init need to be run first??? */
        if (adev->flags & AMD_IS_APU)
                kv_restore_regs_for_reset(adev, &kv_save);
+
+       return r;
 }
 
 static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -1204,13 +1217,14 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
  */
 static int cik_asic_reset(struct amdgpu_device *adev)
 {
+       int r;
+
        cik_set_bios_scratch_engine_hung(adev, true);
 
-       cik_gpu_pci_config_reset(adev);
+       r = cik_gpu_pci_config_reset(adev);
 
        cik_set_bios_scratch_engine_hung(adev, false);
 
-       return 0;
+       return r;
 }
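
cik_gpu_pci_config_reset() now reports failure: r starts at -EINVAL and only becomes 0 if mmCONFIG_MEMSIZE stops reading back as all-ones within the timeout, and cik_asic_reset() propagates that result instead of an unconditional 0. The shape of that poll-with-timeout as a runnable sketch; the fake register and all names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define USEC_TIMEOUT 100000u

/* fake register: reads all-ones until the "ASIC" comes out of reset */
static unsigned reads_until_ready = 25;

static uint32_t read_memsize_reg(void)
{
        if (reads_until_ready) {
                reads_until_ready--;
                return 0xffffffffu;
        }
        return 0x4000u;                 /* fake memory size once alive */
}

/* -1 (failure) unless the readback changes within the timeout; mirrors
 * the r = -EINVAL / r = 0 flow in cik_gpu_pci_config_reset() */
static int wait_for_reset_done(void)
{
        unsigned i;

        for (i = 0; i < USEC_TIMEOUT; i++) {
                if (read_memsize_reg() != 0xffffffffu)
                        return 0;       /* ASIC is back; caller re-enables BM */
                /* udelay(1) would go here in the driver */
        }
        return -1;
}

int main(void)
{
        printf("reset %s\n", wait_for_reset_done() ? "timed out" : "done");
        return 0;
}
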
 
 static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -2007,9 +2021,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
        .get_xclk = &cik_get_xclk,
        .set_uvd_clocks = &cik_set_uvd_clocks,
        .set_vce_clocks = &cik_set_vce_clocks,
-       /* these should be moved to their own ip modules */
-       .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
-       .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
+       .get_virtual_caps = &cik_get_virtual_caps,
 };
 
 static int cik_common_early_init(void *handle)
index 518dca43b133a2eaa638031be93c4af9522c5b81..46aca16a40aaa4d1abe63693da45ef524852541a 100644 (file)
@@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
 
 u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
 
+
+static void cik_sdma_free_microcode(struct amdgpu_device *adev)
+{
+       int i;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+                       release_firmware(adev->sdma.instance[i].fw);
+                       adev->sdma.instance[i].fw = NULL;
+       }
+}
+
 /*
  * sDMA - System DMA
  * Starting with CIK, the GPU has new asynchronous
@@ -214,17 +224,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
                                  unsigned vm_id, bool ctx_switch)
 {
        u32 extra_bits = vm_id & 0xf;
-       u32 next_rptr = ring->wptr + 5;
-
-       while ((next_rptr & 7) != 4)
-               next_rptr++;
-
-       next_rptr += 4;
-       amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
-       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
-       amdgpu_ring_write(ring, 1); /* number of DWs to follow */
-       amdgpu_ring_write(ring, next_rptr);
 
        /* IB packet must end on a 8 DW boundary */
        cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
@@ -419,6 +418,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -446,7 +447,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
+
+       cik_sdma_enable(adev, true);
 
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -529,8 +535,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       /* unhalt the MEs */
-       cik_sdma_enable(adev, true);
+       /* halt the engine before programming */
+       cik_sdma_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = cik_sdma_gfx_resume(adev);
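
The start path is thus reordered so the engine stays halted while the ring registers are programmed, is re-enabled once per-instance setup is done, and only then gets ring-tested. A runnable sketch of that sequencing with the hardware calls stubbed out; the helper names are stand-ins, not driver functions:

#include <stdio.h>

static int enabled;

static void engine_enable(int on) { enabled = on; }
static void program_ring(int i)
{
        printf("program ring %d (halted=%d)\n", i, !enabled);
}
static int test_ring(int i) { return enabled ? 0 : -1; }

/* halt -> program all rings -> enable -> test all rings, mirroring the
 * reworked cik_sdma_start()/cik_sdma_gfx_resume() flow above */
static int sdma_start(int ninstances)
{
        int i, r;

        engine_enable(0);               /* halt before programming */
        for (i = 0; i < ninstances; i++)
                program_ring(i);
        engine_enable(1);               /* unhalt once everything is set */
        for (i = 0; i < ninstances; i++) {
                r = test_ring(i);
                if (r)
                        return r;
        }
        return 0;
}

int main(void)
{
        return sdma_start(2);
}
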
@@ -998,6 +1004,7 @@ static int cik_sdma_sw_fini(void *handle)
        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+       cik_sdma_free_microcode(adev);
        return 0;
 }
 
index 933e425a81545c22d80945f08da923f964fd0d16..8ba07e79d4cb17317cebd37811b24bcf046ef847 100644 (file)
@@ -2219,6 +2219,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
                        }
                }
        } else { /*pi->caps_vce_pg*/
+               pi->vce_power_gated = gate;
                cz_update_vce_dpm(adev);
                cz_enable_vce_dpm(adev, !gate);
        }
index c90408bc0fde41e3ad553549a0cdf74d53cfec59..d4bf133908b17ae2c58e5156acf2175d493f6b58 100644 (file)
@@ -307,11 +307,10 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev,
        struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
        u32 tmp;
 
-       /* flip at hsync for async, default is vsync */
-       /* use UPDATE_IMMEDIATE_EN instead for async? */
+       /* flip immediate for async, default is vsync */
        tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
-                           GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
+                           GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
        WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        /* update the scanout addresses */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
index 300ff4aab0fda41761f60859ecfd7afcb80b4b53..4fdfab1e920012b4e770071af2c05e0357753c07 100644 (file)
@@ -526,36 +526,16 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
                crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
                                             CRTC_CONTROL, CRTC_MASTER_EN);
                if (crtc_enabled) {
-#if 0
-                       u32 frame_count;
-                       int j;
-
+#if 1
                        save->crtc_enabled[i] = true;
                        tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
                        if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
-                               amdgpu_display_vblank_wait(adev, i);
-                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                               /* it is correct only for RGB; black is 0 */
+                               WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
                                tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
                                WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-                       }
-                       /* wait for the next frame */
-                       frame_count = amdgpu_display_vblank_get_counter(adev, i);
-                       for (j = 0; j < adev->usec_timeout; j++) {
-                               if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
-                                       break;
-                               udelay(1);
-                       }
-                       tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
-                               tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
-                               WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-                       }
-                       tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
-                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
-                               WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
                        }
+                       mdelay(20);
 #else
                        /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
                        WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
@@ -575,55 +555,22 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
 static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
                                      struct amdgpu_mode_mc_save *save)
 {
-       u32 tmp, frame_count;
-       int i, j;
+       u32 tmp;
+       int i;
 
        /* update crtc base addresses */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
                       upper_32_bits(adev->mc.vram_start));
-               WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-                      upper_32_bits(adev->mc.vram_start));
                WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
                       (u32)adev->mc.vram_start);
-               WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
-                      (u32)adev->mc.vram_start);
 
                if (save->crtc_enabled[i]) {
-                       tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
-                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
-                               WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
-                       }
-                       tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
-                               tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
-                               WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-                       }
-                       tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
-                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
-                               WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
-                       }
-                       for (j = 0; j < adev->usec_timeout; j++) {
-                               tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-                               if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
-                                       break;
-                               udelay(1);
-                       }
                        tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
                        tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
-                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                        WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-                       /* wait for the next frame */
-                       frame_count = amdgpu_display_vblank_get_counter(adev, i);
-                       for (j = 0; j < adev->usec_timeout; j++) {
-                               if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
-                                       break;
-                               udelay(1);
-                       }
                }
+               mdelay(20);
        }
 
        WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
index 245cabf06575c73e1e9427011d878237e2e96fdf..ed03b75175d48b1ba24754f2e12499ac1041bb17 100644 (file)
@@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle)
 
 static int fiji_dpm_sw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
 
index 7f18a53ab53ac8bc18c992d7fdc76d10a283c3d4..f6bd9465dbdcdd9eee4062ffae2ead68dc6070bc 100644 (file)
@@ -991,6 +991,22 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
        return err;
 }
 
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+       release_firmware(adev->gfx.pfp_fw);
+       adev->gfx.pfp_fw = NULL;
+       release_firmware(adev->gfx.me_fw);
+       adev->gfx.me_fw = NULL;
+       release_firmware(adev->gfx.ce_fw);
+       adev->gfx.ce_fw = NULL;
+       release_firmware(adev->gfx.mec_fw);
+       adev->gfx.mec_fw = NULL;
+       release_firmware(adev->gfx.mec2_fw);
+       adev->gfx.mec2_fw = NULL;
+       release_firmware(adev->gfx.rlc_fw);
+       adev->gfx.rlc_fw = NULL;
+}
+
 /**
  * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
  *
@@ -1567,9 +1583,15 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
  * registers are instanced per SE or SH.  0xffffffff means
  * broadcast to all SEs or SHs (CIK).
  */
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
+                                 u32 se_num, u32 sh_num, u32 instance)
 {
-       u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;
+       u32 data;
+
+       if (instance == 0xffffffff)
+               data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+       else
+               data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
 
        if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
                data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
@@ -1643,13 +1665,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-                       gfx_v7_0_select_se_sh(adev, i, j);
+                       gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
                        data = gfx_v7_0_get_rb_active_bitmap(adev);
                        active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
                                               rb_bitmap_width_per_sh);
                }
        }
-       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        adev->gfx.config.backend_enable_mask = active_rbs;
@@ -1730,7 +1752,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
         * making sure that the following register writes will be broadcasted
         * to all the shaders
         */
-       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -2034,17 +2056,6 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                      unsigned vm_id, bool ctx_switch)
 {
        u32 header, control = 0;
-       u32 next_rptr = ring->wptr + 5;
-
-       if (ctx_switch)
-               next_rptr += 2;
-
-       next_rptr += 4;
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
-       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
-       amdgpu_ring_write(ring, next_rptr);
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
        if (ctx_switch) {
@@ -2073,22 +2084,9 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                          struct amdgpu_ib *ib,
                                          unsigned vm_id, bool ctx_switch)
 {
-       u32 header, control = 0;
-       u32 next_rptr = ring->wptr + 5;
-
-       control |= INDIRECT_BUFFER_VALID;
-       next_rptr += 4;
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
-       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
-       amdgpu_ring_write(ring, next_rptr);
-
-       header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-
-       control |= ib->length_dw | (vm_id << 24);
+       u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
 
-       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
                                          (2 << 0) |
@@ -3205,7 +3203,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
                }
        }
        adev->gfx.rlc.cs_data = ci_cs_data;
-       adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+       adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+       adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
 
        src_ptr = adev->gfx.rlc.reg_list;
        dws = adev->gfx.rlc.reg_list_size;
@@ -3363,7 +3362,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-                       gfx_v7_0_select_se_sh(adev, i, j);
+                       gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
                        for (k = 0; k < adev->usec_timeout; k++) {
                                if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
                                        break;
@@ -3371,7 +3370,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
                        }
                }
        }
-       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3418,7 +3417,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
        return orig;
 }
 
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
 {
        u32 tmp, i, mask;
 
@@ -3440,7 +3439,7 @@ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
        }
 }
 
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
 {
        u32 tmp;
 
@@ -3455,7 +3454,7 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
  *
  * Halt the RLC ME (MicroEngine) (CIK).
  */
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
 {
        WREG32(mmRLC_CNTL, 0);
 
@@ -3531,7 +3530,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
        WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
 
        mutex_lock(&adev->grbm_idx_mutex);
-       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
        WREG32(mmRLC_LB_PARAMS, 0x00600408);
        WREG32(mmRLC_LB_CNTL, 0x80000004);
@@ -3571,7 +3570,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
                tmp = gfx_v7_0_halt_rlc(adev);
 
                mutex_lock(&adev->grbm_idx_mutex);
-               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3622,7 +3621,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
                tmp = gfx_v7_0_halt_rlc(adev);
 
                mutex_lock(&adev->grbm_idx_mutex);
-               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3673,7 +3672,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
                tmp = gfx_v7_0_halt_rlc(adev);
 
                mutex_lock(&adev->grbm_idx_mutex);
-               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
@@ -3851,6 +3850,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
        }
 }
 
+static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+                                                u32 bitmap)
+{
+       u32 data;
+
+       if (!bitmap)
+               return;
+
+       data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+       data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+       WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
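
gfx_v7_0_set_user_cu_inactive_bitmap() packs the per-SH disable bitmap into the INACTIVE_CUS field: shift into position, then mask off any bits that don't fit the field. The generic shift-and-mask idiom, with an illustrative field layout rather than the real GC_USER_SHADER_ARRAY_CONFIG definition:

#include <stdint.h>
#include <stdio.h>

#define INACTIVE_CUS_SHIFT 16
#define INACTIVE_CUS_MASK  0xffff0000u

static uint32_t pack_inactive_cus(uint32_t bitmap)
{
        uint32_t data = bitmap << INACTIVE_CUS_SHIFT;

        return data & INACTIVE_CUS_MASK; /* drop bits beyond the field */
}

int main(void)
{
        printf("%#x\n", pack_inactive_cus(0x3)); /* prints: 0x30000 */
        return 0;
}
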
+
 static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
 {
        u32 data, mask;
@@ -4107,7 +4120,7 @@ static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
  * Fetches a GPU clock counter snapshot (SI).
  * Returns the 64 bit clock counter snapshot.
  */
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
        uint64_t clock;
 
@@ -4167,12 +4180,24 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
+static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
+       .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
+       .select_se_sh = &gfx_v7_0_select_se_sh,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
+       .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
+       .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+};
+
 static int gfx_v7_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
        adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
+       adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
+       adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
        gfx_v7_0_set_ring_funcs(adev);
        gfx_v7_0_set_irq_funcs(adev);
        gfx_v7_0_set_gds_init(adev);
@@ -4489,6 +4514,7 @@ static int gfx_v7_0_sw_fini(void *handle)
        gfx_v7_0_cp_compute_fini(adev);
        gfx_v7_0_rlc_fini(adev);
        gfx_v7_0_mec_fini(adev);
+       gfx_v7_0_free_microcode(adev);
 
        return 0;
 }
@@ -4816,7 +4842,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
        case 2:
                for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                        ring = &adev->gfx.compute_ring[i];
-                       if ((ring->me == me_id) & (ring->pipe == pipe_id))
+                       if ((ring->me == me_id) && (ring->pipe == pipe_id))
                                amdgpu_fence_process(ring);
                }
                break;
@@ -5015,16 +5041,22 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
        int i, j, k, counter, active_cu_number = 0;
        u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
        struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+       unsigned disable_masks[4 * 2];
 
        memset(cu_info, 0, sizeof(*cu_info));
 
+       amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        mask = 1;
                        ao_bitmap = 0;
                        counter = 0;
-                       gfx_v7_0_select_se_sh(adev, i, j);
+                       gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+                       if (i < 4 && j < 2)
+                               gfx_v7_0_set_user_cu_inactive_bitmap(
+                                       adev, disable_masks[i * 2 + j]);
                        bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
                        cu_info->bitmap[i][j] = bitmap;
 
@@ -5040,7 +5072,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
                        ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
                }
        }
-       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        cu_info->number = active_cu_number;
index e747aa935c88dc2342562ed5f80f316f8e952490..94e3ea147c26ebae5e31f81d07b590002f58e79b 100644 (file)
 
 extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
 
-/* XXX these shouldn't be exported */
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
 #endif
index f19bab68fd837ffe3cacb54ef04f9d50dc8fee27..c30b6ac25d8984f13ba70c1f962964a280373fe7 100644 (file)
@@ -297,7 +297,8 @@ static const u32 polaris11_golden_common_all[] =
 static const u32 golden_settings_polaris10_a11[] =
 {
        mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
-       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
+       mmCB_HW_CONTROL_2, 0, 0x0f000000,
        mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
        mmDB_DEBUG2, 0xf00fffff, 0x00000400,
        mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -836,6 +837,26 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
        return r;
 }
 
+
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
+{
+       release_firmware(adev->gfx.pfp_fw);
+       adev->gfx.pfp_fw = NULL;
+       release_firmware(adev->gfx.me_fw);
+       adev->gfx.me_fw = NULL;
+       release_firmware(adev->gfx.ce_fw);
+       adev->gfx.ce_fw = NULL;
+       release_firmware(adev->gfx.rlc_fw);
+       adev->gfx.rlc_fw = NULL;
+       release_firmware(adev->gfx.mec_fw);
+       adev->gfx.mec_fw = NULL;
+       if ((adev->asic_type != CHIP_STONEY) &&
+           (adev->asic_type != CHIP_TOPAZ))
+               release_firmware(adev->gfx.mec2_fw);
+       adev->gfx.mec2_fw = NULL;
+
+       kfree(adev->gfx.rlc.register_list_format);
+}
+
 static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 {
        const char *chip_name;
@@ -1129,6 +1150,71 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
        buffer[count++] = cpu_to_le32(0);
 }
 
+static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+{
+       const __le32 *fw_data;
+       volatile u32 *dst_ptr;
+       int me, i, max_me = 4;
+       u32 bo_offset = 0;
+       u32 table_offset, table_size;
+
+       if (adev->asic_type == CHIP_CARRIZO)
+               max_me = 5;
+
+       /* write the cp table buffer */
+       dst_ptr = adev->gfx.rlc.cp_table_ptr;
+       for (me = 0; me < max_me; me++) {
+               if (me == 0) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.ce_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 1) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.pfp_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 2) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.me_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 3) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 4) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec2_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               }
+
+               for (i = 0; i < table_size; i++) {
+                       dst_ptr[bo_offset + i] =
+                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+               }
+
+               bo_offset += table_size;
+       }
+}
+
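
The five branches above differ only in which firmware image supplies the jump table; jt_offset and jt_size come from the same gfx_firmware_header_v1_0 layout in every case. A table-driven restatement of the same copy loop, offered as a sketch rather than the committed code:

    /* CE, PFP, ME, MEC, MEC2: the order the jump table is packed in */
    const struct firmware *fws[5] = {
            adev->gfx.ce_fw, adev->gfx.pfp_fw, adev->gfx.me_fw,
            adev->gfx.mec_fw, adev->gfx.mec2_fw,
    };

    for (me = 0; me < max_me; me++) {
            const struct gfx_firmware_header_v1_0 *hdr =
                    (const struct gfx_firmware_header_v1_0 *)fws[me]->data;

            fw_data = (const __le32 *)(fws[me]->data +
                    le32_to_cpu(hdr->header.ucode_array_offset_bytes));
            table_offset = le32_to_cpu(hdr->jt_offset);
            table_size = le32_to_cpu(hdr->jt_size);

            for (i = 0; i < table_size; i++)
                    dst_ptr[bo_offset + i] =
                            cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));

            bo_offset += table_size;
    }
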
 static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
 {
        int r;
@@ -1144,6 +1230,18 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
                amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
                adev->gfx.rlc.clear_state_obj = NULL;
        }
+
+       /* jump table block */
+       if (adev->gfx.rlc.cp_table_obj) {
+               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+               if (unlikely(r != 0))
+                       dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+               amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+               amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+               adev->gfx.rlc.cp_table_obj = NULL;
+       }
 }
 
 static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
@@ -1200,6 +1298,45 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
                amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
 
+       if ((adev->asic_type == CHIP_CARRIZO) ||
+           (adev->asic_type == CHIP_STONEY)) {
+               adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+               if (adev->gfx.rlc.cp_table_obj == NULL) {
+                       r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
+                                            AMDGPU_GEM_DOMAIN_VRAM,
+                                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                            NULL, NULL,
+                                            &adev->gfx.rlc.cp_table_obj);
+                       if (r) {
+                               dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+                               return r;
+                       }
+               }
+
+               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+               if (unlikely(r != 0)) {
+                       dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+                       return r;
+               }
+               r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
+                                 &adev->gfx.rlc.cp_table_gpu_addr);
+               if (r) {
+                       amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+                       dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+                       return r;
+               }
+               r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+               if (r) {
+                       dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+                       return r;
+               }
+
+               cz_init_cp_jump_table(adev);
+
+               amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+       }
+
        return 0;
 }
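
The cp_table block above follows the usual amdgpu buffer-object lifecycle: create, reserve, pin (producing cp_table_gpu_addr), kmap (producing cp_table_ptr), fill, then kunmap and unreserve while leaving the buffer pinned for the RLC. Condensed into one illustrative helper, with error paths simplified relative to the patch:

    static int example_fill_pinned_bo(struct amdgpu_device *adev,
                                      struct amdgpu_bo *bo, u64 *gpu_addr)
    {
            void *ptr;
            int r;

            r = amdgpu_bo_reserve(bo, false);
            if (r)
                    return r;

            r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, gpu_addr);
            if (r)
                    goto unreserve;

            r = amdgpu_bo_kmap(bo, &ptr);
            if (r)
                    goto unreserve;

            /* ... write the table through ptr while the BO is reserved ... */

            amdgpu_bo_kunmap(bo);
    unreserve:
            amdgpu_bo_unreserve(bo);
            return r;
    }
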
 
@@ -1983,7 +2121,7 @@ static int gfx_v8_0_sw_fini(void *handle)
 
        gfx_v8_0_rlc_fini(adev);
 
-       kfree(adev->gfx.rlc.register_list_format);
+       gfx_v8_0_free_microcode(adev);
 
        return 0;
 }
@@ -3308,9 +3446,15 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
        }
 }
 
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
+                                 u32 se_num, u32 sh_num, u32 instance)
 {
-       u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+       u32 data;
+
+       if (instance == 0xffffffff)
+               data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+       else
+               data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
 
        if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
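
Here 0xffffffff acts as a broadcast sentinel: the rewritten helper either sets the *_BROADCAST_WRITES bits or programs an explicit index per field. A worked sketch of the targeted case, assuming the SE_INDEX/SH_INDEX field names from the gfx v8 register headers (the index-setting branches fall outside this hunk):

    /* select SE 1, SH 0, all instances: illustrative values */
    u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);

    data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, 1);
    data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, 0);
    WREG32(mmGRBM_GFX_INDEX, data);
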
@@ -3360,13 +3504,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-                       gfx_v8_0_select_se_sh(adev, i, j);
+                       gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
                        data = gfx_v8_0_get_rb_active_bitmap(adev);
                        active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
                                               rb_bitmap_width_per_sh);
                }
        }
-       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        adev->gfx.config.backend_enable_mask = active_rbs;
@@ -3470,7 +3614,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
         * making sure that the following register writes will be broadcasted
         * to all the shaders
         */
-       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
        WREG32(mmPA_SC_FIFO_SIZE,
                   (adev->gfx.config.sc_prim_fifo_size_frontend <<
@@ -3493,7 +3637,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-                       gfx_v8_0_select_se_sh(adev, i, j);
+                       gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
                        for (k = 0; k < adev->usec_timeout; k++) {
                                if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
                                        break;
@@ -3501,7 +3645,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
                        }
                }
        }
-       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3662,13 +3806,13 @@ static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
        WREG32(mmRLC_SRM_CNTL, data);
 }
 
-static void polaris11_init_power_gating(struct amdgpu_device *adev)
+static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
 {
        uint32_t data;
 
        if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
-                       AMD_PG_SUPPORT_GFX_SMG |
-                       AMD_PG_SUPPORT_GFX_DMG)) {
+                             AMD_PG_SUPPORT_GFX_SMG |
+                             AMD_PG_SUPPORT_GFX_DMG)) {
                data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
                data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
                data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
@@ -3693,6 +3837,53 @@ static void polaris11_init_power_gating(struct amdgpu_device *adev)
        }
 }
 
+static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
+                                               bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(mmRLC_PG_CNTL);
+
+       if (enable)
+               data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+
+       if (orig != data)
+               WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
+                                                 bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(mmRLC_PG_CNTL);
+
+       if (enable)
+               data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+
+       if (orig != data)
+               WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(mmRLC_PG_CNTL);
+
+       if (enable)
+               data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
+       else
+               data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
+
+       if (orig != data)
+               WREG32(mmRLC_PG_CNTL, data);
+}
+
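
All three cz_enable_* helpers above share one read-modify-write-if-changed idiom on RLC_PG_CNTL; only the mask differs, and cz_enable_cp_power_gating() inverts the polarity because its bit is a disable flag. The common shape, written as a hypothetical helper that is not part of the patch:

    static void rlc_pg_cntl_update_mask(struct amdgpu_device *adev,
                                        u32 mask, bool set)
    {
            u32 data, orig;

            orig = data = RREG32(mmRLC_PG_CNTL);

            if (set)
                    data |= mask;
            else
                    data &= ~mask;

            /* skip the MMIO write when nothing changed */
            if (orig != data)
                    WREG32(mmRLC_PG_CNTL, data);
    }
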
 static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
 {
        if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
@@ -3705,8 +3896,25 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
                gfx_v8_0_init_save_restore_list(adev);
                gfx_v8_0_enable_save_restore_machine(adev);
 
-               if (adev->asic_type == CHIP_POLARIS11)
-                       polaris11_init_power_gating(adev);
+               if ((adev->asic_type == CHIP_CARRIZO) ||
+                   (adev->asic_type == CHIP_STONEY)) {
+                       WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+                       gfx_v8_0_init_power_gating(adev);
+                       WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+                       if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+                               cz_enable_sck_slow_down_on_power_up(adev, true);
+                               cz_enable_sck_slow_down_on_power_down(adev, true);
+                       } else {
+                               cz_enable_sck_slow_down_on_power_up(adev, false);
+                               cz_enable_sck_slow_down_on_power_down(adev, false);
+                       }
+                       if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+                               cz_enable_cp_power_gating(adev, true);
+                       else
+                               cz_enable_cp_power_gating(adev, false);
+               } else if (adev->asic_type == CHIP_POLARIS11) {
+                       gfx_v8_0_init_power_gating(adev);
+               }
        }
 }
 
@@ -3974,11 +4182,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
                amdgpu_ring_write(ring, 0x3a00161a);
                amdgpu_ring_write(ring, 0x0000002e);
                break;
-       case CHIP_TOPAZ:
        case CHIP_CARRIZO:
                amdgpu_ring_write(ring, 0x00000002);
                amdgpu_ring_write(ring, 0x00000000);
                break;
+       case CHIP_TOPAZ:
+               amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
+                               0x00000000 : 0x00000002);
+               amdgpu_ring_write(ring, 0x00000000);
+               break;
        case CHIP_STONEY:
                amdgpu_ring_write(ring, 0x00000000);
                amdgpu_ring_write(ring, 0x00000000);
@@ -4941,7 +5153,7 @@ static int gfx_v8_0_soft_reset(void *handle)
  * Fetches a GPU clock counter snapshot.
  * Returns the 64 bit clock counter snapshot.
  */
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
        uint64_t clock;
 
@@ -5001,12 +5213,18 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
+static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
+       .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
+       .select_se_sh = &gfx_v8_0_select_se_sh,
+};
+
 static int gfx_v8_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
        adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
+       adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
        gfx_v8_0_set_ring_funcs(adev);
        gfx_v8_0_set_irq_funcs(adev);
        gfx_v8_0_set_gds_init(adev);
@@ -5039,51 +5257,43 @@ static int gfx_v8_0_late_init(void *handle)
        return 0;
 }
 
-static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
-               bool enable)
+static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
+                                                      bool enable)
 {
        uint32_t data, temp;
 
-       /* Send msg to SMU via Powerplay */
-       amdgpu_set_powergating_state(adev,
-                       AMD_IP_BLOCK_TYPE_SMC,
-                       enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+       if (adev->asic_type == CHIP_POLARIS11)
+               /* Send msg to SMU via Powerplay */
+               amdgpu_set_powergating_state(adev,
+                                            AMD_IP_BLOCK_TYPE_SMC,
+                                            enable ?
+                                            AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
 
-       if (enable) {
-               /* Enable static MGPG */
-               temp = data = RREG32(mmRLC_PG_CNTL);
+       temp = data = RREG32(mmRLC_PG_CNTL);
+       /* Enable static MGPG */
+       if (enable)
                data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
-       } else {
-               temp = data = RREG32(mmRLC_PG_CNTL);
+       else
                data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
 
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
-       }
+       if (temp != data)
+               WREG32(mmRLC_PG_CNTL, data);
 }
 
-static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
-               bool enable)
+static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
+                                                       bool enable)
 {
        uint32_t data, temp;
 
-       if (enable) {
-               /* Enable dynamic MGPG */
-               temp = data = RREG32(mmRLC_PG_CNTL);
+       temp = data = RREG32(mmRLC_PG_CNTL);
+       /* Enable dynamic MGPG */
+       if (enable)
                data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
-       } else {
-               temp = data = RREG32(mmRLC_PG_CNTL);
+       else
                data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
 
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
-       }
+       if (temp != data)
+               WREG32(mmRLC_PG_CNTL, data);
 }
 
 static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
@@ -5091,19 +5301,63 @@ static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *ade
 {
        uint32_t data, temp;
 
-       if (enable) {
-               /* Enable quick PG */
-               temp = data = RREG32(mmRLC_PG_CNTL);
-               data |= 0x100000;
+       temp = data = RREG32(mmRLC_PG_CNTL);
+       /* Enable quick PG */
+       if (enable)
+               data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
 
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
-       } else {
-               temp = data = RREG32(mmRLC_PG_CNTL);
-               data &= ~0x100000;
+       if (temp != data)
+               WREG32(mmRLC_PG_CNTL, data);
+}
 
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
+static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
+                                         bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(mmRLC_PG_CNTL);
+
+       if (enable)
+               data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+       if (orig != data)
+               WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
+                                               bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(mmRLC_PG_CNTL);
+
+       if (enable)
+               data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+
+       if (orig != data)
+               WREG32(mmRLC_PG_CNTL, data);
+
+       /* Read any GFX register to wake up GFX. */
+       if (!enable)
+               data = RREG32(mmDB_RENDER_CONTROL);
+}
+
+static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
+                                         bool enable)
+{
+       if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
+               cz_enable_gfx_cg_power_gating(adev, true);
+               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+                       cz_enable_gfx_pipeline_power_gating(adev, true);
+       } else {
+               cz_enable_gfx_cg_power_gating(adev, false);
+               cz_enable_gfx_pipeline_power_gating(adev, false);
        }
 }
 
@@ -5111,21 +5365,42 @@ static int gfx_v8_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       bool enable = (state == AMD_PG_STATE_GATE);
 
        if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
                return 0;
 
        switch (adev->asic_type) {
+       case CHIP_CARRIZO:
+       case CHIP_STONEY:
+               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
+                       cz_update_gfx_cg_power_gating(adev, enable);
+
+               if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+                       gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+               else
+                       gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+               if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+                       gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+               else
+                       gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+               break;
        case CHIP_POLARIS11:
-               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)
-                       polaris11_enable_gfx_static_mg_power_gating(adev,
-                                       state == AMD_PG_STATE_GATE ? true : false);
-               else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)
-                       polaris11_enable_gfx_dynamic_mg_power_gating(adev,
-                                       state == AMD_PG_STATE_GATE ? true : false);
+               if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+                       gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+               else
+                       gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+               if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+                       gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+               else
+                       gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+
+               if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
+                       polaris11_enable_gfx_quick_mg_power_gating(adev, true);
                else
-                       polaris11_enable_gfx_quick_mg_power_gating(adev,
-                                       state == AMD_PG_STATE_GATE ? true : false);
+                       polaris11_enable_gfx_quick_mg_power_gating(adev, false);
                break;
        default:
                break;
@@ -5139,7 +5414,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
 {
        uint32_t data;
 
-       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
        WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
        WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
@@ -5527,6 +5802,8 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
                        WREG32(mmRLC_CGCG_CGLS_CTRL, data);
        }
 
+       gfx_v8_0_wait_for_rlc_serdes(adev);
+
        adev->gfx.rlc.funcs->exit_safe_mode(adev);
 }
 static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -5652,17 +5929,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                      unsigned vm_id, bool ctx_switch)
 {
        u32 header, control = 0;
-       u32 next_rptr = ring->wptr + 5;
-
-       if (ctx_switch)
-               next_rptr += 2;
-
-       next_rptr += 4;
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
-       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
-       amdgpu_ring_write(ring, next_rptr);
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
        if (ctx_switch) {
@@ -5691,23 +5957,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                          struct amdgpu_ib *ib,
                                          unsigned vm_id, bool ctx_switch)
 {
-       u32 header, control = 0;
-       u32 next_rptr = ring->wptr + 5;
-
-       control |= INDIRECT_BUFFER_VALID;
-
-       next_rptr += 4;
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
-       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
-       amdgpu_ring_write(ring, next_rptr);
-
-       header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+       u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
 
-       control |= ib->length_dw | (vm_id << 24);
-
-       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
                                          (2 << 0) |
@@ -6160,9 +6412,9 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
 {
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
-       case CHIP_STONEY:
                adev->gfx.rlc.funcs = &iceland_rlc_funcs;
                break;
+       case CHIP_STONEY:
        case CHIP_CARRIZO:
                adev->gfx.rlc.funcs = &cz_rlc_funcs;
                break;
@@ -6200,6 +6452,20 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
        }
 }
 
+static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+                                                u32 bitmap)
+{
+       u32 data;
+
+       if (!bitmap)
+               return;
+
+       data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+       data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+       WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
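
gfx_v8_0_get_cu_info() below feeds this register from amdgpu_gfx_parse_disable_cu(), which fills a flat SE-major array. A sketch of that pairing under the 4 SE x 2 SH bounds used below; the real loop also selects the target SE/SH via gfx_v8_0_select_se_sh() before each write:

    unsigned disable_masks[4 * 2];  /* at most 4 SEs x 2 SHs on gfx v8 */
    int i, j;

    amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
    for (i = 0; i < 4; i++)
            for (j = 0; j < 2; j++)
                    gfx_v8_0_set_user_cu_inactive_bitmap(adev,
                                    disable_masks[i * 2 + j]);
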
 static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
 {
        u32 data, mask;
@@ -6220,16 +6486,22 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
        int i, j, k, counter, active_cu_number = 0;
        u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
        struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+       unsigned disable_masks[4 * 2];
 
        memset(cu_info, 0, sizeof(*cu_info));
 
+       amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        mask = 1;
                        ao_bitmap = 0;
                        counter = 0;
-                       gfx_v8_0_select_se_sh(adev, i, j);
+                       gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+                       if (i < 4 && j < 2)
+                               gfx_v8_0_set_user_cu_inactive_bitmap(
+                                       adev, disable_masks[i * 2 + j]);
                        bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
                        cu_info->bitmap[i][j] = bitmap;
 
@@ -6245,7 +6517,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
                        ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
                }
        }
-       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        cu_info->number = active_cu_number;
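
A worked example of the ao_cu_mask packing above: each SE gets a 16-bit slot and each SH an 8-bit slot within it, so the always-on bitmap for SE i / SH j lands at bit (i * 16 + j * 8). With illustrative values:

    u32 ao_cu_mask = 0;
    u32 ao_bitmap = 0xff;   /* all 8 CUs always-on, illustrative */
    int i = 1, j = 0;       /* SE 1, SH 0 */

    ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));  /* == 0x00ff0000 */
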
index 16a49f53a2fa2aecbf78296e87100dcd36426647..bc82c794312ca88c89bdd89cfe665c85aec74214 100644 (file)
@@ -26,7 +26,6 @@
 
 extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
 
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
 void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
 
 #endif
index 1feb6439cb0b7fd4768a232f5ad7520df0716c2a..d24a82bd0c7a119d7b450ee801f3b5ffa1dc51e3 100644 (file)
@@ -39,6 +39,7 @@
 
 static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v7_0_wait_for_idle(void *handle);
 
 MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
@@ -73,39 +74,15 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
        }
 }
 
-/**
- * gmc7_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
-       unsigned i;
-       u32 tmp;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               /* read MC_STATUS */
-               tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
-               if (!tmp)
-                       return 0;
-               udelay(1);
-       }
-       return -1;
-}
-
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
-                     struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
+                            struct amdgpu_mode_mc_save *save)
 {
        u32 blackout;
 
        if (adev->mode_info.num_crtc)
                amdgpu_display_stop_mc_access(adev, save);
 
-       amdgpu_asic_wait_for_mc_idle(adev);
+       gmc_v7_0_wait_for_idle((void *)adev);
 
        blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
        if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -120,8 +97,8 @@ void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
        udelay(100);
 }
 
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
-                       struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
+                              struct amdgpu_mode_mc_save *save)
 {
        u32 tmp;
 
@@ -311,7 +288,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
                amdgpu_display_set_vga_render_state(adev, false);
 
        gmc_v7_0_mc_stop(adev, &save);
-       if (amdgpu_asic_wait_for_mc_idle(adev)) {
+       if (gmc_v7_0_wait_for_idle((void *)adev)) {
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        /* Update configuration */
@@ -331,7 +308,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
        WREG32(mmMC_VM_AGP_BASE, 0);
        WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
        WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
-       if (amdgpu_asic_wait_for_mc_idle(adev)) {
+       if (gmc_v7_0_wait_for_idle((void *)adev)) {
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        gmc_v7_0_mc_resume(adev, &save);
@@ -1137,7 +1114,7 @@ static int gmc_v7_0_soft_reset(void *handle)
 
        if (srbm_soft_reset) {
                gmc_v7_0_mc_stop(adev, &save);
-               if (gmc_v7_0_wait_for_idle(adev)) {
+               if (gmc_v7_0_wait_for_idle((void *)adev)) {
                        dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
                }
 
index 36fcbbc46adae49eeffe708a2e27dacec34c40d6..0b386b5d2f7aabdc3b2fd89ca625733049fa7a89 100644 (file)
 
 extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
 
-/* XXX these shouldn't be exported */
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
-                     struct amdgpu_mode_mc_save *save);
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
-                       struct amdgpu_mode_mc_save *save);
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
 #endif
index 9945d5bbf1fe06244bf950167f09589b266b2a23..717359d3ba8c506b77b09d46663b7afc598e9df9 100644 (file)
@@ -41,6 +41,7 @@
 
 static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v8_0_wait_for_idle(void *handle);
 
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
@@ -147,44 +148,15 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
        }
 }
 
-/**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
-       unsigned i;
-       u32 tmp;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               /* read MC_STATUS */
-               tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
-                                              SRBM_STATUS__MCB_BUSY_MASK |
-                                              SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
-                                              SRBM_STATUS__MCC_BUSY_MASK |
-                                              SRBM_STATUS__MCD_BUSY_MASK |
-                                              SRBM_STATUS__VMC1_BUSY_MASK);
-               if (!tmp)
-                       return 0;
-               udelay(1);
-       }
-       return -1;
-}
-
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
-                     struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
+                            struct amdgpu_mode_mc_save *save)
 {
        u32 blackout;
 
        if (adev->mode_info.num_crtc)
                amdgpu_display_stop_mc_access(adev, save);
 
-       amdgpu_asic_wait_for_mc_idle(adev);
+       gmc_v8_0_wait_for_idle(adev);
 
        blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
        if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -199,8 +171,8 @@ void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
        udelay(100);
 }
 
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
-                       struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
+                              struct amdgpu_mode_mc_save *save)
 {
        u32 tmp;
 
@@ -393,7 +365,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
                amdgpu_display_set_vga_render_state(adev, false);
 
        gmc_v8_0_mc_stop(adev, &save);
-       if (amdgpu_asic_wait_for_mc_idle(adev)) {
+       if (gmc_v8_0_wait_for_idle((void *)adev)) {
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        /* Update configuration */
@@ -413,7 +385,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
        WREG32(mmMC_VM_AGP_BASE, 0);
        WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
        WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
-       if (amdgpu_asic_wait_for_mc_idle(adev)) {
+       if (gmc_v8_0_wait_for_idle((void *)adev)) {
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        gmc_v8_0_mc_resume(adev, &save);
@@ -1140,7 +1112,7 @@ static int gmc_v8_0_soft_reset(void *handle)
 
        if (srbm_soft_reset) {
                gmc_v8_0_mc_stop(adev, &save);
-               if (gmc_v8_0_wait_for_idle(adev)) {
+               if (gmc_v8_0_wait_for_idle((void *)adev)) {
                        dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
                }
 
index 973436086b380f966e83b56ef58b74a665ba21ce..fc5001a8119dabab06e0e45888c6670b48163e77 100644 (file)
 
 extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
 
-/* XXX these shouldn't be exported */
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
-                     struct amdgpu_mode_mc_save *save);
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
-                       struct amdgpu_mode_mc_save *save);
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
 #endif
index 460bc8ad37e6252a910c068e5114bcc687350faf..825ccd63f2dc313ced6199d476f0b96d8b8cdba6 100644 (file)
@@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle)
 
 static int iceland_dpm_sw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
 
index a789a863d677faf641bb626022123ec20a46082f..5a0e245771ce4accc8d7911ca061fc9658a10d15 100644 (file)
@@ -507,19 +507,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
            pi->caps_db_ramping ||
            pi->caps_td_ramping ||
            pi->caps_tcp_ramping) {
-               gfx_v7_0_enter_rlc_safe_mode(adev);
+               adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
                if (enable) {
                        ret = kv_program_pt_config_registers(adev, didt_config_kv);
                        if (ret) {
-                               gfx_v7_0_exit_rlc_safe_mode(adev);
+                               adev->gfx.rlc.funcs->exit_safe_mode(adev);
                                return ret;
                        }
                }
 
                kv_do_enable_didt(adev, enable);
 
-               gfx_v7_0_exit_rlc_safe_mode(adev);
+               adev->gfx.rlc.funcs->exit_safe_mode(adev);
        }
 
        return 0;
index f4c3130d3fdb20eee732b7c4e6e123ca008906a1..ac3730a6e49f9cfe62e803b4038d83575535e453 100644 (file)
@@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
        }
 }
 
+static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
+{
+       int i;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               release_firmware(adev->sdma.instance[i].fw);
+               adev->sdma.instance[i].fw = NULL;
+       }
+}
+
 /**
  * sdma_v2_4_init_microcode - load ucode images from disk
  *
@@ -246,19 +255,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
                                   unsigned vm_id, bool ctx_switch)
 {
        u32 vmid = vm_id & 0xf;
-       u32 next_rptr = ring->wptr + 5;
-
-       while ((next_rptr & 7) != 2)
-               next_rptr++;
-
-       next_rptr += 6;
-
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-                         SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
-       amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
-       amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
-       amdgpu_ring_write(ring, next_rptr);
 
        /* IB packet must end on a 8 DW boundary */
        sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
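
The padding formula keeps the write pointer at 2 (mod 8) before the indirect-buffer packet, so the 6-DW packet that follows ends exactly on the 8-DW boundary the comment demands. A worked check of the arithmetic:

    static unsigned sdma_pad_count(u32 wptr)
    {
            /* after n nops, (wptr + n) % 8 == 2 */
            return (10 - (wptr & 7)) % 8;
    }

    /* sdma_pad_count(2) == 0, sdma_pad_count(3) == 7, sdma_pad_count(0) == 2 */
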
@@ -461,6 +457,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -489,7 +487,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
 
+       sdma_v2_4_enable(adev, true);
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -580,8 +582,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
                        return -EINVAL;
        }
 
-       /* unhalt the MEs */
-       sdma_v2_4_enable(adev, true);
+       /* halt the engine before programming */
+       sdma_v2_4_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v2_4_gfx_resume(adev);
@@ -1012,6 +1014,7 @@ static int sdma_v2_4_sw_fini(void *handle)
        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+       sdma_v2_4_free_microcode(adev);
        return 0;
 }
 
index 31d99b0010f79a032540b06cf0bc3c04d498f78c..f00db6f4c04cd6c283f1176364c8a0dcdbba3f74 100644 (file)
@@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
        }
 }
 
+static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
+{
+       int i;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               release_firmware(adev->sdma.instance[i].fw);
+               adev->sdma.instance[i].fw = NULL;
+       }
+}
+
 /**
  * sdma_v3_0_init_microcode - load ucode images from disk
  *
@@ -406,18 +415,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   unsigned vm_id, bool ctx_switch)
 {
        u32 vmid = vm_id & 0xf;
-       u32 next_rptr = ring->wptr + 5;
-
-       while ((next_rptr & 7) != 2)
-               next_rptr++;
-       next_rptr += 6;
-
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-                         SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
-       amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
-       amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
-       amdgpu_ring_write(ring, next_rptr);
 
        /* IB packet must end on a 8 DW boundary */
        sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -672,6 +669,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -711,7 +710,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
 
+       /* unhalt the MEs */
+       sdma_v3_0_enable(adev, true);
+       /* enable sdma ring preemption */
+       sdma_v3_0_ctx_switch_enable(adev, true);
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -804,10 +811,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       /* unhalt the MEs */
-       sdma_v3_0_enable(adev, true);
-       /* enable sdma ring preemption */
-       sdma_v3_0_ctx_switch_enable(adev, true);
+       /* disable the SDMA engine before programming it */
+       sdma_v3_0_ctx_switch_enable(adev, false);
+       sdma_v3_0_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v3_0_gfx_resume(adev);
@@ -1247,6 +1253,7 @@ static int sdma_v3_0_sw_fini(void *handle)
        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+       sdma_v3_0_free_microcode(adev);
        return 0;
 }
 
index b7615cefcac40b5d2607a194b5184ed2fe3a9bcf..f06f6f4dc3a8a93730a38050d269d6df00e3cc1e 100644 (file)
@@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle)
 
 static int tonga_dpm_sw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
 
index f07551476a70db5ba2fd4b7c6c3eb3f87a2889b1..416c8567d3ed1054c2b4be75b8f83d57b9da5604 100644 (file)
@@ -34,6 +34,8 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
+#include "bif/bif_4_1_d.h"
+
 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
 static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -438,6 +440,32 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
        amdgpu_ring_write(ring, 2);
 }
 
+/**
+ * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+       amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v4_2_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+       amdgpu_ring_write(ring, 1);
+}
+
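
Both helpers use the same type-0 packet idiom: PACKET0(reg, count) heads a write of count + 1 DWs starting at the given register, so each pair of ring writes here programs a single register. The shape, as an illustrative helper rather than anything in the patch:

    static void example_ring_wreg(struct amdgpu_ring *ring, u32 reg, u32 val)
    {
            amdgpu_ring_write(ring, PACKET0(reg, 0));   /* one register */
            amdgpu_ring_write(ring, val);               /* its new value */
    }
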
 /**
  * uvd_v4_2_ring_test_ring - register write test
  *
@@ -763,6 +791,8 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v4_2_ring_emit_ib,
        .emit_fence = uvd_v4_2_ring_emit_fence,
+       .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
        .test_ring = uvd_v4_2_ring_test_ring,
        .test_ib = uvd_v4_2_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
index e0a76a883d46bab43513df7b3e1b5f9e3cda90dd..dd636c4c4b089d68593f1ffbddd05fab452d662c 100644 (file)
@@ -31,6 +31,7 @@
 #include "uvd/uvd_5_0_sh_mask.h"
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
+#include "bif/bif_5_0_d.h"
 #include "vi.h"
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -488,6 +489,32 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
        amdgpu_ring_write(ring, 2);
 }
 
+/**
+ * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+       amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v5_0_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+       amdgpu_ring_write(ring, 1);
+}
+
 /**
  * uvd_v5_0_ring_test_ring - register write test
  *
@@ -815,6 +842,8 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v5_0_ring_emit_ib,
        .emit_fence = uvd_v5_0_ring_emit_fence,
+       .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v5_0_ring_test_ring,
        .test_ib = uvd_v5_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
index c9929d665c015195837cf1dc587b4e8c5ee371ba..07e9a987fbee413f6d21d9323dbd8045b7ac4d97 100644 (file)
@@ -33,6 +33,7 @@
 #include "oss/oss_2_0_sh_mask.h"
 #include "smu/smu_7_1_3_d.h"
 #include "smu/smu_7_1_3_sh_mask.h"
+#include "bif/bif_5_1_d.h"
 #include "vi.h"
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -385,8 +386,8 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        uint32_t mp_swap_cntl;
        int i, j, r;
 
-       /*disable DPG */
-       WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));
+       /* disable DPG */
+       WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 
        /* disable byte swapping */
        lmi_swap_cntl = 0;
@@ -405,17 +406,21 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        }
 
        /* disable interupt */
-       WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
+       WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
 
        /* stall UMC and register bus before resetting VCPU */
-       WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+       WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
        mdelay(1);
 
        /* put LMI, VCPU, RBC etc... into reset */
-       WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
-               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
-               UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
-               UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+       WREG32(mmUVD_SOFT_RESET,
+               UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);
 
@@ -424,8 +429,13 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        mdelay(5);
 
        /* initialize UVD memory controller */
-       WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
-                            (1 << 21) | (1 << 9) | (1 << 20));
+       WREG32(mmUVD_LMI_CTRL,
+               (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+               UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+               UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+               UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+               UVD_LMI_CTRL__REQ_MODE_MASK |
+               UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
 
 #ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
@@ -447,10 +457,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        mdelay(5);
 
        /* enable VCPU clock */
-       WREG32(mmUVD_VCPU_CNTL,  1 << 9);
+       WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
 
        /* enable UMC */
-       WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+       WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 
        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
@@ -484,10 +494,12 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
                return r;
        }
        /* enable master interrupt */
-       WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));
+       WREG32_P(mmUVD_MASTINT_EN,
+               (UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
+               ~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));
 
        /* clear the bit 4 of UVD_STATUS */
-       WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));
+       WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
 
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = 0;
@@ -580,6 +592,32 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
        amdgpu_ring_write(ring, 2);
 }
 
+/**
+ * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+       amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+       amdgpu_ring_write(ring, 1);
+}
+
 /**
  * uvd_v6_0_ring_test_ring - register write test
  *
@@ -847,7 +885,8 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
        bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
        static int curstate = -1;
 
-       if (adev->asic_type == CHIP_FIJI)
+       if (adev->asic_type == CHIP_FIJI ||
+                       adev->asic_type == CHIP_POLARIS10)
                uvd_v6_set_bypass_mode(adev, enable);
 
        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
@@ -919,6 +958,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
+       .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = uvd_v6_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
index 2c88d0b66cf396b35016b169c04091feaa2cc6e5..cda7def9dc2c1be7c84d1e4df2c0688065455ff2 100644 (file)
@@ -203,6 +203,28 @@ static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 }
 
+static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+       WREG32(mmGC_CAC_IND_INDEX, (reg));
+       r = RREG32(mmGC_CAC_IND_DATA);
+       spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+       return r;
+}
+
+static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+       WREG32(mmGC_CAC_IND_INDEX, (reg));
+       WREG32(mmGC_CAC_IND_DATA, (v));
+       spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+}
+
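
Like the didt helpers earlier in this file, these wrap an index/data register pair, and the spinlock must span both MMIO accesses: a concurrent caller could otherwise retarget GC_CAC_IND_INDEX between the index write and the data access. Callers reach them through the adev hooks wired up in vi_common_early_init() below; a hypothetical use with an illustrative index:

    u32 val;

    val = adev->gc_cac_rreg(adev, 0x1);         /* 0x1: illustrative index */
    adev->gc_cac_wreg(adev, 0x1, val | 0x1);    /* write it back, modified */
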
 static const u32 tonga_mgcg_cgcg_init[] =
 {
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
@@ -421,6 +444,20 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
+static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+{
+       u32 caps = 0;
+       u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+
+       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+               caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+
+       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+               caps |= AMDGPU_VIRT_CAPS_IS_VF;
+
+       return caps;
+}
+
 static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
        {mmGB_MACROTILE_MODE7, true},
 };
@@ -519,12 +556,12 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
 
        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               gfx_v8_0_select_se_sh(adev, se_num, sh_num);
+               amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
        val = RREG32(reg_offset);
 
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
 }
@@ -583,7 +620,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
        return -EINVAL;
 }
 
-static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 {
        u32 i;
 
@@ -598,11 +635,14 @@ static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
-               if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
-                       break;
+               if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+                       /* enable BM */
+                       pci_set_master(adev->pdev);
+                       return 0;
+               }
                udelay(1);
        }
-
+       return -EINVAL;
 }
 
 static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -628,13 +668,15 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun
  */
 static int vi_asic_reset(struct amdgpu_device *adev)
 {
+       int r;
+
        vi_set_bios_scratch_engine_hung(adev, true);
 
-       vi_gpu_pci_config_reset(adev);
+       r = vi_gpu_pci_config_reset(adev);
 
        vi_set_bios_scratch_engine_hung(adev, false);
 
-       return 0;
+       return r;
 }
 
 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -1118,9 +1160,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
        .get_xclk = &vi_get_xclk,
        .set_uvd_clocks = &vi_set_uvd_clocks,
        .set_vce_clocks = &vi_set_vce_clocks,
-       /* these should be moved to their own ip modules */
-       .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
-       .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
+       .get_virtual_caps = &vi_get_virtual_caps,
 };
 
 static int vi_common_early_init(void *handle)
@@ -1141,6 +1181,8 @@ static int vi_common_early_init(void *handle)
        adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
        adev->didt_rreg = &vi_didt_rreg;
        adev->didt_wreg = &vi_didt_wreg;
+       adev->gc_cac_rreg = &vi_gc_cac_rreg;
+       adev->gc_cac_wreg = &vi_gc_cac_wreg;
 
        adev->asic_funcs = &vi_asic_funcs;
 
@@ -1207,19 +1249,38 @@ static int vi_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS;
+               /* rev0 hardware doesn't support PG */
                adev->pg_flags = 0;
+               if (adev->rev_id != 0x00)
+                       adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+                               AMD_PG_SUPPORT_GFX_SMG |
+                               AMD_PG_SUPPORT_GFX_DMG |
+                               AMD_PG_SUPPORT_CP |
+                               AMD_PG_SUPPORT_RLC_SMU_HS |
+                               AMD_PG_SUPPORT_GFX_PIPELINE;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        case CHIP_STONEY:
                adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
                        AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CGTS_LS |
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS;
-               adev->pg_flags = 0;
+               adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
+                       AMD_PG_SUPPORT_GFX_SMG |
+                       AMD_PG_SUPPORT_GFX_DMG |
+                       AMD_PG_SUPPORT_GFX_PIPELINE |
+                       AMD_PG_SUPPORT_CP |
+                       AMD_PG_SUPPORT_RLC_SMU_HS;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        default:
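Both cg_flags and pg_flags are plain OR-ed bitmasks that the individual IP blocks test before touching their clock- or power-gating registers; note how Carrizo keeps pg_flags at 0 for rev0 parts while Stoney now enables the full GFX powergating set unconditionally. A small sketch of the compose-and-test pattern (flag values are local stand-ins in the AMD_PG_SUPPORT_* style):

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins in the AMD_PG_SUPPORT_* style (1 << n bit flags). */
#define PG_SUPPORT_GFX_PG  (1u << 0)
#define PG_SUPPORT_CP      (1u << 5)

int main(void)
{
        uint32_t pg_flags = 0;
        unsigned int rev_id = 1;        /* rev0 parts keep pg_flags == 0 */

        if (rev_id != 0)
                pg_flags |= PG_SUPPORT_GFX_PG | PG_SUPPORT_CP;

        /* How an IP block later gates its powergating programming: */
        if (pg_flags & PG_SUPPORT_GFX_PG)
                printf("GFX powergating enabled\n");
        return 0;
}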
index ac005796b71c1491fb509bc3705b6e5f5c75442a..4f3849ac8c0770d1dbde09f9d6e371fb24c01e94 100644 (file)
@@ -63,13 +63,12 @@ static struct kfd_process *create_process(const struct task_struct *thread);
 void kfd_process_create_wq(void)
 {
        if (!kfd_process_wq)
-               kfd_process_wq = create_workqueue("kfd_process_wq");
+               kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
 }
 
 void kfd_process_destroy_wq(void)
 {
        if (kfd_process_wq) {
-               flush_workqueue(kfd_process_wq);
                destroy_workqueue(kfd_process_wq);
                kfd_process_wq = NULL;
        }
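The workqueue change swaps the legacy create_workqueue() wrapper for a direct alloc_workqueue() call; flags 0 and max_active 0 request an ordinary queue with default concurrency (presumably this queue needs no WQ_MEM_RECLAIM guarantee), and the explicit flush_workqueue() can go because destroy_workqueue() already drains pending work. A sketch of the resulting lifecycle, assuming kernel context:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_create(void)
{
        /* flags = 0, max_active = 0: an ordinary workqueue with defaults */
        example_wq = alloc_workqueue("example_wq", 0, 0);
        return example_wq ? 0 : -ENOMEM;
}

static void example_destroy(void)
{
        if (example_wq) {
                destroy_workqueue(example_wq);  /* drains pending work itself */
                example_wq = NULL;
        }
}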
@@ -242,13 +241,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
        pqm_uninit(&p->pqm);
 
        /* Iterate over all process device data structures and check
-        * if we should reset all wavefronts */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+        * if we should delete debug managers and reset all wavefronts
+        */
+       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+               if ((pdd->dev->dbgmgr) &&
+                               (pdd->dev->dbgmgr->pasid == p->pasid))
+                       kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
+
                if (pdd->reset_wavefronts) {
                        pr_warn("amdkfd: Resetting all wave fronts\n");
                        dbgdev_wave_reset_wavefronts(pdd->dev, p);
                        pdd->reset_wavefronts = false;
                }
+       }
 
        mutex_unlock(&p->mutex);
 
@@ -324,6 +329,7 @@ static struct kfd_process *create_process(const struct task_struct *thread)
        synchronize_rcu();
        mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
 err_mmu_notifier:
+       mutex_destroy(&process->mutex);
        kfd_pasid_free(process->pasid);
 err_alloc_pasid:
        kfree(process->queues);
@@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
 
        idx = srcu_read_lock(&kfd_processes_srcu);
 
+       /*
+        * Look for the process that matches the pasid. If there is no such
+        * process, we either released it in amdkfd's own notifier, or there
+        * is a bug. Unfortunately, there is no way to tell...
+        */
        hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
-               if (p->pasid == pasid)
-                       break;
+               if (p->pasid == pasid) {
 
-       srcu_read_unlock(&kfd_processes_srcu, idx);
+                       srcu_read_unlock(&kfd_processes_srcu, idx);
 
-       BUG_ON(p->pasid != pasid);
+                       pr_debug("Unbinding process %d from IOMMU\n", pasid);
 
-       mutex_lock(&p->mutex);
+                       mutex_lock(&p->mutex);
 
-       if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
-               kfd_dbgmgr_destroy(dev->dbgmgr);
+                       if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+                               kfd_dbgmgr_destroy(dev->dbgmgr);
 
-       pqm_uninit(&p->pqm);
+                       pqm_uninit(&p->pqm);
 
-       pdd = kfd_get_process_device_data(dev, p);
+                       pdd = kfd_get_process_device_data(dev, p);
 
-       if (!pdd) {
-               mutex_unlock(&p->mutex);
-               return;
-       }
+                       if (!pdd) {
+                               mutex_unlock(&p->mutex);
+                               return;
+                       }
 
-       if (pdd->reset_wavefronts) {
-               dbgdev_wave_reset_wavefronts(pdd->dev, p);
-               pdd->reset_wavefronts = false;
-       }
+                       if (pdd->reset_wavefronts) {
+                               dbgdev_wave_reset_wavefronts(pdd->dev, p);
+                               pdd->reset_wavefronts = false;
+                       }
 
-       /*
-        * Just mark pdd as unbound, because we still need it to call
-        * amd_iommu_unbind_pasid() in when the process exits.
-        * We don't call amd_iommu_unbind_pasid() here
-        * because the IOMMU called us.
-        */
-       pdd->bound = false;
+                       /*
+                        * Just mark pdd as unbound, because we still need it
+                        * to call amd_iommu_unbind_pasid() when the process
+                        * exits.
+                        * We don't call amd_iommu_unbind_pasid() here
+                        * because the IOMMU called us.
+                        */
+                       pdd->bound = false;
 
-       mutex_unlock(&p->mutex);
+                       mutex_unlock(&p->mutex);
+
+                       return;
+               }
+
+       srcu_read_unlock(&kfd_processes_srcu, idx);
 }
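The unbind rewrite keeps the whole lookup inside the SRCU read-side critical section. The old code broke out of the loop, dropped the SRCU lock, and then dereferenced the iterator unconditionally; with no matching pasid the BUG_ON dereferenced a NULL iterator, even though a missing process is legal (it may already have been released by amdkfd's own notifier, as the new comment notes). A sketch of the safe lookup shape, assuming kernel context:

#include <linux/hashtable.h>
#include <linux/srcu.h>

struct proc_entry {
        unsigned int pasid;
        struct hlist_node node;
};

static DEFINE_HASHTABLE(proc_table, 4);
DEFINE_STATIC_SRCU(proc_srcu);

static struct proc_entry *find_by_pasid(unsigned int pasid)
{
        struct proc_entry *p;
        int bkt, idx;

        idx = srcu_read_lock(&proc_srcu);
        hash_for_each_rcu(proc_table, bkt, p, node) {
                if (p->pasid == pasid) {
                        srcu_read_unlock(&proc_srcu, idx);
                        return p;   /* caller serializes via its own mutex */
                }
        }
        srcu_read_unlock(&proc_srcu, idx);
        return NULL;                /* not found: legal, not a BUG */
}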
 
 struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
index 74909e72a00929c8a87b73c9e25afb633fb50cf4..884c96f50c3d9b62ccc4c9f81e20570d41d72f46 100644 (file)
@@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                        dev->node_props.simd_count);
 
        if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
-               pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+               pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
                                dev->node_props.mem_banks_count,
                                dev->mem_bank_count);
                sysfs_show_32bit_prop(buffer, "mem_banks_count",
index 7c2a916c1e63c3e6a00bfeafbda7a92444725085..5eb895fd98bfb6a65de4fd16e595d85c12d7830d 100644 (file)
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK   0x0000FFFF
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT  0
 
+/* PCIe gen: chipset supports gen 1/2, ASIC supports gen 1/2/3 */
+#define AMDGPU_DEFAULT_PCIE_GEN_MASK (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 \
+                                     | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 \
+                                     | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 \
+                                     | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 \
+                                     | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
+
 /* The following flags show the PCIe lane-width switching supported by the driver, as decided by the chipset and ASIC */
 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1          0x00010000
 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2          0x00020000
 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32         0x00400000
 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT       16
 
+/* 1/2/4/8/16 lanes */
+#define AMDGPU_DEFAULT_PCIE_MLW_MASK (CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 \
+                                     | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 \
+                                     | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 \
+                                     | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \
+                                     | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+
 #endif
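These two defaults name the magic fallbacks used further down this series (fiji_hwmgr_backend_init previously hardcoded 0x30007 and 0x2f0000 when cgs_query_system_info() failed; the hunks below replace them with these masks). Decoding a width mask is a simple bit test per lane count; a runnable sketch using the same bit layout (the X16 value of 0x00200000 is inferred from the doubling pattern between X2 and X32 shown above):

#include <stdio.h>
#include <stdint.h>

/* Bit layout mirrors CAIL_PCIE_LINK_WIDTH_SUPPORT_*. */
#define LINK_WIDTH_X1   0x00010000u
#define LINK_WIDTH_X2   0x00020000u
#define LINK_WIDTH_X4   0x00040000u
#define LINK_WIDTH_X8   0x00080000u
#define LINK_WIDTH_X16  0x00200000u

int main(void)
{
        /* The default MLW mask: equals the old magic fallback 0x2f0000. */
        uint32_t lane_cap = LINK_WIDTH_X1 | LINK_WIDTH_X2 | LINK_WIDTH_X4 |
                            LINK_WIDTH_X8 | LINK_WIDTH_X16;
        static const struct { uint32_t bit; int lanes; } widths[] = {
                { LINK_WIDTH_X1, 1 }, { LINK_WIDTH_X2, 2 }, { LINK_WIDTH_X4, 4 },
                { LINK_WIDTH_X8, 8 }, { LINK_WIDTH_X16, 16 },
        };
        unsigned int i;

        printf("lane_cap = 0x%x\n", lane_cap);
        for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
                if (lane_cap & widths[i].bit)
                        printf("x%d supported\n", widths[i].lanes);
        return 0;
}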
index 6080951d539d0d3fd4c85390a0afa18ce2df8cd7..a74a0d2ff1cac6008be2b634184f9edc42185b10 100644 (file)
 
 #define AMD_MAX_USEC_TIMEOUT           100000  /* 100 ms */
 
-/*
-* Supported GPU families (aligned with amdgpu_drm.h)
-*/
-#define AMD_FAMILY_UNKNOWN              0
-#define AMD_FAMILY_CI                   120 /* Bonaire, Hawaii */
-#define AMD_FAMILY_KV                   125 /* Kaveri, Kabini, Mullins */
-#define AMD_FAMILY_VI                   130 /* Iceland, Tonga */
-#define AMD_FAMILY_CZ                   135 /* Carrizo */
-
 /*
  * Supported ASIC types
  */
@@ -120,6 +111,8 @@ enum amd_powergating_state {
 #define AMD_PG_SUPPORT_SDMA                    (1 << 8)
 #define AMD_PG_SUPPORT_ACP                     (1 << 9)
 #define AMD_PG_SUPPORT_SAMU                    (1 << 10)
+#define AMD_PG_SUPPORT_GFX_QUICK_MG            (1 << 11)
+#define AMD_PG_SUPPORT_GFX_PIPELINE            (1 << 12)
 
 enum amd_pm_state_type {
        /* not used for dpm */
@@ -157,6 +150,7 @@ struct amd_ip_funcs {
        int (*hw_init)(void *handle);
        /* tears down the hw state */
        int (*hw_fini)(void *handle);
+       void (*late_fini)(void *handle);
        /* handles IP specific hw/sw changes for suspend */
        int (*suspend)(void *handle);
        /* handles IP specific hw/sw changes for resume */
index ebaf67bb1589bc8f85b77c822990d9cbc83f4a23..90ff7c8a6011e84a6d1d67258cb1a941ecac8d42 100644 (file)
 #define mmDC_EDC_CSINVOC_CNT                                                    0x3192
 #define mmDC_EDC_RESTORE_CNT                                                    0x3193
 
+#define mmGC_CAC_IND_INDEX                                                      0x129a
+#define mmGC_CAC_IND_DATA                                                       0x129b
+
 #endif /* GFX_8_0_D_H */
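GC_CAC registers sit behind an index/data pair: software writes the target offset to mmGC_CAC_IND_INDEX, then reads or writes mmGC_CAC_IND_DATA. This is what the vi_gc_cac_rreg/vi_gc_cac_wreg callbacks installed in vi_common_early_init() above implement. A sketch of the read side, modeled on the driver's other indirect accessors (didt_rreg and friends); the gc_cac_idx_lock field name is an assumption:

/* Sketch: serialize the shared index/data pair under a spinlock. */
static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
        WREG32(mmGC_CAC_IND_INDEX, reg);  /* select the indirect register */
        r = RREG32(mmGC_CAC_IND_DATA);    /* fetch its value */
        spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
        return r;
}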
index 7d722458d9f51a7bb05801040fa32edf68bfdb52..4070ca3a68eb9b361a45303d3ee5c44571a7f55c 100644 (file)
 #define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
 #define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000
 #define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
-#define RLC_GPM_STAT__RESERVED_MASK 0xfc0000
-#define RLC_GPM_STAT__RESERVED__SHIFT 0x12
 #define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000
 #define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
 #define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f
 #define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
 #define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000
 #define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13
-#define RLC_PG_CNTL__RESERVED1_MASK 0xf00000
-#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14
+#define RLC_PG_CNTL__QUICK_PG_ENABLE_MASK 0x100000
+#define RLC_PG_CNTL__QUICK_PG_ENABLE__SHIFT 0x14
+#define RLC_PG_CNTL__RESERVED1_MASK 0xe00000
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x15
 #define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff
 #define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
 #define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00
 #define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
 #define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff
 #define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
-#define RLC_PG_DELAY_3__RESERVED_MASK 0xffffff00
-#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
 #define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff
 #define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
 #define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff
 #define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8
 #define RLC_SRM_DEBUG__DATA_MASK 0xffffffff
 #define RLC_SRM_DEBUG__DATA__SHIFT 0x0
-#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x3ff
-#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
-#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xfffffc00
-#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xa
 #define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff
 #define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
-#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x3ff
-#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
 #define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00
 #define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa
 #define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff
 #define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
 #define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000
 #define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
-#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0xff000000
-#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
 #define VGT_TF_RING_SIZE__SIZE_MASK 0xffff
 #define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
 #define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1
 #define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
 #define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
 #define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
 #define DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff
 #define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0
 #define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000
 #define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
 #define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
 #define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x6
 #define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff
 #define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0
 #define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000
 #define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
 #define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
 #define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
 #define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff
 #define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0
 #define DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000
 #define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
 #define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
 #define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
 #define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff
 #define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
 #define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000
 #define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
 #define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
 #define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x6
 #define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff
 #define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0
 #define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000
 #define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000
 #define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18
 
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK    0x00000001
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT  0x00000000
+
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK       0x0000007e
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK       0x00001f80L
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT     0x00000001
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT     0x00000007
+
+#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK   0x1fffe000L
+#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK                  0xe0000000L
+#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT                0x0000001d
+
+#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK       0x00000001L
+#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT     0x00000000
+
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK       0x00007ffeL
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT     0x00000001
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK       0x1fff8000L
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT     0x0000000f
+
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK    0x00000001L
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT  0x00000000
+
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK       0x0000007eL
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK       0x00001f80L
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT     0x00000001
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT     0x00000007
+
+#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK   0x1fffe000L
+#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK     0x00000fc0L
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK     0x0003f000L
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT   0x00000006
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT   0x0000000c
+
+#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK       0x00000001L
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK       0x00007ffeL
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK       0x1fff8000L
+
+#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT     0x00000000
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT     0x00000001
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT     0x0000000f
+
+#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK                  0xe0000000L
+#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT                0x0000001d
+
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK     0x00000fc0L
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK     0x0003f000L
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT   0x00000006
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT   0x0000000c
+
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK   0x00000001L
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK      0x0000007eL
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK      0x00001f80L
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT    0x00000001
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT    0x00000007
+
+#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK  0x1fffe000L
+#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK                 0xe0000000L
+#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT               0x0000001d
+
+#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK      0x00000001L
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK      0x00007ffeL
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK      0x1fff8000L
+#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT    0x00000000
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT    0x00000001
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT    0x0000000f
+
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK    0x00000fc0L
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK    0x0003f000L
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT  0x00000006
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT  0x0000000c
+
 #endif /* GFX_8_0_SH_MASK_H */
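The DIDT register maps were repartitioned here: bits that used to be UNUSED_* in the CTRL0 registers now carry MAX_STALLS_ALLOWED fields, and the dropped UNUSED masks reappear as private definitions in fiji_powertune.h later in this diff. Every field follows the same MASK/SHIFT convention: extract with (reg & MASK) >> SHIFT, insert with the inverse, and values such as DIDT_STALL_DELAY are split across _HI/_LO halves. A runnable sketch using the DIDT_SQ_STALL_CTRL delay fields:

#include <stdio.h>
#include <stdint.h>

#define STALL_DELAY_HI_MASK   0x0000007eu   /* layout from DIDT_SQ_STALL_CTRL above */
#define STALL_DELAY_HI_SHIFT  1
#define STALL_DELAY_LO_MASK   0x00001f80u
#define STALL_DELAY_LO_SHIFT  7

static uint32_t field_get(uint32_t reg, uint32_t mask, unsigned int shift)
{
        return (reg & mask) >> shift;
}

static uint32_t field_set(uint32_t reg, uint32_t mask, unsigned int shift, uint32_t v)
{
        return (reg & ~mask) | ((v << shift) & mask);
}

int main(void)
{
        uint32_t reg = 0;

        reg = field_set(reg, STALL_DELAY_HI_MASK, STALL_DELAY_HI_SHIFT, 0x2a);
        reg = field_set(reg, STALL_DELAY_LO_MASK, STALL_DELAY_LO_SHIFT, 0x15);
        printf("reg=0x%08x hi=0x%x lo=0x%x\n", reg,
               field_get(reg, STALL_DELAY_HI_MASK, STALL_DELAY_HI_SHIFT),
               field_get(reg, STALL_DELAY_LO_MASK, STALL_DELAY_LO_SHIFT));
        return 0;
}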
index 32f3e345de0809c8c04b62d59398b971c05be07e..3493da5c8f0efcaef97f2b07145d91097665b703 100644 (file)
@@ -5538,6 +5538,78 @@ typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_5
   ULONG  ulReserved[12];
 }ATOM_ASIC_PROFILING_INFO_V3_5;
 
+/* for Polaris10/11 AVFS parameters */
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_6
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  USHORT usLkgEuseIndex;
+  UCHAR  ucLkgEfuseBitLSB;
+  UCHAR  ucLkgEfuseLength;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMax;
+  ULONG  ulLkgEncodeMin;
+  EFUSE_LINEAR_FUNC_PARAM sRoFuse;
+  ULONG  ulEvvDefaultVddc;
+  ULONG  ulEvvNoCalcVddc;
+  ULONG  ulSpeed_Model;
+  ULONG  ulSM_A0;
+  ULONG  ulSM_A1;
+  ULONG  ulSM_A2;
+  ULONG  ulSM_A3;
+  ULONG  ulSM_A4;
+  ULONG  ulSM_A5;
+  ULONG  ulSM_A6;
+  ULONG  ulSM_A7;
+  UCHAR  ucSM_A0_sign;
+  UCHAR  ucSM_A1_sign;
+  UCHAR  ucSM_A2_sign;
+  UCHAR  ucSM_A3_sign;
+  UCHAR  ucSM_A4_sign;
+  UCHAR  ucSM_A5_sign;
+  UCHAR  ucSM_A6_sign;
+  UCHAR  ucSM_A7_sign;
+  ULONG  ulMargin_RO_a;
+  ULONG  ulMargin_RO_b;
+  ULONG  ulMargin_RO_c;
+  ULONG  ulMargin_fixed;
+  ULONG  ulMargin_Fmax_mean;
+  ULONG  ulMargin_plat_mean;
+  ULONG  ulMargin_Fmax_sigma;
+  ULONG  ulMargin_plat_sigma;
+  ULONG  ulMargin_DC_sigma;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulaTDClimitPerDPM[8];
+  ULONG  ulaNoCalcVddcPerDPM[8];
+  ULONG  ulAVFS_meanNsigma_Acontant0;
+  ULONG  ulAVFS_meanNsigma_Acontant1;
+  ULONG  ulAVFS_meanNsigma_Acontant2;
+  USHORT usAVFS_meanNsigma_DC_tol_sigma;
+  USHORT usAVFS_meanNsigma_Platform_mean;
+  USHORT usAVFS_meanNsigma_Platform_sigma;
+  ULONG  ulGB_VDROOP_TABLE_CKSOFF_a0;
+  ULONG  ulGB_VDROOP_TABLE_CKSOFF_a1;
+  ULONG  ulGB_VDROOP_TABLE_CKSOFF_a2;
+  ULONG  ulGB_VDROOP_TABLE_CKSON_a0;
+  ULONG  ulGB_VDROOP_TABLE_CKSON_a1;
+  ULONG  ulGB_VDROOP_TABLE_CKSON_a2;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+  USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSON_m1;
+  USHORT usAVFSGB_FUSE_TABLE_CKSON_m2;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSON_b;
+  USHORT usMaxVoltage_0_25mv;
+  UCHAR  ucEnableGB_VDROOP_TABLE_CKSOFF;
+  UCHAR  ucEnableGB_VDROOP_TABLE_CKSON;
+  UCHAR  ucEnableGB_FUSE_TABLE_CKSOFF;
+  UCHAR  ucEnableGB_FUSE_TABLE_CKSON;
+  USHORT usPSM_Age_ComFactor;
+  UCHAR  ucEnableApplyAVFS_CKS_OFF_Voltage;
+  UCHAR  ucReserved;
+}ATOM_ASIC_PROFILING_INFO_V3_6;
+
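AtomBIOS tables are self-describing: each starts with ATOM_COMMON_TABLE_HEADER, whose format/content revision tells the consumer which struct layout (V3_4, V3_5, now V3_6) it may cast the blob to, so the new AVFS fields are only read from BIOSes that actually provide them. A minimal stand-alone sketch of the revision gate (the stand-in struct mirrors the common header's fields; the driver's actual dispatch may differ):

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in; the real definition lives in atombios.h. */
typedef struct {
        uint16_t usStructureSize;
        uint8_t  ucTableFormatRevision;
        uint8_t  ucTableContentRevision;
} common_header;

int main(void)
{
        common_header hdr = { sizeof(hdr), 3, 6 };   /* pretend BIOS table */

        /* Cast to ATOM_ASIC_PROFILING_INFO_V3_6 only after this check, so
         * older BIOSes never have the new AVFS fields read out of bounds. */
        if (hdr.ucTableFormatRevision == 3 && hdr.ucTableContentRevision >= 6)
                printf("V3_6 AVFS parameters available\n");
        else
                printf("fall back to an older profiling layout\n");
        return 0;
}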
 
 typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{
   ULONG  ulMaxSclkFreq;
index a461e155a160240f7f127c156b902b56651c523b..0c8c85d2a2a57f9a4ca11f52d0d7faaf5da7ef27 100644 (file)
@@ -49,6 +49,7 @@ enum cgs_ind_reg {
        CGS_IND_REG__SMC,
        CGS_IND_REG__UVD_CTX,
        CGS_IND_REG__DIDT,
+       CGS_IND_REG_GC_CAC,
        CGS_IND_REG__AUDIO_ENDPT
 };
 
@@ -115,6 +116,7 @@ enum cgs_system_info_id {
        CGS_SYSTEM_INFO_CG_FLAGS,
        CGS_SYSTEM_INFO_PG_FLAGS,
        CGS_SYSTEM_INFO_GFX_CU_INFO,
+       CGS_SYSTEM_INFO_GFX_SE_INFO,
        CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
 
@@ -189,7 +191,6 @@ typedef unsigned long cgs_handle_t;
 
 struct cgs_acpi_method_argument {
        uint32_t type;
-       uint32_t method_length;
        uint32_t data_length;
        union{
                uint32_t value;
@@ -581,6 +582,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
                                     enum cgs_ucode_id type,
                                     struct cgs_firmware_info *info);
 
+typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
+                                        enum cgs_ucode_id type);
+
 typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
                                  enum amd_ip_block_type block_type,
                                  enum amd_powergating_state state);
@@ -645,6 +649,7 @@ struct cgs_ops {
        cgs_set_camera_voltages_t set_camera_voltages;
        /* Firmware Info */
        cgs_get_firmware_info get_firmware_info;
+       cgs_rel_firmware rel_firmware;
        /* cg pg interface */
        cgs_set_powergating_state set_powergating_state;
        cgs_set_clockgating_state set_clockgating_state;
@@ -738,6 +743,8 @@ struct cgs_device
        CGS_CALL(set_camera_voltages,dev,mask,voltages)
 #define cgs_get_firmware_info(dev, type, info) \
        CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_rel_firmware(dev, type)    \
+       CGS_CALL(rel_firmware, dev, type)
 #define cgs_set_powergating_state(dev, block_type, state)      \
        CGS_CALL(set_powergating_state, dev, block_type, state)
 #define cgs_set_clockgating_state(dev, block_type, state)      \
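cgs_rel_firmware joins the standard CGS dispatch scheme: one typed function pointer in struct cgs_ops plus a wrapper macro, giving powerplay a way to release firmware obtained via cgs_get_firmware_info without reaching into amdgpu internals. All of the wrappers above expand through CGS_CALL, which looks approximately like this (a sketch; verify against the definition earlier in cgs_common.h):

/* Sketch of the dispatch macro: route through the device's ops table. */
#define CGS_CALL(func, dev, ...) \
        (((struct cgs_device *)(dev))->ops->func(dev, ##__VA_ARGS__))

/* So a powerplay caller releasing SMU firmware writes:
 *
 *     cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
 *
 * which resolves to dev->ops->rel_firmware(dev, CGS_UCODE_ID_SMU).
 */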
index 8e345bfddb693033d08584d1093f254ef9619a38..f9e03ad0baa29faea18d5bd0fc123c9f7b769c88 100644 (file)
@@ -73,11 +73,14 @@ static int pp_sw_init(void *handle)
 
        ret = hwmgr->hwmgr_func->backend_init(hwmgr);
        if (ret)
-               goto err;
+               goto err1;
 
        pr_info("amdgpu: powerplay initialized\n");
 
        return 0;
+err1:
+       if (hwmgr->pptable_func->pptable_fini)
+               hwmgr->pptable_func->pptable_fini(hwmgr);
 err:
        pr_err("amdgpu: powerplay initialization failed\n");
        return ret;
@@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle)
        if (hwmgr->hwmgr_func->backend_fini != NULL)
                ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
 
+       if (hwmgr->pptable_func->pptable_fini)
+               hwmgr->pptable_func->pptable_fini(hwmgr);
+
        return ret;
 }
 
@@ -530,6 +536,10 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
        case AMD_PP_EVENT_COMPLETE_INIT:
                ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
                break;
+       case AMD_PP_EVENT_READJUST_POWER_STATE:
+               pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
+               ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+               break;
        default:
                break;
        }
@@ -734,12 +744,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
 
        PP_CHECK_HW(hwmgr);
 
-       if (hwmgr->hwmgr_func->get_pp_table == NULL) {
-               printk(KERN_INFO "%s was not implemented.\n", __func__);
-               return 0;
-       }
+       if (!hwmgr->soft_pp_table)
+               return -EINVAL;
+
+       *table = (char *)hwmgr->soft_pp_table;
 
-       return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
+       return hwmgr->soft_pp_table_size;
 }
 
 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
@@ -753,12 +763,23 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 
        PP_CHECK_HW(hwmgr);
 
-       if (hwmgr->hwmgr_func->set_pp_table == NULL) {
-               printk(KERN_INFO "%s was not implemented.\n", __func__);
-               return 0;
+       if (!hwmgr->hardcode_pp_table) {
+               hwmgr->hardcode_pp_table =
+                               kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+
+               if (!hwmgr->hardcode_pp_table)
+                       return -ENOMEM;
+
+               /* seed from the soft pptable so a partially written
+                * hardcoded table still has valid data in its tail
+                */
+               memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
+                               hwmgr->soft_pp_table_size);
        }
 
-       return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
+       memcpy(hwmgr->hardcode_pp_table, buf, size);
+
+       hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
+
+       return amd_powerplay_reset(handle);
 }
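pp_dpm_set_pp_table no longer defers to per-ASIC hooks: it keeps one device-owned "hardcode" copy, seeded from the soft (BIOS-derived) table on first use so a short user write leaves the tail of the table valid, overlays the user buffer, repoints soft_pp_table at the copy, and rebuilds powerplay state through amd_powerplay_reset(). One caveat: the memcpy trusts the caller's size against a buffer of soft_pp_table_size bytes, so a defensive version would clamp it. A user-space sketch of the overlay with that clamp added (the clamp is an addition for illustration, not in the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *hardcode_tbl;
static const char soft_tbl[64] = "default-table";
static const size_t soft_size = sizeof(soft_tbl);

static int set_pp_table(const char *buf, size_t size)
{
        if (!hardcode_tbl) {
                hardcode_tbl = calloc(1, soft_size);
                if (!hardcode_tbl)
                        return -1;              /* -ENOMEM in the driver */
                memcpy(hardcode_tbl, soft_tbl, soft_size); /* seed full table */
        }
        if (size > soft_size)
                size = soft_size;       /* clamp: the driver trusts size as-is */
        memcpy(hardcode_tbl, buf, size); /* overlay the user-supplied prefix */
        return 0;                        /* driver then calls amd_powerplay_reset() */
}

int main(void)
{
        set_pp_table("user", 4);
        printf("%s\n", hardcode_tbl);    /* "userult-table": prefix overlaid */
        free(hardcode_tbl);
        return 0;
}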
 
 static int pp_dpm_force_clock_level(void *handle,
@@ -800,6 +821,82 @@ static int pp_dpm_print_clock_levels(void *handle,
        return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
 }
 
+static int pp_dpm_get_sclk_od(void *handle)
+{
+       struct pp_hwmgr *hwmgr;
+
+       if (!handle)
+               return -EINVAL;
+
+       hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+       PP_CHECK_HW(hwmgr);
+
+       if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
+               printk(KERN_INFO "%s was not implemented.\n", __func__);
+               return 0;
+       }
+
+       return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
+}
+
+static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
+{
+       struct pp_hwmgr *hwmgr;
+
+       if (!handle)
+               return -EINVAL;
+
+       hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+       PP_CHECK_HW(hwmgr);
+
+       if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
+               printk(KERN_INFO "%s was not implemented.\n", __func__);
+               return 0;
+       }
+
+       return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
+}
+
+static int pp_dpm_get_mclk_od(void *handle)
+{
+       struct pp_hwmgr *hwmgr;
+
+       if (!handle)
+               return -EINVAL;
+
+       hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+       PP_CHECK_HW(hwmgr);
+
+       if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
+               printk(KERN_INFO "%s was not implemented.\n", __func__);
+               return 0;
+       }
+
+       return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
+}
+
+static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
+{
+       struct pp_hwmgr *hwmgr;
+
+       if (!handle)
+               return -EINVAL;
+
+       hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+       PP_CHECK_HW(hwmgr);
+
+       if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
+               printk(KERN_INFO "%s was not implemented.\n", __func__);
+               return 0;
+       }
+
+       return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
+}
+
 const struct amd_powerplay_funcs pp_dpm_funcs = {
        .get_temperature = pp_dpm_get_temperature,
        .load_firmware = pp_dpm_load_fw,
@@ -822,6 +919,10 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
        .set_pp_table = pp_dpm_set_pp_table,
        .force_clock_level = pp_dpm_force_clock_level,
        .print_clock_levels = pp_dpm_print_clock_levels,
+       .get_sclk_od = pp_dpm_get_sclk_od,
+       .set_sclk_od = pp_dpm_set_sclk_od,
+       .get_mclk_od = pp_dpm_get_mclk_od,
+       .set_mclk_od = pp_dpm_set_mclk_od,
 };
 
 static int amd_pp_instance_init(struct amd_pp_init *pp_init,
@@ -903,6 +1004,44 @@ int amd_powerplay_fini(void *handle)
        return 0;
 }
 
+int amd_powerplay_reset(void *handle)
+{
+       struct pp_instance *instance = (struct pp_instance *)handle;
+       struct pp_eventmgr *eventmgr;
+       struct pem_event_data event_data = { {0} };
+       int ret;
+
+       if (instance == NULL)
+               return -EINVAL;
+
+       eventmgr = instance->eventmgr;
+       if (!eventmgr || !eventmgr->pp_eventmgr_fini)
+               return -EINVAL;
+
+       eventmgr->pp_eventmgr_fini(eventmgr);
+
+       ret = pp_sw_fini(handle);
+       if (ret)
+               return ret;
+
+       kfree(instance->hwmgr->ps);
+
+       ret = pp_sw_init(handle);
+       if (ret)
+               return ret;
+
+       hw_init_power_state_table(instance->hwmgr);
+
+       if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
+               return -EINVAL;
+
+       ret = eventmgr->pp_eventmgr_init(eventmgr);
+       if (ret)
+               return ret;
+
+       return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
+}
+
 /* export this function to DAL */
 
 int amd_powerplay_display_configuration_change(void *handle,
index 46410e3c73493acce741f955f60790fc9745256e..fb88e4e5d625815ab213b1011392bce0ef96eae6 100644 (file)
@@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
        pem_unregister_interrupts(eventmgr);
 
        pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-
-       if (eventmgr != NULL)
-               kfree(eventmgr);
 }
 
 int eventmgr_init(struct pp_instance *handle)
index 5cd123472db471bcf4d5b3cfe4f21fc5000d4b17..b6f45fd01fa61621d2f1e776f94e26a5dd938f82 100644 (file)
@@ -132,8 +132,7 @@ int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struc
 
 int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
 {
-       /* TODO */
-       return 0;
+       return phm_disable_dynamic_state_management(eventmgr->hwmgr);
 }
 
 int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
index 436fc16dabb660e0970068ecc4eb0be3c2ed44f6..2da548f6337e822d3eeae9783d1b37190aa07d8c 100644 (file)
@@ -206,7 +206,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                                                        AMD_IP_BLOCK_TYPE_VCE,
                                                        AMD_PG_STATE_GATE);
                                cz_enable_disable_vce_dpm(hwmgr, false);
-                       /* TODO: to figure out why vce can't be poweroff*/
+                               cz_dpm_powerdown_vce(hwmgr);
                                cz_hwmgr->vce_power_gated = true;
                        } else {
                                cz_dpm_powerup_vce(hwmgr);
@@ -225,6 +225,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                        }
                }
        } else {
+               cz_hwmgr->vce_power_gated = bgate;
                cz_dpm_update_vce_dpm(hwmgr);
                cz_enable_disable_vce_dpm(hwmgr, !bgate);
                return 0;
index 1f14c477d15d3a490c89f316ce48dd077503e98f..9bf622e123b652e3e1d9fcd6676fafea1f041779 100644 (file)
@@ -1180,6 +1180,13 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
        int result = 0;
+       struct cz_hwmgr *data;
+
+       data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       hwmgr->backend = data;
 
        result = cz_initialize_dpm_defaults(hwmgr);
        if (result != 0) {
@@ -1909,15 +1916,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
 
 int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
-       struct cz_hwmgr *cz_hwmgr;
-       int ret = 0;
-
-       cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
-       if (cz_hwmgr == NULL)
-               return -ENOMEM;
-
-       hwmgr->backend = cz_hwmgr;
        hwmgr->hwmgr_func = &cz_hwmgr_funcs;
        hwmgr->pptable_func = &pptable_funcs;
-       return ret;
+       return 0;
 }
index 24a16e49b5713d07f8a53aa81fb719ab75383500..744aa886a2be789abc63c3c626dc2f07b3a39edf 100644 (file)
@@ -581,25 +581,24 @@ static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
 
 static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (data->soft_pp_table) {
-               kfree(data->soft_pp_table);
-               data->soft_pp_table = NULL;
-       }
-
        return phm_hwmgr_backend_fini(hwmgr);
 }
 
 static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_hwmgr *data;
        uint32_t i;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        bool stay_in_boot;
        int result;
 
+       data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       hwmgr->backend = data;
+
        data->dll_default_on = false;
        data->sram_end = SMC_RAM_END;
 
@@ -633,6 +632,8 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
        data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
 
+       data->force_pcie_gen = PP_PCIEGenInvalid;
+
        if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
                data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
@@ -732,7 +733,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_gen_cap = 0x30007;
+                       data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
                else
                        data->pcie_gen_cap = (uint32_t)sys_info.value;
                if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -741,7 +742,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_lane_cap = 0x2f0000;
+                       data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
                else
                        data->pcie_lane_cap = (uint32_t)sys_info.value;
        } else {
@@ -1234,6 +1235,34 @@ static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+       /* Reset voting clients before disabling DPM */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_0, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_1, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_2, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_3, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_4, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_5, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_6, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_7, 0);
+
+       return 0;
+}
+
 /**
 * Get the location of various tables inside the FW image.
 *
@@ -1360,6 +1389,17 @@ static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+/**
+* Call SMC to reset S0/S1 to S1 and reset SMIO to its initial value
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   0 on success.
+*/
+static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
 /**
 * Initial switch from ARB F0->F1
 *
@@ -1373,6 +1413,21 @@ static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
                        MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
 }
 
+static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+       uint32_t tmp;
+
+       tmp = (cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+                       0x0000ff00) >> 8;
+
+       if (tmp == MC_CG_ARB_FREQ_F0)
+               return 0;
+
+       return fiji_copy_and_switch_arb_sets(hwmgr,
+                       tmp, MC_CG_ARB_FREQ_F0);
+}
+
 static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
                struct fiji_single_dpm_table *dpm_table, uint32_t count)
 {
@@ -1830,7 +1885,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
 
        PP_ASSERT_WITH_CODE(false,
                        "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-                       return vddci_table->entries[i].value);
+                       return vddci_table->entries[i-1].value);
 }
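The entries[i-1] change fixes an off-by-one: the search loop exits with i equal to the table count when every entry is below the requested VDDCI, so entries[i] indexed one past the end; entries[i-1] is the largest (last) entry, which is the right clamp. A runnable sketch of the lookup:

#include <stdio.h>
#include <stdint.h>

static const uint16_t vddci_table[] = { 800, 850, 900, 950 };
#define N (sizeof(vddci_table) / sizeof(vddci_table[0]))

static uint16_t find_closest_vddci(uint16_t vddci)
{
        size_t i;

        for (i = 0; i < N; i++)
                if (vddci_table[i] >= vddci)
                        return vddci_table[i];

        /* Loop fell through: i == N here, so vddci_table[i] would be out
         * of bounds (the bug the i-1 change fixes). Clamp to the max. */
        fprintf(stderr, "VDDCI larger than max VDDCI in table\n");
        return vddci_table[i - 1];
}

int main(void)
{
        printf("%u\n", find_closest_vddci(870));   /* 900 */
        printf("%u\n", find_closest_vddci(1000));  /* clamped to 950 */
        return 0;
}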
 
 static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
@@ -3175,6 +3230,17 @@ static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_ulv_parm *ulv = &(data->ulv);
+
+       if (ulv->ulv_supported)
+               return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+       return 0;
+}
+
 static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 {
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3195,6 +3261,21 @@ static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep)) {
+               if (smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to disable Master Deep Sleep switch failed!",
+                                       return -1);
+               }
+       }
+
+       return 0;
+}
+
 static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 {
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -3355,6 +3436,70 @@ static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+       /* disable SCLK dpm */
+       if (!data->sclk_dpm_key_disabled)
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_DPM_Disable) == 0),
+                               "Failed to disable SCLK DPM!",
+                               return -1);
+
+       /* disable MCLK dpm */
+       if (!data->mclk_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
+                               "Failed to force MCLK DPM0!",
+                               return -1);
+
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_MCLKDPM_Disable) == 0),
+                               "Failed to disable MCLK DPM!",
+                               return -1);
+       }
+
+       return 0;
+}
+
+static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+       /* disable general power management */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+                       GLOBAL_PWRMGT_EN, 0);
+       /* disable sclk deep sleep */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+                       DYNAMIC_PM_EN, 0);
+
+       /* disable PCIE dpm */
+       if (!data->pcie_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_PCIeDPM_Disable) == 0),
+                               "Failed to disable pcie DPM during DPM Stop Function!",
+                               return -1);
+       }
+
+       if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
+               printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
+               return -1;
+       }
+
+       PP_ASSERT_WITH_CODE(
+                       (smum_send_msg_to_smc(hwmgr->smumgr,
+                                       PPSMC_MSG_Voltage_Cntl_Disable) == 0),
+                       "Failed to disable voltage DPM during DPM Stop Function!",
+                       return -1);
+
+       return 0;
+}
+
 static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
                uint32_t sources)
 {
@@ -3413,6 +3558,23 @@ static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
        return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
 }
 
+static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+               PHM_AutoThrottleSource source)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+       if (data->active_auto_throttle_sources & (1 << source)) {
+               data->active_auto_throttle_sources &= ~(1 << source);
+               fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+       }
+       return 0;
+}
+
+static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+       return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
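Throttle sources are tracked as a bitmask in active_auto_throttle_sources: enable sets the source's bit, the new disable path clears it, and either way the updated mask is pushed to the hardware via fiji_set_dpm_event_sources() only when it actually changed. A sketch of the symmetric pattern (enum values are local stand-ins):

#include <stdio.h>
#include <stdint.h>

enum throttle_source { SRC_THERMAL = 1, SRC_EXTERNAL = 2 };

static uint32_t active_sources;

static void program_event_sources(uint32_t mask)
{
        printf("event sources -> 0x%x\n", mask);  /* fiji_set_dpm_event_sources() */
}

static void set_source(enum throttle_source src, int enable)
{
        uint32_t bit = 1u << src;
        uint32_t next = enable ? (active_sources | bit) : (active_sources & ~bit);

        if (next != active_sources) {             /* only reprogram on change */
                active_sources = next;
                program_event_sources(active_sources);
        }
}

int main(void)
{
        set_source(SRC_THERMAL, 1);   /* fiji_enable_thermal_auto_throttle() */
        set_source(SRC_THERMAL, 0);   /* fiji_disable_thermal_auto_throttle() */
        return 0;
}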
+
 static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
        int tmp_result, result = 0;
@@ -3527,6 +3689,64 @@ static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+       int tmp_result, result = 0;
+
+       tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
+       PP_ASSERT_WITH_CODE(tmp_result == 0,
+                       "DPM is not running right now, no need to disable DPM!",
+                       return 0);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ThermalController))
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+       tmp_result = fiji_disable_power_containment(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable power containment!", result = tmp_result);
+
+       tmp_result = fiji_disable_smc_cac(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable SMC CAC!", result = tmp_result);
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+       tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable thermal auto throttle!", result = tmp_result);
+
+       tmp_result = fiji_stop_dpm(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to stop DPM!", result = tmp_result);
+
+       tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable deep sleep master switch!", result = tmp_result);
+
+       tmp_result = fiji_disable_ulv(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable ULV!", result = tmp_result);
+
+       tmp_result = fiji_clear_voting_clients(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to clear voting clients!", result = tmp_result);
+
+       tmp_result = fiji_reset_to_default(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to reset to default!", result = tmp_result);
+
+       tmp_result = fiji_force_switch_to_arbf0(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to force switch to arbf0!", result = tmp_result);
+
+       return result;
+}
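The disable sequence intentionally runs to completion even when a step fails: each PP_ASSERT_WITH_CODE records the error into result and falls through, so as much of DPM as possible is torn down, in roughly the reverse order of fiji_enable_dpm_tasks (power containment and CAC first, then thermal throttle, DPM stop, deep sleep, ULV, voting clients, SMC defaults, ARB F0). That behavior comes from the macro's shape, approximately this (a sketch; see powerplay's pp_debug.h for the real definition):

/* Approximate shape of PP_ASSERT_WITH_CODE: the "code" argument runs
 * only when the condition fails, which is how "result = tmp_result"
 * accumulates errors without aborting the sequence. */
#define PP_ASSERT_WITH_CODE(cond, msg, code)            \
        do {                                            \
                if (!(cond)) {                          \
                        printk("%s\n", msg);            \
                        code;                           \
                }                                       \
        } while (0)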
+
 static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
 {
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -5069,42 +5289,6 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
                                CG_FDO_CTRL2, FDO_PWM_MODE);
 }
 
-static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
-                                             hwmgr->soft_pp_table_size,
-                                             GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       *table = (char *)&data->soft_pp_table;
-
-       return hwmgr->soft_pp_table_size;
-}
-
-static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       memcpy(data->soft_pp_table, buf, size);
-
-       hwmgr->soft_pp_table = data->soft_pp_table;
-
-       /* TODO: re-init powerplay to implement modified pptable */
-
-       return 0;
-}
-
 static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask)
 {
@@ -5274,12 +5458,96 @@ bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *h
        return is_update_required;
 }
 
+static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+       struct fiji_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       int value;
+
+       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+                       100 /
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return value;
+}
+
+static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       struct pp_power_state  *ps;
+       struct fiji_power_state  *fiji_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
+
+       fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+                       value / 100 +
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return 0;
+}
+
+static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+       struct fiji_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       int value;
+
+       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+                       100 /
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return value;
+}
+
+static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       struct pp_power_state  *ps;
+       struct fiji_power_state  *fiji_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
+
+       fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+                       value / 100 +
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return 0;
+}
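The new sclk/mclk "overdrive" hooks express the top DPM level as a percentage over the golden (stock) table: get_* returns (current - golden) * 100 / golden, and set_* clamps the request to 20 and rescales the top level to golden * value / 100 + golden, i.e. golden * (100 + value) / 100 kept in integer math. A runnable round-trip check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t golden = 100000;   /* stock top-level clock, e.g. 1 GHz in 10 kHz units */
        uint32_t value  = 15;       /* requested overdrive, percent */

        if (value > 20)
                value = 20;         /* driver clamps at +20% */

        /* set: golden * value / 100 + golden == golden * (100 + value) / 100 */
        uint32_t engine_clock = golden * value / 100 + golden;

        /* get: (current - golden) * 100 / golden */
        int od = (int)((engine_clock - golden) * 100 / golden);

        printf("clock=%u od=%d%%\n", engine_clock, od);   /* 115000, 15% */
        return 0;
}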
 
 static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
        .backend_init = &fiji_hwmgr_backend_init,
        .backend_fini = &fiji_hwmgr_backend_fini,
        .asic_setup = &fiji_setup_asic_task,
        .dynamic_state_management_enable = &fiji_enable_dpm_tasks,
+       .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
        .force_dpm_level = &fiji_dpm_force_dpm_level,
        .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
        .get_power_state_size = &fiji_get_power_state_size,
@@ -5312,24 +5580,18 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
        .get_fan_control_mode = fiji_get_fan_control_mode,
        .check_states_equal = fiji_check_states_equal,
        .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
-       .get_pp_table = fiji_get_pp_table,
-       .set_pp_table = fiji_set_pp_table,
        .force_clock_level = fiji_force_clock_level,
        .print_clock_levels = fiji_print_clock_levels,
+       .get_sclk_od = fiji_get_sclk_od,
+       .set_sclk_od = fiji_set_sclk_od,
+       .get_mclk_od = fiji_get_mclk_od,
+       .set_mclk_od = fiji_set_mclk_od,
 };
 
 int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
-       struct fiji_hwmgr  *data;
-       int ret = 0;
-
-       data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
-       if (data == NULL)
-               return -ENOMEM;
-
-       hwmgr->backend = data;
        hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
        hwmgr->pptable_func = &tonga_pptable_funcs;
        pp_fiji_thermal_initialize(hwmgr);
-       return ret;
+       return 0;
 }
index 170edf5a772dabb04b8fb683104c141f24d53862..bf67c2a92c68366607efaa8402e1209c4b3ad0be 100644 (file)
@@ -302,9 +302,6 @@ struct fiji_hwmgr {
        bool                           pg_acp_init;
        bool                           frtc_enabled;
        bool                           frtc_status_changed;
-
-       /* soft pptable for re-uploading into smu */
-       void *soft_pp_table;
 };
 
 /* To convert to Q8.8 format for firmware */
index db23a4068baf291f948eb891fd4c2739bef5d8cb..44658451a8d2eef6c4a5c0e061cb2351bcad0ec3 100644 (file)
@@ -72,18 +72,19 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
        fiji_hwmgr->dte_tj_offset = tmp;
 
        if (!tmp) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_PowerContainment);
-
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_CAC);
 
                fiji_hwmgr->fast_watermark_threshold = 100;
 
-               tmp = 1;
-               fiji_hwmgr->enable_dte_feature = tmp ? false : true;
-               fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
-               fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+               if (hwmgr->powercontainment_enabled) {
+                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                   PHM_PlatformCaps_PowerContainment);
+                       /* DTE stays disabled; TDC limit and package power
+                        * tracking are enabled along with power containment.
+                        */
+                       fiji_hwmgr->enable_dte_feature = false;
+                       fiji_hwmgr->enable_tdc_limit_feature = true;
+                       fiji_hwmgr->enable_pkg_pwr_tracking_feature = true;
+               }
        }
 }
 
@@ -459,6 +460,23 @@ int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CAC) && data->cac_enabled) {
+               int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                               (uint16_t)(PPSMC_MSG_DisableCac));
+               PP_ASSERT_WITH_CODE((smc_result == 0),
+                               "Failed to disable CAC in SMC.", result = -1);
+
+               data->cac_enabled = false;
+       }
+       return result;
+}
+
 int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 {
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -528,6 +546,48 @@ int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment) &&
+                       data->power_containment_features) {
+               int smc_result;
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_TDCLimit) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable TDCLimit in SMC.",
+                                       result = smc_result);
+               }
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_DTE) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_DisableDTE));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable DTE in SMC.",
+                                       result = smc_result);
+               }
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable PkgPwrTracking in SMC.",
+                                       result = smc_result);
+               }
+               data->power_containment_features = 0;
+       }
+
+       return result;
+}
+
 int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
 {
        struct phm_ppt_v1_information *table_info =
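fiji_disable_power_containment above walks a feature bitmask and sends one SMC disable message per bit that was set at enable time, then clears the mask so a second disable is a no-op. A standalone sketch of that pattern (the FEATURE_* names and send_smc_msg are illustrative stand-ins, and the per-message error handling is collapsed into a bitwise OR for brevity):

#include <stdio.h>
#include <stdint.h>

#define FEATURE_DTE         0x1
#define FEATURE_TDC_LIMIT   0x2
#define FEATURE_PKG_PWR     0x4

static int send_smc_msg(const char *msg) { printf("SMC <- %s\n", msg); return 0; }

static int disable_power_containment(uint32_t *features)
{
        int result = 0;

        if (*features & FEATURE_TDC_LIMIT)
                result |= send_smc_msg("TDCLimitDisable");
        if (*features & FEATURE_DTE)
                result |= send_smc_msg("DisableDTE");
        if (*features & FEATURE_PKG_PWR)
                result |= send_smc_msg("PkgPwrLimitDisable");

        *features = 0;          /* everything is now off; disable is idempotent */
        return result;
}

int main(void)
{
        uint32_t features = FEATURE_DTE | FEATURE_PKG_PWR;
        return disable_power_containment(&features);
}
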
index 55e58200f33a8ab2ffaa8645fbbd9e923a091439..fec7724217339c68d0c5f3c91fe02ba4d8f8bfab 100644 (file)
@@ -36,6 +36,19 @@ enum fiji_pt_config_reg_type {
 #define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
 
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK             0xffffffc0
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT           0x6
+#define DIDT_TD_CTRL0__UNUSED_0_MASK             0xffffffc0
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT           0x6
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK            0xffffffc0
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT          0x6
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK                 0xe0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001d
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK                 0xe0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001d
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK                0xe0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT              0x0000001d
+
 struct fiji_pt_config_reg {
        uint32_t                           offset;
        uint32_t                           mask;
@@ -58,7 +71,9 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
 int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
 int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
 int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
 int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
+int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
 int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
 int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
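The DIDT_*_MASK/__SHIFT pairs defined in this header follow the usual register-field convention: the mask selects the field's bits, the shift positions a value inside them. A sketch of the read-modify-write they enable (reg_read/reg_write are stand-ins, not the driver's indirect register accessors):

#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK   0xffffffc0
#define FIELD_SHIFT  0x6

static uint32_t reg = 0x12345678;
static uint32_t reg_read(void)        { return reg; }
static void     reg_write(uint32_t v) { reg = v; }

static void set_field(uint32_t value)
{
        uint32_t v = reg_read();

        v &= ~FIELD_MASK;                         /* clear the field */
        v |= (value << FIELD_SHIFT) & FIELD_MASK; /* insert the new value */
        reg_write(v);
}

int main(void)
{
        set_field(0x3f);
        printf("reg = 0x%08x\n", reg_read());
        return 0;
}
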
 
index fa208ada689219f4eb072ebb5e655e0a03cc9ab7..789f98ad2615027c8182ebfda29173e3b30b38db 100644 (file)
@@ -154,6 +154,30 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
        return ret;
 }
 
+int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
+{
+       int ret = -1;
+       bool enabled;
+
+       PHM_FUNC_CHECK(hwmgr);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+               PHM_PlatformCaps_TablelessHardwareInterface)) {
+               if (hwmgr->hwmgr_func->dynamic_state_management_disable)
+                       ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
+       } else {
+               ret = phm_dispatch_table(hwmgr,
+                               &(hwmgr->disable_dynamic_state_management),
+                               NULL, NULL);
+       }
+
+       enabled = (ret != 0);
+
+       cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+       return ret;
+}
+
 int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
 {
        PHM_FUNC_CHECK(hwmgr);
@@ -306,11 +330,15 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
 {
        PHM_FUNC_CHECK(hwmgr);
 
-       if (hwmgr->hwmgr_func->store_cc6_data == NULL)
+       if (display_config == NULL)
                return -EINVAL;
 
        hwmgr->display_config = *display_config;
-       /* to do pass other display configuration in furture */
+
+       if (hwmgr->hwmgr_func->store_cc6_data == NULL)
+               return -EINVAL;
+
+       /* TODO: pass other display configuration in the future */
 
        if (hwmgr->hwmgr_func->store_cc6_data)
                hwmgr->hwmgr_func->store_cc6_data(hwmgr,
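The reordering in phm_store_dal_configuration_data matters: the display configuration is now validated and copied before the optional store_cc6_data callback is checked, so a missing callback no longer discards the configuration. The shape of the fix, as a standalone sketch (names are illustrative):

#include <stddef.h>

struct config { int active_displays; };
struct mgr {
        struct config cfg;
        int (*store_cc6)(struct mgr *);
};

static int store_config(struct mgr *m, const struct config *c)
{
        if (c == NULL)
                return -22;             /* -EINVAL */

        m->cfg = *c;                    /* keep the config unconditionally */

        if (m->store_cc6 == NULL)
                return -22;             /* the callback is optional work */

        return m->store_cc6(m);
}

int main(void)
{
        struct config c = { .active_displays = 1 };
        struct mgr m = { .store_cc6 = NULL };

        store_config(&m, &c);           /* fails, but cfg is retained */
        return m.cfg.active_displays == 1 ? 0 : 1;
}
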
index 1c48917da3cf3712b39eb72aaf1511fc3936845c..03b6128ebc203c66ad26cf2f2cb5f15786e08820 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
 #include "cgs_common.h"
 #include "power_state.h"
 #include "hwmgr.h"
@@ -58,12 +59,13 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
        hwmgr->hw_revision = pp_init->rev_id;
        hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
        hwmgr->power_source = PP_PowerSource_AC;
+       hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
 
        switch (hwmgr->chip_family) {
-       case AMD_FAMILY_CZ:
+       case AMDGPU_FAMILY_CZ:
                cz_hwmgr_init(hwmgr);
                break;
-       case AMD_FAMILY_VI:
+       case AMDGPU_FAMILY_VI:
                switch (hwmgr->chip_id) {
                case CHIP_TONGA:
                        tonga_hwmgr_init(hwmgr);
@@ -93,6 +95,15 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
        if (hwmgr == NULL || hwmgr->ps == NULL)
                return -EINVAL;
 
+       /* free hwmgr-owned allocations */
+       kfree(hwmgr->hardcode_pp_table);
+
+       kfree(hwmgr->backend);
+
+       kfree(hwmgr->start_thermal_controller.function_list);
+
+       kfree(hwmgr->set_temperature_range.function_list);
+
        kfree(hwmgr->ps);
        kfree(hwmgr);
        return 0;
@@ -462,7 +473,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
 
        PP_ASSERT_WITH_CODE(false,
                        "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-                       return vddci_table->entries[i].value);
+                       return vddci_table->entries[i-1].value);
 }
 
 int phm_find_boot_level(void *table,
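The phm_find_closest_vddci change above is an off-by-one fix: when the search loop falls through, the index equals the table size, so the clamp must return entries[i-1], the last valid entry, rather than entries[i], one past the end. A standalone sketch of the corrected clamp (assumes a non-empty table):

#include <stdio.h>

static unsigned find_closest(const unsigned *tab, int count, unsigned want)
{
        int i;

        for (i = 0; i < count; i++)
                if (tab[i] >= want)
                        return tab[i];

        /* want exceeds every entry: i == count, so clamp to the last one */
        return tab[i - 1];
}

int main(void)
{
        unsigned tab[] = { 800, 900, 1000 };

        printf("%u\n", find_closest(tab, 3, 1200));   /* 1000, no overrun */
        return 0;
}
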
index 347fef127ce91b26127fdf0f64a91f50a339b90b..2930a3355948990c56961f7f52dc424e6b65d8c1 100644 (file)
@@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record {
        uint8_t phases;
        uint8_t cks_enable;
        uint8_t cks_voffset;
+       uint32_t sclk_offset;
 };
 
 typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
index 8f142a74ad08dd7a13433423fcd1b158b5258ccd..aeec25c66aa83ed5600981b3456cad076d9a7166 100644 (file)
@@ -106,11 +106,17 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
        data->uvd_power_gated = bgate;
 
        if (bgate) {
+               cgs_set_clockgating_state(hwmgr->device,
+                               AMD_IP_BLOCK_TYPE_UVD,
+                               AMD_CG_STATE_GATE);
                polaris10_update_uvd_dpm(hwmgr, true);
                polaris10_phm_powerdown_uvd(hwmgr);
        } else {
                polaris10_phm_powerup_uvd(hwmgr);
                polaris10_update_uvd_dpm(hwmgr, false);
+               cgs_set_clockgating_state(hwmgr->device,
+                               AMD_IP_BLOCK_TYPE_UVD,
+                               AMD_PG_STATE_UNGATE);
        }
 
        return 0;
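The UVD power-gating hunk above adds an ordering guarantee: clocks are gated before power is cut, and power is restored before clocks are ungated. A standalone sketch of that sequencing (helper names are illustrative, not the driver's CGS API):

#include <stdio.h>
#include <stdbool.h>

static void set_clockgating(bool gate) { printf("clocks %s\n", gate ? "gated" : "running"); }
static void set_power(bool on)         { printf("power  %s\n", on ? "on" : "off"); }

static void powergate_uvd(bool gate)
{
        if (gate) {
                set_clockgating(true);   /* quiesce clocks first */
                set_power(false);
        } else {
                set_power(true);         /* power up first */
                set_clockgating(false);
        }
}

int main(void)
{
        powergate_uvd(true);
        powergate_uvd(false);
        return 0;
}
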
index aa6be033f21b36832ad67f156d6b1954cd8827ea..9d764c4d253e3feeee4cd9ee921acf3c9805c0db 100644 (file)
@@ -389,6 +389,34 @@ static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+       /* Reset voting clients before disabling DPM */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_0, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_1, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_2, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_3, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_4, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_5, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_6, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_7, 0);
+
+       return 0;
+}
+
 /**
 * Get the location of various tables inside the FW image.
 *
@@ -515,6 +543,11 @@ static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
 /**
 * Initial switch from ARB F0->F1
 *
@@ -528,6 +561,21 @@ static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
                        MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
 }
 
+static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+       uint32_t tmp;
+
+       tmp = (cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+                       0x0000ff00) >> 8;
+
+       if (tmp == MC_CG_ARB_FREQ_F0)
+               return 0;
+
+       return polaris10_copy_and_switch_arb_sets(hwmgr,
+                       tmp, MC_CG_ARB_FREQ_F0);
+}
+
 static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -999,7 +1047,7 @@ static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
                                vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
                                                (dep_table->entries[i].vddc -
                                                                (uint16_t)data->vddc_vddci_delta));
-                               *voltage |= (vddci * VOLTAGE_SCALE) <<  VDDCI_SHIFT;
+                               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
                        }
 
                        if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
@@ -1296,7 +1344,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
        }
 
        mem_level->MclkFrequency = clock;
-       mem_level->StutterEnable = 0;
        mem_level->EnabledForThrottle = 1;
        mem_level->EnabledForActivity = 0;
        mem_level->UpHyst = 0;
@@ -1304,7 +1351,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
        mem_level->VoltageDownHyst = 0;
        mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
        mem_level->StutterEnable = false;
-
        mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
        data->display_timing.num_existing_displays = info.display_count;
@@ -1358,12 +1404,12 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
                        return result;
        }
 
-       /* in order to prevent MC activity from stutter mode to push DPM up.
+       /* To prevent MC activity in stutter mode from pushing DPM up,
         * the UVD change complements this by putting the MCLK in
-        * a higher state by default such that we are not effected by
+        * a higher state by default such that we are not affected by
         * up threshold or MCLK DPM latency.
         */
-       levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+       levels[0].ActivityLevel = 0x1f;
        CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
 
        data->smc_state_table.MemoryDpmLevelCount =
@@ -1761,12 +1807,9 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
 
 static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 {
-       uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
-                       volt_with_cks, value;
-       uint16_t clock_freq_u16;
+       uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
-                       volt_offset = 0;
+       uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1778,50 +1821,38 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
         * if the part is SS or FF. if RO >= 1660MHz, part is FF.
         */
        efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (146 * 4));
-       efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (148 * 4));
+                       ixSMU_EFUSE_0 + (67 * 4));
        efuse &= 0xFF000000;
        efuse = efuse >> 24;
-       efuse2 &= 0xF;
-
-       if (efuse2 == 1)
-               ro = (2300 - 1350) * efuse / 255 + 1350;
-       else
-               ro = (2500 - 1000) * efuse / 255 + 1000;
 
-       if (ro >= 1660)
-               type = 0;
-       else
-               type = 1;
+       if (hwmgr->chip_id == CHIP_POLARIS10) {
+               min = 1000;
+               max = 2300;
+       } else {
+               min = 1100;
+               max = 2100;
+       }
 
-       /* Populate Stretch amount */
-       data->smc_state_table.ClockStretcherAmount = stretch_amount;
+       ro = efuse * (max - min) / 255 + min;
 
        /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
        for (i = 0; i < sclk_table->count; i++) {
                data->smc_state_table.Sclk_CKS_masterEn0_7 |=
                                sclk_table->entries[i].cks_enable << i;
-               volt_without_cks = (uint32_t)((14041 *
-                       (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
-                       (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
-               volt_with_cks = (uint32_t)((13946 *
-                       (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
-                       (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+
+               volt_without_cks = (uint32_t)(((ro - 40) * 1000 - 2753594 - sclk_table->entries[i].clk / 100 * 136418 / 1000) /
+                                       (sclk_table->entries[i].clk / 100 * 1132925 / 10000 - 242418) / 100);
+
+               volt_with_cks = (uint32_t)((ro * 1000 - 2396351 - sclk_table->entries[i].clk / 100 * 329021 / 1000) /
+                               (sclk_table->entries[i].clk / 10000 * 649434 / 1000 - 18005) / 10);
+
                if (volt_without_cks >= volt_with_cks)
                        volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
                                        sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+
                data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
        }
 
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       STRETCH_ENABLE, 0x0);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x1);
-       /* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x0);
-
        /* Populate CKS Lookup Table */
        if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
                stretch_amount2 = 0;
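The new efuse decode above maps an 8-bit fuse value linearly onto a chip-specific [min, max] ring-oscillator range, replacing the two-table interpolation that was removed. A standalone sketch with the Polaris10 bounds from the patch:

#include <stdio.h>
#include <stdint.h>

static uint32_t decode_ro(uint8_t efuse, uint32_t min, uint32_t max)
{
        /* linear map of 0..255 onto min..max */
        return efuse * (max - min) / 255 + min;
}

int main(void)
{
        /* Polaris10 bounds per the hunk above: 1000..2300 */
        printf("%u\n", decode_ro(0x80, 1000, 2300));   /* ~1652 */
        return 0;
}
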
@@ -1835,69 +1866,6 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
                                return -EINVAL);
        }
 
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL);
-       value &= 0xFFC2FF87;
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
-                       polaris10_clock_stretcher_lookup_table[stretch_amount2][0];
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
-                       polaris10_clock_stretcher_lookup_table[stretch_amount2][1];
-       clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
-                       GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100);
-       if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16
-       && polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) {
-               /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
-               value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
-               /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
-               value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
-               /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
-               value |= (polaris10_clock_stretch_amount_conversion
-                               [polaris10_clock_stretcher_lookup_table[stretch_amount2][3]]
-                                [stretch_amount]) << 3;
-       }
-       CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq);
-       CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq);
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
-                       polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
-                       (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL, value);
-
-       /* Populate DDT Lookup Table */
-       for (i = 0; i < 4; i++) {
-               /* Assign the minimum and maximum VID stored
-                * in the last row of Clock Stretcher Voltage Table.
-                */
-               data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID =
-                               (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2];
-               data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID =
-                               (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3];
-               /* Loop through each SCLK and check the frequency
-                * to see if it lies within the frequency for clock stretcher.
-                */
-               for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
-                       cks_setting = 0;
-                       clock_freq = PP_SMC_TO_HOST_UL(
-                                       data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency);
-                       /* Check the allowed frequency against the sclk level[j].
-                        *  Sclk's endianness has already been converted,
-                        *  and it's in 10Khz unit,
-                        *  as opposed to Data table, which is in Mhz unit.
-                        */
-                       if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) {
-                               cks_setting |= 0x2;
-                               if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100)
-                                       cks_setting |= 0x1;
-                       }
-                       data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting
-                                                       |= cks_setting << (j * 2);
-               }
-               CONVERT_FROM_HOST_TO_SMC_US(
-                       data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting);
-       }
-
        value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
        value &= 0xFFFFFFFE;
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
@@ -1945,9 +1913,8 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
        if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
                config = VR_SVI2_PLANE_2;
                table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-       } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               config = VR_SMIO_PATTERN_2;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
+                       offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
        } else {
                config = VR_STATIC_VOLTAGE;
                table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
@@ -1956,6 +1923,90 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+
+int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
+       int result = 0;
+       struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+       AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+       AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+       uint32_t tmp, i;
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)hwmgr->pptable;
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+                       table_info->vdd_dep_on_sclk;
+
+
+       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+               return result;
+
+       result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+       if (0 == result) {
+               table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+               table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+               table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+               table->BTCGB_VDROOP_TABLE[1].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+               table->BTCGB_VDROOP_TABLE[1].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+               table->BTCGB_VDROOP_TABLE[1].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+               table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+               table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+               table->AVFSGB_VDROOP_TABLE[0].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+               table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+               table->AVFSGB_VDROOP_TABLE[0].m2_shift  = 12;
+               table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+               table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+               table->AVFSGB_VDROOP_TABLE[1].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+               table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+               table->AVFSGB_VDROOP_TABLE[1].m2_shift  = 12;
+               table->MaxVoltage                = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+               AVFS_meanNsigma.Aconstant[0]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+               AVFS_meanNsigma.Aconstant[1]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+               AVFS_meanNsigma.Aconstant[2]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+               AVFS_meanNsigma.DC_tol_sigma      = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+               AVFS_meanNsigma.Platform_mean     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+               AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+               AVFS_meanNsigma.Platform_sigma     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+               for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+                       AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+                       AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+               }
+
+               result = polaris10_read_smc_sram_dword(smumgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+                               &tmp, data->sram_end);
+
+               polaris10_copy_bytes_to_smc(smumgr,
+                                       tmp,
+                                       (uint8_t *)&AVFS_meanNsigma,
+                                       sizeof(AVFS_meanNsigma_t),
+                                       data->sram_end);
+
+               result = polaris10_read_smc_sram_dword(smumgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+                               &tmp, data->sram_end);
+               polaris10_copy_bytes_to_smc(smumgr,
+                                       tmp,
+                                       (uint8_t *)&AVFS_SclkOffset,
+                                       sizeof(AVFS_Sclk_Offset_t),
+                                       data->sram_end);
+
+               data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+               data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1);
+       }
+       return result;
+}
+
+
 /**
 * Initializes the SMC table and uploads it
 *
@@ -2056,6 +2107,10 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
                                "Failed to populate Clock Stretcher Data Table!",
                                return result);
        }
+
+       result = polaris10_populate_avfs_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result);
+
        table->CurrSclkPllRange = 0xff;
        table->GraphicsVoltageChangeEnable  = 1;
        table->GraphicsThermThrottleEnable  = 1;
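The AVFS tables populated above are converted with PP_HOST_TO_SMC_UL/_US before being copied into SMC SRAM. Assuming, as those macro names suggest, that the SMC's byte order differs from the host's, the 32-bit conversion is a plain byte swap; a standalone sketch (host_to_smc_ul is a hypothetical stand-in for the macro):

#include <stdio.h>
#include <stdint.h>

static uint32_t host_to_smc_ul(uint32_t v)
{
        /* reverse the four bytes of a 32-bit word */
        return ((v & 0x000000ffu) << 24) |
               ((v & 0x0000ff00u) <<  8) |
               ((v & 0x00ff0000u) >>  8) |
               ((v & 0xff000000u) >> 24);
}

int main(void)
{
        printf("0x%08x\n", host_to_smc_ul(0x12345678));   /* 0x78563412 */
        return 0;
}
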
@@ -2229,6 +2284,17 @@ static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_ulv_parm *ulv = &(data->ulv);
+
+       if (ulv->ulv_supported)
+               return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+       return 0;
+}
+
 static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 {
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -2249,9 +2315,27 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep)) {
+               if (smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to disable Master Deep Sleep switch failed!",
+                                       return -1);
+               }
+       }
+
+       return 0;
+}
+
 static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       uint32_t soft_register_value = 0;
+       uint32_t handshake_disables_offset = data->soft_regs_start
+                               + offsetof(SMU74_SoftRegisters, HandshakeDisables);
 
        /* enable SCLK dpm */
        if (!data->sclk_dpm_key_disabled)
@@ -2262,6 +2346,12 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 
        /* enable MCLK dpm */
        if (0 == data->mclk_dpm_key_disabled) {
+               /* Disable UVD - SMU handshake for MCLK. */
+               soft_register_value = cgs_read_ind_register(hwmgr->device,
+                                       CGS_IND_REG__SMC, handshake_disables_offset);
+               soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                               handshake_disables_offset, soft_register_value);
 
                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -2269,7 +2359,6 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Failed to enable MCLK DPM during DPM Start Function!",
                                return -1);
 
-
                PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
@@ -2338,6 +2427,58 @@ static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+       /* disable SCLK dpm */
+       if (!data->sclk_dpm_key_disabled)
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_DPM_Disable) == 0),
+                               "Failed to disable SCLK DPM!",
+                               return -1);
+
+       /* disable MCLK dpm */
+       if (!data->mclk_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_MCLKDPM_Disable) == 0),
+                               "Failed to disable MCLK DPM!",
+                               return -1);
+       }
+
+       return 0;
+}
+
+static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+       /* disable general power management */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+                       GLOBAL_PWRMGT_EN, 0);
+       /* disable sclk deep sleep */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+                       DYNAMIC_PM_EN, 0);
+
+       /* disable PCIE dpm */
+       if (!data->pcie_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_PCIeDPM_Disable) == 0),
+                               "Failed to disable pcie DPM during DPM Stop Function!",
+                               return -1);
+       }
+
+       if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
+               printk(KERN_ERR "Failed to disable SCLK DPM and MCLK DPM!\n");
+               return -1;
+       }
+
+       return 0;
+}
+
 static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
 {
        bool protection;
@@ -2395,6 +2536,23 @@ static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
        return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
 }
 
+static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+               PHM_AutoThrottleSource source)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+       if (data->active_auto_throttle_sources & (1 << source)) {
+               data->active_auto_throttle_sources &= ~(1 << source);
+               polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+       }
+       return 0;
+}
+
+static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+       return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
 int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -2516,8 +2674,60 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 
 int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
+       int tmp_result, result = 0;
 
-       return 0;
+       tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
+       PP_ASSERT_WITH_CODE(tmp_result == 0,
+                       "DPM is not running right now, no need to disable DPM!",
+                       return 0);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ThermalController))
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+       tmp_result = polaris10_disable_power_containment(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable power containment!", result = tmp_result);
+
+       tmp_result = polaris10_disable_smc_cac(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable SMC CAC!", result = tmp_result);
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+       tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable thermal auto throttle!", result = tmp_result);
+
+       tmp_result = polaris10_stop_dpm(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to stop DPM!", result = tmp_result);
+
+       tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable deep sleep master switch!", result = tmp_result);
+
+       tmp_result = polaris10_disable_ulv(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable ULV!", result = tmp_result);
+
+       tmp_result = polaris10_clear_voting_clients(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to clear voting clients!", result = tmp_result);
+
+       tmp_result = polaris10_reset_to_default(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to reset to default!", result = tmp_result);
+
+       tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to force switch to ARB F0!", result = tmp_result);
+
+       return result;
 }
 
 int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
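polaris10_disable_dpm_tasks above uses a deliberate teardown idiom: every disable step runs even if an earlier one failed, and PP_ASSERT_WITH_CODE records the failing code in result, so the caller sees the most recent failure while the hardware is still wound down as far as possible. The control flow in isolation (step names are illustrative):

#include <stdio.h>

static int step(const char *name, int rc)
{
        if (rc)
                printf("%s failed: %d\n", name, rc);
        return rc;
}

int main(void)
{
        int tmp_result, result = 0;

        tmp_result = step("disable power containment", 0);
        if (tmp_result)
                result = tmp_result;

        tmp_result = step("disable SMC CAC", -5);   /* fails ...            */
        if (tmp_result)
                result = tmp_result;

        tmp_result = step("stop DPM", 0);           /* ... later steps still run */
        if (tmp_result)
                result = tmp_result;

        return result;   /* last recorded failure, 0 if all steps passed */
}
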
@@ -2528,13 +2738,6 @@ int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
 
 int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (data->soft_pp_table) {
-               kfree(data->soft_pp_table);
-               data->soft_pp_table = NULL;
-       }
-
        return phm_hwmgr_backend_fini(hwmgr);
 }
 
@@ -2590,8 +2793,13 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
        phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
                                                PHM_PlatformCaps_TCPRamping);
 
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_PowerContainment);
+       if (hwmgr->powercontainment_enabled)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                           PHM_PlatformCaps_PowerContainment);
+       else
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                           PHM_PlatformCaps_PowerContainment);
+
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                                        PHM_PlatformCaps_CAC);
 
@@ -2606,6 +2814,7 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                                PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
        if (hwmgr->chip_id == CHIP_POLARIS11)
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_SPLLShutdownSupport);
@@ -2662,12 +2871,12 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
                                }
                        }
 
-
-                       PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
-                                                       VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
-                                               "Error retrieving EVV voltage value!",
-                                               continue);
-
+                       if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
+                                               VOLTAGE_TYPE_VDDC,
+                                               sclk, vv_id, &vddc) != 0) {
+                               printk(KERN_WARNING "Failed to retrieve EVV voltage!\n");
+                               continue;
+                       }
 
                        /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
                        PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
@@ -2898,13 +3107,19 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
 
 int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_hwmgr *data;
        struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
        uint32_t temp_reg;
        int result;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
+       data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       hwmgr->backend = data;
+
        data->dll_default_on = false;
        data->sram_end = SMC_RAM_END;
        data->mclk_dpm0_activity_target = 0xa;
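The backend_init hunk above moves the kzalloc of the backend struct into the init function itself, freed centrally in hwmgr_fini per the earlier hwmgr.c hunk, instead of having polaris10_hwmgr_init allocate it. A standalone sketch of that ownership arrangement using libc analogues:

#include <stdlib.h>

struct backend { int dll_default_on; };
struct mgr { struct backend *backend; };

static int backend_init(struct mgr *m)
{
        m->backend = calloc(1, sizeof(*m->backend));   /* kzalloc analogue */
        if (!m->backend)
                return -12;                            /* -ENOMEM */
        return 0;
}

static void mgr_fini(struct mgr *m)
{
        free(m->backend);                              /* free(NULL) is safe, like kfree */
        m->backend = NULL;
}

int main(void)
{
        struct mgr m = { 0 };

        if (backend_init(&m))
                return 1;
        m.backend->dll_default_on = 0;
        mgr_fini(&m);
        return 0;
}
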
@@ -2938,6 +3153,11 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
        data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
 
+       data->enable_tdc_limit_feature = true;
+       data->enable_pkg_pwr_tracking_feature = true;
+       data->force_pcie_gen = PP_PCIEGenInvalid;
+       data->mclk_stutter_mode_threshold = 40000;
+
        if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
                data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
@@ -2962,6 +3182,10 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                        data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
        }
 
+       if (table_info->cac_dtp_table->usClockStretchAmount != 0)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_ClockStretcher);
+
        polaris10_set_features_platform_caps(hwmgr);
 
        polaris10_init_dpm_defaults(hwmgr);
@@ -3068,7 +3292,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_gen_cap = 0x30007;
+                       data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
                else
                        data->pcie_gen_cap = (uint32_t)sys_info.value;
                if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -3077,7 +3301,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_lane_cap = 0x2f0000;
+                       data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
                else
                        data->pcie_lane_cap = (uint32_t)sys_info.value;
 
@@ -3520,10 +3744,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
        ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
        ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
                        (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-       ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-                       (ATOM_Tonga_SCLK_Dependency_Table *)
+       PPTable_Generic_SubTable_Header *sclk_dep_table =
+                       (PPTable_Generic_SubTable_Header *)
                        (((unsigned long)powerplay_table) +
                                le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
        ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
                        (ATOM_Tonga_MCLK_Dependency_Table *)
                        (((unsigned long)powerplay_table) +
@@ -3575,7 +3800,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
        /* Performance levels are arranged from low to high. */
        performance_level->memory_clock = mclk_dep_table->entries
                        [state_entry->ucMemoryClockIndexLow].ulMclk;
-       performance_level->engine_clock = sclk_dep_table->entries
+       if (sclk_dep_table->ucRevId == 0)
+               performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+                       [state_entry->ucEngineClockIndexLow].ulSclk;
+       else if (sclk_dep_table->ucRevId == 1)
+               performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
                        [state_entry->ucEngineClockIndexLow].ulSclk;
        performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
                        state_entry->ucPCIEGenLow);
@@ -3586,8 +3815,14 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
                        [polaris10_power_state->performance_level_count++]);
        performance_level->memory_clock = mclk_dep_table->entries
                        [state_entry->ucMemoryClockIndexHigh].ulMclk;
-       performance_level->engine_clock = sclk_dep_table->entries
+
+       if (sclk_dep_table->ucRevId == 0)
+               performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+                       [state_entry->ucEngineClockIndexHigh].ulSclk;
+       else if (sclk_dep_table->ucRevId == 1)
+               performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
                        [state_entry->ucEngineClockIndexHigh].ulSclk;
+
        performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
                        state_entry->ucPCIEGenHigh);
        performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
@@ -3645,7 +3880,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
                switch (state->classification.ui_label) {
                case PP_StateUILabel_Performance:
                        data->use_pcie_performance_levels = true;
-
                        for (i = 0; i < ps->performance_level_count; i++) {
                                if (data->pcie_gen_performance.max <
                                                ps->performance_levels[i].pcie_gen)
@@ -3661,7 +3895,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
                                                ps->performance_levels[i].pcie_lane)
                                        data->pcie_lane_performance.max =
                                                        ps->performance_levels[i].pcie_lane;
-
                                if (data->pcie_lane_performance.min >
                                                ps->performance_levels[i].pcie_lane)
                                        data->pcie_lane_performance.min =
@@ -4187,12 +4420,9 @@ int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
        if (!bgate) {
-               data->smc_state_table.SamuBootLevel =
-                               (uint8_t) (table_info->mm_dep_table->count - 1);
+               data->smc_state_table.SamuBootLevel = 0;
                mm_boot_level_offset = data->dpm_table_start +
                                offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
                mm_boot_level_offset /= 4;
@@ -4721,42 +4951,6 @@ int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
        return result;
 }
 
-static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
-                                             hwmgr->soft_pp_table_size,
-                                             GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       *table = (char *)&data->soft_pp_table;
-
-       return hwmgr->soft_pp_table_size;
-}
-
-static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       memcpy(data->soft_pp_table, buf, size);
-
-       hwmgr->soft_pp_table = data->soft_pp_table;
-
-       /* TODO: re-init powerplay to implement modified pptable */
-
-       return 0;
-}
-
 static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask)
 {
@@ -4899,6 +5093,89 @@ static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
                                CG_FDO_CTRL2, FDO_PWM_MODE);
 }
 
+static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+       struct polaris10_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       int value;
+
+       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+                       100 /
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return value;
+}
+
+static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       struct pp_power_state  *ps;
+       struct polaris10_power_state  *polaris10_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+       polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+                       value / 100 +
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return 0;
+}
+
+static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+       struct polaris10_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       int value;
+
+       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+                       100 /
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return value;
+}
+
+static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       struct pp_power_state  *ps;
+       struct polaris10_power_state  *polaris10_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+       polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+                       value / 100 +
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return 0;
+}
 static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
        .backend_init = &polaris10_hwmgr_backend_init,
        .backend_fini = &polaris10_hwmgr_backend_fini,
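The new sclk/mclk OD handlers above express overclocking as a percentage over the "golden" (stock) top DPM level: get computes (current - golden) * 100 / golden, and set scales the stock clock back up, capped at 20%. Worked through as a standalone sketch (clock values in the driver's 10 kHz units; assumes the current clock is at or above stock):

#include <stdio.h>
#include <stdint.h>

static int get_od(uint32_t cur, uint32_t golden)
{
        return (int)((cur - golden) * 100 / golden);
}

static uint32_t set_od(uint32_t golden, uint32_t pct)
{
        if (pct > 20)
                pct = 20;                         /* hard cap, as in the patch */
        return golden * pct / 100 + golden;
}

int main(void)
{
        uint32_t golden = 113000;                 /* 1130 MHz in 10 kHz units */
        uint32_t oc = set_od(golden, 10);         /* +10% -> 124300 */

        printf("%u -> od %d%%\n", oc, get_od(oc, golden));
        return 0;
}
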
@@ -4937,22 +5214,17 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
        .check_states_equal = polaris10_check_states_equal,
        .set_fan_control_mode = polaris10_set_fan_control_mode,
        .get_fan_control_mode = polaris10_get_fan_control_mode,
-       .get_pp_table = polaris10_get_pp_table,
-       .set_pp_table = polaris10_set_pp_table,
        .force_clock_level = polaris10_force_clock_level,
        .print_clock_levels = polaris10_print_clock_levels,
        .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
+       .get_sclk_od = polaris10_get_sclk_od,
+       .set_sclk_od = polaris10_set_sclk_od,
+       .get_mclk_od = polaris10_get_mclk_od,
+       .set_mclk_od = polaris10_set_mclk_od,
 };
 
 int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
-       struct polaris10_hwmgr  *data;
-
-       data = kzalloc (sizeof(struct polaris10_hwmgr), GFP_KERNEL);
-       if (data == NULL)
-               return -ENOMEM;
-
-       hwmgr->backend = data;
        hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
        hwmgr->pptable_func = &tonga_pptable_funcs;
        pp_polaris10_thermal_initialize(hwmgr);
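The hwmgr_funcs changes above (dropping get/set_pp_table, adding the OD callbacks) extend a const ops table; callers reach features through optional function pointers instead of branching on the chip at each call site. A standalone sketch of the convention (demo_* names are illustrative):

#include <stdio.h>

struct hwmgr;
struct hwmgr_funcs {
        int (*get_sclk_od)(struct hwmgr *);
        int (*set_sclk_od)(struct hwmgr *, unsigned value);
};

struct hwmgr { const struct hwmgr_funcs *funcs; };

static int demo_get(struct hwmgr *h)             { (void)h; return 5; }
static int demo_set(struct hwmgr *h, unsigned v) { (void)h; printf("od=%u\n", v); return 0; }

static const struct hwmgr_funcs demo_funcs = {
        .get_sclk_od = demo_get,
        .set_sclk_od = demo_set,
};

int main(void)
{
        struct hwmgr h = { .funcs = &demo_funcs };

        if (h.funcs->set_sclk_od)                /* optional-op guard */
                h.funcs->set_sclk_od(&h, 10);
        return h.funcs->get_sclk_od ? h.funcs->get_sclk_od(&h) : 0;
}
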
index beedf35cbfa67974eba8758721b9a4fb227c9a69..fd38b0d7a3c2b27fb02b515e44434ba41dd7eadc 100644 (file)
@@ -309,9 +309,8 @@ struct polaris10_hwmgr {
        uint32_t                           up_hyst;
        uint32_t disable_dpm_mask;
        bool apply_optimized_settings;
-
-       /* soft pptable for re-uploading into smu */
-       void *soft_pp_table;
+       uint32_t                              avfs_vdroop_override_setting;
+       bool                                  apply_avfs_cks_off_voltage;
 };
 
 /* To convert to Q8.8 format for firmware */
index 0b99ab3ba0c5063ca8d3cd70c91e3bab0c3f5ab1..5620e268b55307ecabd1e0bd9287ea37148a3f1b 100644 (file)
@@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
 
                if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
                                (uint8_t *)&data->power_tune_table,
-                               sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
+                               (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to download PmFuseTable Failed!",
                                        return -EINVAL);
@@ -312,6 +312,23 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CAC) && data->cac_enabled) {
+               int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                               (uint16_t)(PPSMC_MSG_DisableCac));
+               PP_ASSERT_WITH_CODE((smc_result == 0),
+                               "Failed to disable CAC in SMC.", result = -1);
+
+               data->cac_enabled = false;
+       }
+       return result;
+}
+
 int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -373,6 +390,48 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment) &&
+                       data->power_containment_features) {
+               int smc_result;
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_TDCLimit) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable TDCLimit in SMC.",
+                                       result = smc_result);
+               }
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_DTE) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_DisableDTE));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable DTE in SMC.",
+                                       result = smc_result);
+               }
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable PkgPwrTracking in SMC.",
+                                       result = smc_result);
+               }
+               data->power_containment_features = 0;
+       }
+
+       return result;
+}
+
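polaris10_disable_power_containment() above walks a feature bitmask (POWERCONTAINMENT_FEATURE_TDCLimit, _DTE, _PkgPwrLimit) that the enable path populated, sends one disable message per bit that is set, records any failure without aborting, and clears the mask when done. The pattern reduced to its essentials, with placeholder message IDs rather than real PPSMC values:

    #include <stdint.h>

    struct pc_feature { uint32_t flag; uint16_t msg; };

    static int disable_features(uint32_t *mask, int (*send)(uint16_t),
                                const struct pc_feature *tbl, int n)
    {
            int i, result = 0;

            for (i = 0; i < n; i++)
                    if ((*mask & tbl[i].flag) && send(tbl[i].msg))
                            result = -1;    /* remember failure, keep going */
            *mask = 0;                      /* every feature is now off */
            return result;
    }
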
 int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
 {
        struct phm_ppt_v1_information *table_info =
index 68bc1cb6d40ca0446340d74e28067a073a8a3c6d..d492d6d288679e6545408864b390baadd71329cc 100644 (file)
@@ -31,6 +31,19 @@ enum polaris10_pt_config_reg_type {
        POLARIS10_CONFIGREG_MAX
 };
 
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK    0xfffc0000
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT  0x12
+#define DIDT_TD_CTRL0__UNUSED_0_MASK    0xfffc0000
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT  0x12
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK   0xfffc0000
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK                0xc0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT              0x0000001e
+
 /* PowerContainment Features */
 #define POWERCONTAINMENT_FEATURE_DTE             0x00000001
 #define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
@@ -62,7 +75,9 @@ void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
 int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
 int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
 int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
 int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
+int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
 int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
 int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
 
index aba167f7d16797a6c47cb23e79215bcc4fce258b..b206632d4650f7ca69d72fad9d1eceaadf5043d3 100644 (file)
@@ -625,10 +625,14 @@ static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
        int ret;
        struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
        struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 
-       if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
                return 0;
 
+       ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
        ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
                        0 : -1;
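Note the inverted guard in tf_polaris10_thermal_avfs_enable() above: the old code enabled AVFS only in the AVFS_BTC_ENABLEAVFS state, while the new code bails out only for AVFS_BTC_NOTSUPPORTED, which the SMU init hunk later in this diff sets when the efuse probe finds no AVFS hardware; every other state now sends PPSMC_MSG_SetGBDroopSettings followed by PPSMC_MSG_EnableAvfs. A minimal sketch of the new gate (set_droop/enable are placeholders for the smum_send_msg_to_smc calls):

    enum avfs_btc_status { AVFS_BTC_BOOT, AVFS_BTC_NOTSUPPORTED };

    static int avfs_enable(enum avfs_btc_status st,
                           int (*set_droop)(void), int (*enable)(void))
    {
            if (st == AVFS_BTC_NOTSUPPORTED)  /* efuse probe said no */
                    return 0;                 /* silently skip */
            set_droop();                      /* PPSMC_MSG_SetGBDroopSettings */
            return enable() ? -1 : 0;         /* PPSMC_MSG_EnableAvfs */
    }
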
 
index 58742e0d14924bb3b0e994c03f8b98f898769081..a3c38bbd1e94d5596ba9190772d40b1fcb5c28db 100644 (file)
@@ -44,6 +44,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index)
        return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
 }
 
+bool acpi_atcs_notify_pcie_device_ready(void *device)
+{
+       int32_t temp_buffer = 1;
+
+       return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
+                               ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
+                                               &temp_buffer,
+                                               NULL,
+                                               0,
+                                               sizeof(temp_buffer),
+                                               0);
+}
+
+
 int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
 {
        struct atcs_pref_req_input atcs_input;
@@ -52,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
        int result;
        struct cgs_system_info info = {0};
 
-       if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST))
+       if( 0 != acpi_atcs_notify_pcie_device_ready(device))
                return -EINVAL;
 
        info.size = sizeof(struct cgs_system_info);
@@ -77,7 +91,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
                                                ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
                                                &atcs_input,
                                                &atcs_output,
-                                               0,
+                                               1,
                                                sizeof(atcs_input),
                                                sizeof(atcs_output));
                if (result != 0)
index da9f5f1b6dc2ccd00cd7f2508c316b189ccec657..5d70e2c47faf9731c35c626392620331b233d5d2 100644 (file)
@@ -552,13 +552,13 @@ static bool atomctrl_lookup_gpio_pin(
                                pin_assignment->ucGpioPinBitShift;
                        gpio_pin_assignment->us_gpio_pin_aindex =
                                le16_to_cpu(pin_assignment->usGpioPin_AIndex);
-                       return false;
+                       return true;
                }
 
                offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
        }
 
-       return true;
+       return false;
 }
 
 /**
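The return-value flip above changes atomctrl_lookup_gpio_pin() (and with it the exported atomctrl_get_pp_assign_pin()) from error-code style, where 0 meant the pin was found, to predicate style, where true means found; the tonga_init_smc_table() and backend-init hunks later in this diff update the call sites from "0 == atomctrl_get_pp_assign_pin(...)" to a plain truth test. A generic standalone demo of the new contract (lookup() is a stand-in, not a driver function):

    #include <stdbool.h>
    #include <stdio.h>

    static bool lookup(int key, int *out)
    {
            if (key == 42) { *out = 7; return true; }   /* found */
            return false;                               /* not found */
    }

    int main(void)
    {
            int v;
            if (lookup(42, &v))     /* old style tested "0 == lookup(...)" */
                    printf("found %d\n", v);
            return 0;
    }
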
@@ -1302,3 +1302,46 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
 
        return 0;
 }
+
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
+{
+       ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
+
+       if (param == NULL)
+               return -EINVAL;
+
+       profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
+                       cgs_atom_get_data_table(hwmgr->device,
+                                       GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
+                                       NULL, NULL, NULL);
+       if (!profile)
+               return -1;
+
+       param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
+       param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
+       param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
+       param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
+       param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
+       param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
+       param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
+       param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
+       param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
+       param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
+       param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
+       param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
+       param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+       param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+       param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+       param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
+       param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
+       param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
+       param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
+       param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
+       param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
+       param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
+       param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
+       param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
+       param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
+
+       return 0;
+}
index d24ebb566905295587b16211cbef4dc1eca0c5fa..248c5db5f38025559d345d311f1cfe3d7e1eb1ad 100644 (file)
@@ -250,6 +250,35 @@ struct pp_atomctrl_gpio_pin_assignment {
 };
 typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment;
 
+struct pp_atom_ctrl__avfs_parameters {
+       uint32_t  ulAVFS_meanNsigma_Acontant0;
+       uint32_t  ulAVFS_meanNsigma_Acontant1;
+       uint32_t  ulAVFS_meanNsigma_Acontant2;
+       uint16_t usAVFS_meanNsigma_DC_tol_sigma;
+       uint16_t usAVFS_meanNsigma_Platform_mean;
+       uint16_t usAVFS_meanNsigma_Platform_sigma;
+       uint32_t  ulGB_VDROOP_TABLE_CKSOFF_a0;
+       uint32_t  ulGB_VDROOP_TABLE_CKSOFF_a1;
+       uint32_t  ulGB_VDROOP_TABLE_CKSOFF_a2;
+       uint32_t  ulGB_VDROOP_TABLE_CKSON_a0;
+       uint32_t  ulGB_VDROOP_TABLE_CKSON_a1;
+       uint32_t  ulGB_VDROOP_TABLE_CKSON_a2;
+       uint32_t  ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+       uint16_t  usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+       uint32_t  ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+       uint32_t  ulAVFSGB_FUSE_TABLE_CKSON_m1;
+       uint16_t  usAVFSGB_FUSE_TABLE_CKSON_m2;
+       uint32_t  ulAVFSGB_FUSE_TABLE_CKSON_b;
+       uint16_t  usMaxVoltage_0_25mv;
+       uint8_t  ucEnableGB_VDROOP_TABLE_CKSOFF;
+       uint8_t  ucEnableGB_VDROOP_TABLE_CKSON;
+       uint8_t  ucEnableGB_FUSE_TABLE_CKSOFF;
+       uint8_t  ucEnableGB_FUSE_TABLE_CKSON;
+       uint16_t usPSM_Age_ComFactor;
+       uint8_t  ucEnableApplyAVFS_CKS_OFF_Voltage;
+       uint8_t  ucReserved;
+};
+
 extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
 extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
 extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
@@ -278,5 +307,8 @@ extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clo
 extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
                                uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
 extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
+
+extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
+
 #endif
 
index 2f1a14fe05b1d9f3ebe6fd950f45888f49b95ec0..35bc8a29b773c6378f10cf67f3f43f3998c4d604 100644 (file)
@@ -794,15 +794,18 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2(
 static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
                                     struct pp_hwmgr *hwmgr)
 {
-       const void *table_addr = NULL;
+       const void *table_addr = hwmgr->soft_pp_table;
        uint8_t frev, crev;
        uint16_t size;
 
-       table_addr = cgs_atom_get_data_table(hwmgr->device,
-                       GetIndexIntoMasterTable(DATA, PowerPlayInfo),
-                       &size, &frev, &crev);
+       if (!table_addr) {
+               table_addr = cgs_atom_get_data_table(hwmgr->device,
+                               GetIndexIntoMasterTable(DATA, PowerPlayInfo),
+                               &size, &frev, &crev);
 
-       hwmgr->soft_pp_table = table_addr;
+               hwmgr->soft_pp_table = table_addr;
+               hwmgr->soft_pp_table_size = size;
+       }
 
        return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
 }
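get_powerplay_table() now seeds table_addr from hwmgr->soft_pp_table and queries the ATOM BIOS only on a miss, caching both the pointer and its size for later consumers instead of re-reading the table on every call; the uninit hunks below drop their kfree() of the pointer accordingly. The shape of the pattern in isolation (get_table_from_bios() stands in for cgs_atom_get_data_table()):

    #include <stddef.h>

    struct ctx {
            const void *cached;
            size_t size;
    };

    extern const void *get_table_from_bios(size_t *size);

    static const void *get_cached_table(struct ctx *c)
    {
            if (!c->cached)         /* first call: hit the BIOS once */
                    c->cached = get_table_from_bios(&c->size);
            return c->cached;       /* every later call reuses it */
    }
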
@@ -1499,7 +1502,7 @@ int get_number_of_vce_state_table_entries(
        const ATOM_PPLIB_VCE_State_Table *vce_table =
                                    get_vce_state_table(hwmgr, table);
 
-       if (vce_table > 0)
+       if (vce_table)
                return vce_table->numEntries;
 
        return 0;
@@ -1589,11 +1592,6 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr)
 
 static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
 {
-       if (NULL != hwmgr->soft_pp_table) {
-               kfree(hwmgr->soft_pp_table);
-               hwmgr->soft_pp_table = NULL;
-       }
-
        if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) {
                kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
                hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
index 16fed487973b9bd28396d109ba079086b127379c..6c4553cf80230f381aeceedfd29774f80205170f 100644 (file)
@@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
                }
        }
 
-       /* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
-       for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
-               data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
-               /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
-               /* param1 is for corresponding std voltage */
-               data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
-       }
-       data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
-
-       if (NULL != allowed_vdd_mclk_table) {
-               /* Initialize Vddci DPM table based on allow Mclk values */
-               for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
-                       data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
-                       data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
-                       data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
-                       data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
-               }
-               data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
-               data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
-       }
-
        /* setup PCIE gen speed levels*/
        tonga_setup_default_pcie_tables(hwmgr);
 
@@ -3047,8 +3026,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
 
        reg_value = 0;
        if ((0 == reg_value) &&
-               (0 == atomctrl_get_pp_assign_pin(hwmgr,
-                       VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) {
+               (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+                                               &gpio_pin_assignment))) {
                table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_RegulatorHot);
@@ -3061,8 +3040,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
        /* ACDC Switch GPIO */
        reg_value = 0;
        if ((0 == reg_value) &&
-               (0 == atomctrl_get_pp_assign_pin(hwmgr,
-                       PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) {
+               (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+                                               &gpio_pin_assignment))) {
                table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_AutomaticDCTransition);
@@ -3084,8 +3063,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
        }
 
        reg_value = 0;
-       if ((0 == reg_value) &&
-               (0 == atomctrl_get_pp_assign_pin(hwmgr,
+       if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
                        THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_ThermalOutGPIO);
@@ -4443,13 +4421,6 @@ int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
 
 int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (data->soft_pp_table) {
-               kfree(data->soft_pp_table);
-               data->soft_pp_table = NULL;
-       }
-
        return phm_hwmgr_backend_fini(hwmgr);
 }
 
@@ -4463,7 +4434,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
        int result = 0;
        SMU72_Discrete_DpmTable  *table = NULL;
-       tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+       tonga_hwmgr *data;
        pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
        struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
        phw_tonga_ulv_parm *ulv;
@@ -4472,6 +4443,12 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((NULL != hwmgr),
                "Invalid Parameter!", return -1;);
 
+       data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       hwmgr->backend = data;
+
        data->dll_defaule_on = 0;
        data->sram_end = SMC_RAM_END;
 
@@ -4510,6 +4487,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
        data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
        data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
+       data->force_pcie_gen = PP_PCIEGenInvalid;
 
        if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
                                VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
@@ -4591,7 +4569,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable,
        * Peak Current Control feature is enabled and we should program PCC HW register
        */
-       if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
                uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
                                                                                CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
 
@@ -4659,7 +4637,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_gen_cap = 0x30007;
+                       data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
                else
                        data->pcie_gen_cap = (uint32_t)sys_info.value;
                if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -4668,7 +4646,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_lane_cap = 0x2f0000;
+                       data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
                else
                        data->pcie_lane_cap = (uint32_t)sys_info.value;
        } else {
@@ -6051,42 +6029,6 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
                                CG_FDO_CTRL2, FDO_PWM_MODE);
 }
 
-static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
-                                             hwmgr->soft_pp_table_size,
-                                             GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       *table = (char *)&data->soft_pp_table;
-
-       return hwmgr->soft_pp_table_size;
-}
-
-static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       memcpy(data->soft_pp_table, buf, size);
-
-       hwmgr->soft_pp_table = data->soft_pp_table;
-
-       /* TODO: re-init powerplay to implement modified pptable */
-
-       return 0;
-}
-
 static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask)
 {
@@ -6194,11 +6136,96 @@ static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
        return size;
 }
 
+static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+       struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+       struct tonga_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       int value;
+
+       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+                       100 /
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return value;
+}
+
+static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+       struct tonga_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       struct pp_power_state  *ps;
+       struct tonga_power_state  *tonga_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
+
+       tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+                       value / 100 +
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return 0;
+}
+
+static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+       struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+       struct tonga_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       int value;
+
+       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+                       100 /
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return value;
+}
+
+static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+       struct tonga_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       struct pp_power_state  *ps;
+       struct tonga_power_state  *tonga_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
+
+       tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+                       value / 100 +
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return 0;
+}
+
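The four new callbacks express overdrive as a percentage over the top "golden" (stock) DPM level. Worked example with a stock top engine clock of 100000 (1000 MHz, assuming the driver's usual 10 kHz clock units): a current top level of 105000 reads back as (105000 - 100000) * 100 / 100000 = 5, and writing 5 programs 100000 * 5 / 100 + 100000 = 105000; requests above 20 are clamped to a +20% ceiling. The same integer arithmetic, standalone:

    #include <stdint.h>
    #include <stdio.h>

    static int get_od_percent(uint32_t cur, uint32_t golden)
    {
            return (int)((cur - golden) * 100 / golden);
    }

    static uint32_t apply_od_percent(uint32_t golden, uint32_t pct)
    {
            if (pct > 20)                   /* same clamp as above */
                    pct = 20;
            return golden * pct / 100 + golden;
    }

    int main(void)
    {
            printf("%d\n", get_od_percent(105000, 100000));  /* 5 */
            printf("%u\n", apply_od_percent(100000, 5));     /* 105000 */
            return 0;
    }
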
 static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
        .backend_init = &tonga_hwmgr_backend_init,
        .backend_fini = &tonga_hwmgr_backend_fini,
        .asic_setup = &tonga_setup_asic_task,
        .dynamic_state_management_enable = &tonga_enable_dpm_tasks,
+       .dynamic_state_management_disable = &tonga_disable_dpm_tasks,
        .apply_state_adjust_rules = tonga_apply_state_adjust_rules,
        .force_dpm_level = &tonga_force_dpm_level,
        .power_state_set = tonga_set_power_state_tasks,
@@ -6232,22 +6259,16 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
        .check_states_equal = tonga_check_states_equal,
        .set_fan_control_mode = tonga_set_fan_control_mode,
        .get_fan_control_mode = tonga_get_fan_control_mode,
-       .get_pp_table = tonga_get_pp_table,
-       .set_pp_table = tonga_set_pp_table,
        .force_clock_level = tonga_force_clock_level,
        .print_clock_levels = tonga_print_clock_levels,
+       .get_sclk_od = tonga_get_sclk_od,
+       .set_sclk_od = tonga_set_sclk_od,
+       .get_mclk_od = tonga_get_mclk_od,
+       .set_mclk_od = tonga_set_mclk_od,
 };
 
 int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
-       tonga_hwmgr  *data;
-
-       data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL);
-       if (data == NULL)
-               return -ENOMEM;
-       memset(data, 0x00, sizeof(tonga_hwmgr));
-
-       hwmgr->backend = data;
        hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
        hwmgr->pptable_func = &tonga_pptable_funcs;
        pp_tonga_thermal_initialize(hwmgr);
index 573cd39fe78d34dd09f0a4b34c074dd160d861bd..3961884bfa9bf688d79e95b2e7d53eba6bea92c5 100644 (file)
@@ -352,9 +352,6 @@ struct tonga_hwmgr {
        bool                           samu_power_gated; /* 1: gated, 0:not gated */
        bool                           acp_power_gated;  /* 1: gated, 0:not gated */
        bool                           pg_acp_init;
-
-       /* soft pptable for re-uploading into smu */
-       void *soft_pp_table;
 };
 
 typedef struct tonga_hwmgr tonga_hwmgr;
index 1b44f4e9b8f55c02fe5faf71ba0687465840fe9d..f127198aafc47a12512ca72bf0a19a2ead685402 100644 (file)
@@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
        ATOM_Tonga_SCLK_Dependency_Record entries[1];                            /* Dynamically allocate entries. */
 } ATOM_Tonga_SCLK_Dependency_Table;
 
+typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+       UCHAR  ucVddInd;                                                                                        /* Base voltage */
+       USHORT usVddcOffset;                                                                            /* Offset relative to base voltage */
+       ULONG ulSclk;
+       USHORT usEdcCurrent;
+       UCHAR  ucReliabilityTemperature;
+       UCHAR  ucCKSVOffsetandDisable;                  /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+       ULONG  ulSclkOffset;
+} ATOM_Polaris_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                     /* Number of entries. */
+       ATOM_Polaris_SCLK_Dependency_Record entries[1];                          /* Dynamically allocate entries. */
+} ATOM_Polaris_SCLK_Dependency_Table;
+
 typedef struct _ATOM_Tonga_PCIE_Record {
        UCHAR ucPCIEGenSpeed;
        UCHAR usPCIELaneWidth;
index 10e3630ee39d8d86c4897754274d70b451203328..94d6b472e1fe40bb0ca5718704c03cbfec93458d 100644 (file)
@@ -408,41 +408,78 @@ static int get_mclk_voltage_dependency_table(
 static int get_sclk_voltage_dependency_table(
                struct pp_hwmgr *hwmgr,
                phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
-               const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table
+               const PPTable_Generic_SubTable_Header *sclk_dep_table
                )
 {
        uint32_t table_size, i;
        phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
 
-       PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries),
-               "Invalid PowerPlay Table!", return -1);
+       if (sclk_dep_table->ucRevId < 1) {
+               const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
+                           (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
 
-       table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-               * sclk_dep_table->ucNumEntries;
+               PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
+                       "Invalid PowerPlay Table!", return -1);
 
-       sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
-               kzalloc(table_size, GFP_KERNEL);
+               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+                       * tonga_table->ucNumEntries;
 
-       if (NULL == sclk_table)
-               return -ENOMEM;
+               sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+                       kzalloc(table_size, GFP_KERNEL);
 
-       memset(sclk_table, 0x00, table_size);
-
-       sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries;
-
-       for (i = 0; i < sclk_dep_table->ucNumEntries; i++) {
-               sclk_table->entries[i].vddInd =
-                       sclk_dep_table->entries[i].ucVddInd;
-               sclk_table->entries[i].vdd_offset =
-                       sclk_dep_table->entries[i].usVddcOffset;
-               sclk_table->entries[i].clk =
-                       sclk_dep_table->entries[i].ulSclk;
-               sclk_table->entries[i].cks_enable =
-                       (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-               sclk_table->entries[i].cks_voffset =
-                       (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
-       }
+               if (NULL == sclk_table)
+                       return -ENOMEM;
 
+               memset(sclk_table, 0x00, table_size);
+
+               sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
+
+               for (i = 0; i < tonga_table->ucNumEntries; i++) {
+                       sclk_table->entries[i].vddInd =
+                               tonga_table->entries[i].ucVddInd;
+                       sclk_table->entries[i].vdd_offset =
+                               tonga_table->entries[i].usVddcOffset;
+                       sclk_table->entries[i].clk =
+                               tonga_table->entries[i].ulSclk;
+                       sclk_table->entries[i].cks_enable =
+                               (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+                       sclk_table->entries[i].cks_voffset =
+                               (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+               }
+       } else {
+               const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
+                           (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+
+               PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+                       * polaris_table->ucNumEntries;
+
+               sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+                       kzalloc(table_size, GFP_KERNEL);
+
+               if (NULL == sclk_table)
+                       return -ENOMEM;
+
+               memset(sclk_table, 0x00, table_size);
+
+               sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
+
+               for (i = 0; i < polaris_table->ucNumEntries; i++) {
+                       sclk_table->entries[i].vddInd =
+                               polaris_table->entries[i].ucVddInd;
+                       sclk_table->entries[i].vdd_offset =
+                               polaris_table->entries[i].usVddcOffset;
+                       sclk_table->entries[i].clk =
+                               polaris_table->entries[i].ulSclk;
+                       sclk_table->entries[i].cks_enable =
+                               (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+                       sclk_table->entries[i].cks_voffset =
+                               (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+                       sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+               }
+       }
        *pp_tonga_sclk_dep_table = sclk_table;
 
        return 0;
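Both the Tonga and Polaris branches above unpack ucCKSVOffsetandDisable the same way, matching the header comment earlier in this diff: bit 7 set disables clock stretching for that SCLK level, and bits 0-6 carry the CKS voltage offset. The decode in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t raw = 0x85;     /* example byte: bit7 = 1, offset = 5 */
            int cks_enable  = ((raw & 0x80) >> 7) == 0;  /* here: 0 */
            int cks_voffset = raw & 0x7F;                /* here: 5 */

            printf("enable=%d voffset=%d\n", cks_enable, cks_voffset);
            return 0;
    }
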
@@ -708,8 +745,8 @@ static int init_clock_voltage_dependency(
        const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
                (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
                le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-       const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-               (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
+       const PPTable_Generic_SubTable_Header *sclk_dep_table =
+               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
                le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
        const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
                (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
@@ -1040,48 +1077,41 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
        struct phm_ppt_v1_information *pp_table_information =
                (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
-       if (NULL != hwmgr->soft_pp_table) {
-               kfree(hwmgr->soft_pp_table);
-               hwmgr->soft_pp_table = NULL;
-       }
-
-       if (NULL != pp_table_information->vdd_dep_on_sclk)
-               pp_table_information->vdd_dep_on_sclk = NULL;
+       kfree(pp_table_information->vdd_dep_on_sclk);
+       pp_table_information->vdd_dep_on_sclk = NULL;
 
-       if (NULL != pp_table_information->vdd_dep_on_mclk)
-               pp_table_information->vdd_dep_on_mclk = NULL;
+       kfree(pp_table_information->vdd_dep_on_mclk);
+       pp_table_information->vdd_dep_on_mclk = NULL;
 
-       if (NULL != pp_table_information->valid_mclk_values)
-               pp_table_information->valid_mclk_values = NULL;
+       kfree(pp_table_information->valid_mclk_values);
+       pp_table_information->valid_mclk_values = NULL;
 
-       if (NULL != pp_table_information->valid_sclk_values)
-               pp_table_information->valid_sclk_values = NULL;
+       kfree(pp_table_information->valid_sclk_values);
+       pp_table_information->valid_sclk_values = NULL;
 
-       if (NULL != pp_table_information->vddc_lookup_table)
-               pp_table_information->vddc_lookup_table = NULL;
+       kfree(pp_table_information->vddc_lookup_table);
+       pp_table_information->vddc_lookup_table = NULL;
 
-       if (NULL != pp_table_information->vddgfx_lookup_table)
-               pp_table_information->vddgfx_lookup_table = NULL;
+       kfree(pp_table_information->vddgfx_lookup_table);
+       pp_table_information->vddgfx_lookup_table = NULL;
 
-       if (NULL != pp_table_information->mm_dep_table)
-               pp_table_information->mm_dep_table = NULL;
+       kfree(pp_table_information->mm_dep_table);
+       pp_table_information->mm_dep_table = NULL;
 
-       if (NULL != pp_table_information->cac_dtp_table)
-               pp_table_information->cac_dtp_table = NULL;
+       kfree(pp_table_information->cac_dtp_table);
+       pp_table_information->cac_dtp_table = NULL;
 
-       if (NULL != hwmgr->dyn_state.cac_dtp_table)
-               hwmgr->dyn_state.cac_dtp_table = NULL;
+       kfree(hwmgr->dyn_state.cac_dtp_table);
+       hwmgr->dyn_state.cac_dtp_table = NULL;
 
-       if (NULL != pp_table_information->ppm_parameter_table)
-               pp_table_information->ppm_parameter_table = NULL;
+       kfree(pp_table_information->ppm_parameter_table);
+       pp_table_information->ppm_parameter_table = NULL;
 
-       if (NULL != pp_table_information->pcie_table)
-               pp_table_information->pcie_table = NULL;
+       kfree(pp_table_information->pcie_table);
+       pp_table_information->pcie_table = NULL;
 
-       if (NULL != hwmgr->pptable) {
-               kfree(hwmgr->pptable);
-               hwmgr->pptable = NULL;
-       }
+       kfree(hwmgr->pptable);
+       hwmgr->pptable = NULL;
 
        return result;
 }
index 50b367d44307c8992b8a17ef78b05c3ede5c09f5..b764c8c05ec8127d64cc7b57ea233eb8cf581c3e 100644 (file)
@@ -132,6 +132,7 @@ struct amd_pp_init {
        uint32_t chip_family;
        uint32_t chip_id;
        uint32_t rev_id;
+       bool powercontainment_enabled;
 };
 enum amd_pp_display_config_type{
        AMD_PP_DisplayConfigType_None = 0,
@@ -342,6 +343,10 @@ struct amd_powerplay_funcs {
        int (*set_pp_table)(void *handle, const char *buf, size_t size);
        int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
        int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+       int (*get_sclk_od)(void *handle);
+       int (*set_sclk_od)(void *handle, uint32_t value);
+       int (*get_mclk_od)(void *handle);
+       int (*set_mclk_od)(void *handle, uint32_t value);
 };
 
 struct amd_powerplay {
@@ -355,6 +360,8 @@ int amd_powerplay_init(struct amd_pp_init *pp_init,
 
 int amd_powerplay_fini(void *handle);
 
+int amd_powerplay_reset(void *handle);
+
 int amd_powerplay_display_configuration_change(void *handle,
                const struct amd_pp_display_configuration *input);
 
index 56f712c7d07aff6051b1c473ca6936b1c1f512a2..962cb538595117421c0b08d5a3fa4b94ceb1f2b1 100644 (file)
@@ -340,6 +340,7 @@ extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
 extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
 extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
 extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
+extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
 extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
 extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
 extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
index 28f571449495d6e639c6135d4394d8eedaea7587..0bbc42a224e5fea004933abd511b875bdf79c6db 100644 (file)
@@ -278,6 +278,8 @@ struct pp_hwmgr_func {
 
        int (*dynamic_state_management_enable)(
                                                struct pp_hwmgr *hw_mgr);
+       int (*dynamic_state_management_disable)(
+                                               struct pp_hwmgr *hw_mgr);
 
        int (*patch_boot_state)(struct pp_hwmgr *hwmgr,
                                     struct pp_hw_power_state *hw_ps);
@@ -333,11 +335,13 @@ struct pp_hwmgr_func {
        int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks);
        int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
        int (*power_off_asic)(struct pp_hwmgr *hwmgr);
-       int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table);
-       int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size);
        int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
        int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
        int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+       int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
+       int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
+       int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
+       int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
 };
 
 struct pp_table_func {
@@ -578,6 +582,7 @@ struct pp_hwmgr {
        struct pp_smumgr *smumgr;
        const void *soft_pp_table;
        uint32_t soft_pp_table_size;
+       void *hardcode_pp_table;
        bool need_pp_table_upload;
        enum amd_dpm_forced_level dpm_level;
        bool block_hw_access;
@@ -607,6 +612,7 @@ struct pp_hwmgr {
        uint32_t num_ps;
        struct pp_thermal_controller_info thermal_controller;
        bool fan_ctrl_is_in_default_mode;
+       bool powercontainment_enabled;
        uint32_t fan_ctrl_default_mode;
        uint32_t tmin;
        struct phm_microcode_version_info microcode_version_info;
index 0c6a413eaa5be9020333e68ec96a35d49a201e6e..d41d37ab5b7c59f586f32c3133240230de93d597 100644 (file)
@@ -27,6 +27,7 @@
 
 #pragma pack(push, 1)
 
+#define PPSMC_MSG_SetGBDroopSettings          ((uint16_t) 0x305)
 
 #define PPSMC_SWSTATE_FLAG_DC                           0x01
 #define PPSMC_SWSTATE_FLAG_UVD                          0x02
index 3bd5e69b90456641c8b318fa89fc74b6d99ab89f..3df5de2cdab0254becae8ab384e4f8feb2cd2c8d 100644 (file)
@@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device,
 extern int acpi_pcie_perf_request(void *device,
                                                uint8_t perf_req,
                                                bool advertise);
+extern bool acpi_atcs_notify_pcie_device_ready(void *device);
index 1a12d85b8e97c05a186f2b99ef889b841098e5ab..fd10a9fa843d03bce96b63f4952f0d41ed84eba7 100644 (file)
 #define SMU__NUM_LCLK_DPM_LEVELS 8
 #define SMU__NUM_PCIE_DPM_LEVELS 8
 
+#define EXP_M1  35
+#define EXP_M2  92821
+#define EXP_B   66629747
+
+#define EXP_M1_1  365
+#define EXP_M2_1  658700
+#define EXP_B_1   305506134
+
+#define EXP_M1_2  189
+#define EXP_M2_2  379692
+#define EXP_B_2   194609469
+
+#define EXP_M1_3  99
+#define EXP_M2_3  217915
+#define EXP_B_3   122255994
+
+#define EXP_M1_4  51
+#define EXP_M2_4  122643
+#define EXP_B_4   74893384
+
+#define EXP_M1_5  423
+#define EXP_M2_5  1103326
+#define EXP_B_5   728122621
+
 enum SID_OPTION {
        SID_OPTION_HI,
        SID_OPTION_LO,
@@ -548,20 +572,20 @@ struct SMU74_Firmware_Header {
        uint32_t CacConfigTable;
        uint32_t CacStatusTable;
 
-
        uint32_t mcRegisterTable;
 
-
        uint32_t mcArbDramTimingTable;
 
-
-
-
        uint32_t PmFuseTable;
        uint32_t Globals;
        uint32_t ClockStretcherTable;
        uint32_t VftTable;
-       uint32_t Reserved[21];
+       uint32_t Reserved1;
+       uint32_t AvfsTable;
+       uint32_t AvfsCksOffGbvTable;
+       uint32_t AvfsMeanNSigma;
+       uint32_t AvfsSclkOffsetTable;
+       uint32_t Reserved[16];
        uint32_t Signature;
 };
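The firmware-header hunk above carves Reserved1 plus the four AVFS table offsets out of the old Reserved[21] pad and shrinks the pad to Reserved[16], five words for five, so the struct size and the SRAM offset of Signature are unchanged. That invariant can be checked at compile time; a sketch, not the driver's own code:

    #include <stdint.h>

    struct old_tail { uint32_t Reserved[21]; uint32_t Signature; };
    struct new_tail {
            uint32_t Reserved1;
            uint32_t AvfsTable, AvfsCksOffGbvTable;
            uint32_t AvfsMeanNSigma, AvfsSclkOffsetTable;
            uint32_t Reserved[16];
            uint32_t Signature;
    };

    _Static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
                   "SMU74 firmware header must keep its layout size");
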
 
@@ -701,8 +725,6 @@ VR Config info is contained in dpmTable.VRConfig */
 struct SMU_ClockStretcherDataTableEntry {
        uint8_t minVID;
        uint8_t maxVID;
-
-
        uint16_t setting;
 };
 typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
@@ -769,6 +791,43 @@ struct VFT_TABLE_t {
 typedef struct VFT_TABLE_t VFT_TABLE_t;
 
 
+/* Total margin, root mean square of Fmax + DC + Platform */
+struct AVFS_Margin_t {
+       VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_Margin_t AVFS_Margin_t;
+
+#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
+#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
+
+struct GB_VDROOP_TABLE_t {
+       int32_t a0;
+       int32_t a1;
+       int32_t a2;
+       uint32_t spare;
+};
+typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
+
+struct AVFS_CksOff_Gbv_t {
+       VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
+
+struct AVFS_meanNsigma_t {
+       uint32_t Aconstant[3];
+       uint16_t DC_tol_sigma;
+       uint16_t Platform_mean;
+       uint16_t Platform_sigma;
+       uint16_t PSM_Age_CompFactor;
+       uint8_t  Static_Voltage_Offset[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
+
+struct AVFS_Sclk_Offset_t {
+       uint16_t Sclk_Offset[8];
+};
+typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
+
 #endif
 
 
index 0dfe82336dc73f439a8d2dcca5043cf6a3661727..b85ff5400e57eb2ca78a2e1500bfbf8369f57cf6 100644 (file)
@@ -223,6 +223,16 @@ struct SMU74_Discrete_StateInfo {
 
 typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo;
 
+struct SMU_QuadraticCoeffs {
+       int32_t m1;
+       uint32_t b;
+
+       int16_t m2;
+       uint8_t m1_shift;
+       uint8_t m2_shift;
+};
+typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
+
 struct SMU74_Discrete_DpmTable {
 
        SMU74_PIDController                  GraphicsPIDController;
@@ -258,7 +268,14 @@ struct SMU74_Discrete_DpmTable {
        uint8_t                             ThermOutPolarity;
        uint8_t                             ThermOutMode;
        uint8_t                             BootPhases;
-       uint32_t                            Reserved[4];
+
+       uint8_t                             VRHotLevel;
+       uint8_t                             Reserved1[3];
+       uint16_t                            FanStartTemperature;
+       uint16_t                            FanStopTemperature;
+       uint16_t                            MaxVoltage;
+       uint16_t                            Reserved2;
+       uint32_t                            Reserved[1];
 
        SMU74_Discrete_GraphicsLevel        GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS];
        SMU74_Discrete_MemoryLevel          MemoryACPILevel;
@@ -347,6 +364,8 @@ struct SMU74_Discrete_DpmTable {
 
        uint32_t                            CurrSclkPllRange;
        sclkFcwRange_t                      SclkFcwRangeTable[NUM_SCLK_RANGE];
+       GB_VDROOP_TABLE_t                   BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
+       SMU_QuadraticCoeffs                 AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
 };
 
 typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable;
@@ -550,16 +569,6 @@ struct SMU7_AcpiScoreboard {
 
 typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
 
-struct SMU_QuadraticCoeffs {
-       int32_t m1;
-       uint32_t b;
-
-       int16_t m2;
-       uint8_t m1_shift;
-       uint8_t m2_shift;
-};
-typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
-
 struct SMU74_Discrete_PmFuses {
        uint8_t BapmVddCVidHiSidd[8];
        uint8_t BapmVddCVidLoSidd[8];
@@ -821,6 +830,17 @@ typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
 #define DB_PCC_SHIFT 26 
 #define DB_EDC_SHIFT 27
 
+#define BTCGB0_Vdroop_Enable_MASK  0x1
+#define BTCGB1_Vdroop_Enable_MASK  0x2
+#define AVFSGB0_Vdroop_Enable_MASK 0x4
+#define AVFSGB1_Vdroop_Enable_MASK 0x8
+
+#define BTCGB0_Vdroop_Enable_SHIFT  0
+#define BTCGB1_Vdroop_Enable_SHIFT  1
+#define AVFSGB0_Vdroop_Enable_SHIFT 2
+#define AVFSGB1_Vdroop_Enable_SHIFT 3
+
+
 #pragma pack(pop)
 
 
index 673a75c74e18a33c2a42f0ae92bca8e40ee30603..8e52a2e82db58c4de3ffc7ff3903bbabec037f4a 100644 (file)
@@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
 
 static int fiji_smu_fini(struct pp_smumgr *smumgr)
 {
+       struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+
+       smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
        if (smumgr->backend) {
                kfree(smumgr->backend);
                smumgr->backend = NULL;
        }
+
+       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
        return 0;
 }
 
index de618ead9db8df73e10ee0da9dac0cad940049b3..5dba7c50971020fe1ff3a052970fd14cb8860b09 100644 (file)
 static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
        /*  Min      pcie   DeepSleep Activity  CgSpll      CgSpll    CcPwr  CcPwr  Sclk         Enabled      Enabled                       Voltage    Power */
        /* Voltage, DpmLevel, DivId,  Level,  FuncCntl3,  FuncCntl4,  DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
-       { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } }
+       { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+       { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+       { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } },
+       { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+       { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } },
+       { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+       { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } },
+       { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
 };
 
 static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
-       {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00,
-        0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00};
+       {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
 
 /**
 * Set the address for reading/writing the SMC SRAM space.
@@ -219,6 +218,18 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
        && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
 }
 
+static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+       uint32_t efuse;
+
+       efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+       efuse &= 0x00000001;
+       if (efuse)
+               return true;
+
+       return false;
+}
+
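polaris10_is_hw_avfs_present() reads one fuse bit through the SMC indirect register space: ixSMU_EFUSE_0 + (49 * 4) addresses efuse word 49 (32-bit words, 4 bytes apart), and the 0x00000001 mask keeps bit 0 of that word. The addressing by itself, with read32() as a placeholder for cgs_read_ind_register():

    #include <stdint.h>
    #include <stdbool.h>

    static bool efuse_bit(uint32_t (*read32)(uint32_t), uint32_t base,
                          unsigned int word, unsigned int bit)
    {
            return (read32(base + word * 4) >> bit) & 1;
    }
    /* usage: avfs_present = efuse_bit(read32, ixSMU_EFUSE_0, 49, 0); */
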
 /**
 * Send a message to the SMC, and wait for its response.
 *
@@ -228,21 +239,27 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
 */
 int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
 {
+       int ret;
+
        if (!polaris10_is_smc_ram_running(smumgr))
                return -1;
 
+
        SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
 
-       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
-               printk("Failed to send Previous Message.\n");
+       ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
 
+       if (ret != 1)
+               printk("\n failed to send pre message %x ret is %d \n",  msg, ret);
 
        cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
 
        SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
 
-       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
-               printk("Failed to send Message.\n");
+       ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+       if (ret != 1)
+               printk("\n failed to send message %x ret is %d \n",  msg, ret);
 
        return 0;
 }
@@ -469,6 +486,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr)
                kfree(smumgr->backend);
                smumgr->backend = NULL;
        }
+       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
        return 0;
 }
 
@@ -952,6 +970,11 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
                (cgs_handle_t)smu_data->smu_buffer.handle);
                return -1;);
 
+       if (polaris10_is_hw_avfs_present(smumgr))
+               smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
+       else
+               smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+
        return 0;
 }
 
index c483baf6b4fbc9bb8b76d293c772f727631c0726..7723473e51a073cea3365d0c9b32b4a211bd51bc 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
 #include "pp_instance.h"
 #include "smumgr.h"
 #include "cgs_common.h"
@@ -52,10 +53,10 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
        handle->smu_mgr = smumgr;
 
        switch (smumgr->chip_family) {
-       case AMD_FAMILY_CZ:
+       case AMDGPU_FAMILY_CZ:
                cz_smum_init(smumgr);
                break;
-       case AMD_FAMILY_VI:
+       case AMDGPU_FAMILY_VI:
                switch (smumgr->chip_id) {
                case CHIP_TONGA:
                        tonga_smum_init(smumgr);
@@ -81,6 +82,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 
 int smum_fini(struct pp_smumgr *smumgr)
 {
+       kfree(smumgr->device);
        kfree(smumgr);
        return 0;
 }
index 32820b680d8886104a1e8c28f7324489e9c4c375..b22722eabafcbc53477dd284a2671326443ec011 100644 (file)
@@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
 
 static int tonga_smu_fini(struct pp_smumgr *smumgr)
 {
+       struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
+
+       smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
+       smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
        if (smumgr->backend != NULL) {
                kfree(smumgr->backend);
                smumgr->backend = NULL;
        }
+
+       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
        return 0;
 }
 
index c89dc777768f8cdabe3ce882483c134faa6aefa7..b961a1c6caf30ea09e8eea2dae6f320cafb81d09 100644 (file)
@@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job,
            TP_fast_assign(
                           __entry->entity = sched_job->s_entity;
                           __entry->sched_job = sched_job;
-                          __entry->fence = &sched_job->s_fence->base;
+                          __entry->fence = &sched_job->s_fence->finished;
                           __entry->name = sched_job->sched->name;
                           __entry->job_count = kfifo_len(
                                   &sched_job->s_entity->job_queue) / sizeof(sched_job);
@@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job,
                    ),
 
            TP_fast_assign(
-                   __entry->fence = &fence->base;
+                   __entry->fence = &fence->finished;
                    ),
            TP_printk("fence=%p signaled", __entry->fence)
 );
index c16248cee7799221799c15a1dc707c83fd2b295f..70ff09d10885f3d19828233f6bddcc1e227add5c 100644 (file)
@@ -32,6 +32,7 @@
 
 static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
 
 struct kmem_cache *sched_fence_slab;
 atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -140,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                return r;
 
        atomic_set(&entity->fence_seq, 0);
-       entity->fence_context = fence_context_alloc(1);
+       entity->fence_context = fence_context_alloc(2);
 
        return 0;
 }
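fence_context_alloc(2) reserves two consecutive fence-context numbers for the entity: every job now carries two fences on adjacent timelines. The pairing is wired up in amd_sched_fence_create() later in this patch; condensed:

	/* both fences share one lock and seqno but live on adjacent
	 * contexts (scheduled first, finished second) */
	fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
		   &fence->lock, entity->fence_context, seq);
	fence_init(&fence->finished, &amd_sched_fence_ops_finished,
		   &fence->lock, entity->fence_context + 1, seq);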
@@ -251,17 +252,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 
        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {
-               /* Fence is from the same scheduler */
-               if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
-                       /* Ignore it when it is already scheduled */
-                       fence_put(entity->dependency);
-                       return false;
-               }
 
-               /* Wait for fence to be scheduled */
-               entity->cb.func = amd_sched_entity_clear_dep;
-               list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
-               return true;
+               /*
+                * Fence is from the same scheduler; we only need to
+                * wait for it to be scheduled
+                */
+               fence = fence_get(&s_fence->scheduled);
+               fence_put(entity->dependency);
+               entity->dependency = fence;
+               if (!fence_add_callback(fence, &entity->cb,
+                                       amd_sched_entity_clear_dep))
+                       return true;
+
+               /* Ignore it when it is already scheduled */
+               fence_put(fence);
+               return false;
        }
 
        if (!fence_add_callback(entity->dependency, &entity->cb,
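The rewritten branch leans on the return convention of fence_add_callback(): 0 means the callback was queued and will fire when the fence signals, -ENOENT means the fence has already signaled. Spelled out for the same-scheduler fast path above:

	/* assumed fence_add_callback() contract:
	 *   0       -> cb queued; block the entity until it fires
	 *   -ENOENT -> already signaled; the dependency is already met
	 */
	if (!fence_add_callback(fence, &entity->cb,
				amd_sched_entity_clear_dep))
		return true;	/* keep the entity blocked */
	fence_put(fence);
	return false;		/* already scheduled, do not block */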
@@ -319,46 +324,108 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
        return added;
 }
 
-static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
-       struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
-       schedule_work(&job->work_free_job);
-}
-
 /* amd_sched_job_finish runs from a work item once the hw fence has
  * signaled; it removes the job from the ring_mirror_list and frees it
  */
-void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct work_struct *work)
 {
-       struct amd_sched_job *next;
+       struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+                                                  finish_work);
        struct amd_gpu_scheduler *sched = s_job->sched;
 
+       /* remove job from ring_mirror_list */
+       spin_lock(&sched->job_list_lock);
+       list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-               if (cancel_delayed_work(&s_job->work_tdr))
-                       amd_sched_job_put(s_job);
+               struct amd_sched_job *next;
+
+               spin_unlock(&sched->job_list_lock);
+               cancel_delayed_work_sync(&s_job->work_tdr);
+               spin_lock(&sched->job_list_lock);
 
                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct amd_sched_job, node);
 
-               if (next) {
-                       INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
-                       amd_sched_job_get(next);
+               if (next)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
-               }
        }
+       spin_unlock(&sched->job_list_lock);
+       sched->ops->free_job(s_job);
 }
 
-void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+{
+       struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+                                                finish_cb);
+       schedule_work(&job->finish_work);
+}
+
+static void amd_sched_job_begin(struct amd_sched_job *s_job)
 {
        struct amd_gpu_scheduler *sched = s_job->sched;
 
+       spin_lock(&sched->job_list_lock);
+       list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-               list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
-       {
-               INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
-               amd_sched_job_get(s_job);
+           list_first_entry_or_null(&sched->ring_mirror_list,
+                                    struct amd_sched_job, node) == s_job)
+               schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+       spin_unlock(&sched->job_list_lock);
+}
+
+static void amd_sched_job_timedout(struct work_struct *work)
+{
+       struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+                                                work_tdr.work);
+
+       job->sched->ops->timedout_job(job);
+}
+
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+{
+       struct amd_sched_job *s_job;
+
+       spin_lock(&sched->job_list_lock);
+       list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+               if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+                       fence_put(s_job->s_fence->parent);
+                       s_job->s_fence->parent = NULL;
+               }
+       }
+       spin_unlock(&sched->job_list_lock);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+       struct amd_sched_job *s_job;
+       int r;
+
+       spin_lock(&sched->job_list_lock);
+       s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+                                        struct amd_sched_job, node);
+       if (s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+
+       list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+               struct amd_sched_fence *s_fence = s_job->s_fence;
+               struct fence *fence = sched->ops->run_job(s_job);
+               if (fence) {
+                       s_fence->parent = fence_get(fence);
+                       r = fence_add_callback(fence, &s_fence->cb,
+                                              amd_sched_process_job);
+                       if (r == -ENOENT)
+                               amd_sched_process_job(fence, &s_fence->cb);
+                       else if (r)
+                               DRM_ERROR("fence add callback failed (%d)\n",
+                                         r);
+                       fence_put(fence);
+               } else {
+                       DRM_ERROR("Failed to run job!\n");
+                       amd_sched_process_job(NULL, &s_fence->cb);
+               }
        }
+       spin_unlock(&sched->job_list_lock);
 }
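amd_sched_hw_job_reset() and amd_sched_job_recovery() are intended to bracket a driver's GPU-reset path, with the scheduler worker parked in between so nothing new reaches the ring. A hedged sketch of the expected calling sequence, assuming the worker task is reachable as sched->thread and with my_reset_hw() as a hypothetical driver internal:

	void my_gpu_reset(struct amd_gpu_scheduler *sched)
	{
		kthread_park(sched->thread);	/* quiesce the worker */
		amd_sched_hw_job_reset(sched);	/* detach hw-fence callbacks */
		my_reset_hw();			/* driver-specific reset */
		amd_sched_job_recovery(sched);	/* resubmit mirrored jobs */
		kthread_unpark(sched->thread);	/* resume job flow */
	}

Parking is cooperative: the worker parks itself at the amd_sched_blocked() check added to the main loop further down.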
 
 /**
@@ -372,36 +439,29 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
        struct amd_sched_entity *entity = sched_job->s_entity;
 
-       sched_job->use_sched = 1;
-       fence_add_callback(&sched_job->s_fence->base,
-                                       &sched_job->cb_free_job, amd_sched_free_job);
        trace_amd_sched_job(sched_job);
+       fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+                          amd_sched_job_finish_cb);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
 }
 
 /* init a sched_job with basic field */
 int amd_sched_job_init(struct amd_sched_job *job,
-                                               struct amd_gpu_scheduler *sched,
-                                               struct amd_sched_entity *entity,
-                                               void (*timeout_cb)(struct work_struct *work),
-                                               void (*free_cb)(struct kref *refcount),
-                                               void *owner, struct fence **fence)
+                      struct amd_gpu_scheduler *sched,
+                      struct amd_sched_entity *entity,
+                      void *owner)
 {
-       INIT_LIST_HEAD(&job->node);
-       kref_init(&job->refcount);
        job->sched = sched;
        job->s_entity = entity;
        job->s_fence = amd_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;
 
-       job->s_fence->s_job = job;
-       job->timeout_callback = timeout_cb;
-       job->free_callback = free_cb;
+       INIT_WORK(&job->finish_work, amd_sched_job_finish);
+       INIT_LIST_HEAD(&job->node);
+       INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
 
-       if (fence)
-               *fence = &job->s_fence->base;
        return 0;
 }
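With the timeout and free callbacks folded into the backend ops, driver-side job setup shrinks to the essentials. A sketch of the new call pattern (job/ring/ctx/owner are hypothetical driver-side names; the finished fence is now reached through job->s_fence instead of an out parameter):

	r = amd_sched_job_init(&job->base, &ring->sched, &ctx->entity, owner);
	if (r)
		return r;
	*out_fence = fence_get(&job->base.s_fence->finished);
	amd_sched_entity_push_job(&job->base);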
 
@@ -450,23 +510,25 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;
-       unsigned long flags;
 
        atomic_dec(&sched->hw_rq_count);
-
-       /* remove job from ring_mirror_list */
-       spin_lock_irqsave(&sched->job_list_lock, flags);
-       list_del_init(&s_fence->s_job->node);
-       sched->ops->finish_job(s_fence->s_job);
-       spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
-       amd_sched_fence_signal(s_fence);
+       amd_sched_fence_finished(s_fence);
 
        trace_amd_sched_process_job(s_fence);
-       fence_put(&s_fence->base);
+       fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
 }
 
+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+       if (kthread_should_park()) {
+               kthread_parkme();
+               return true;
+       }
+
+       return false;
+}
+
 static int amd_sched_main(void *param)
 {
        struct sched_param sparam = {.sched_priority = 1};
@@ -476,14 +538,15 @@ static int amd_sched_main(void *param)
        sched_setscheduler(current, SCHED_FIFO, &sparam);
 
        while (!kthread_should_stop()) {
-               struct amd_sched_entity *entity;
+               struct amd_sched_entity *entity = NULL;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;
 
                wait_event_interruptible(sched->wake_up_worker,
-                       (entity = amd_sched_select_entity(sched)) ||
-                       kthread_should_stop());
+                                        (!amd_sched_blocked(sched) &&
+                                         (entity = amd_sched_select_entity(sched))) ||
+                                        kthread_should_stop());
 
                if (!entity)
                        continue;
@@ -495,16 +558,19 @@ static int amd_sched_main(void *param)
                s_fence = sched_job->s_fence;
 
                atomic_inc(&sched->hw_rq_count);
-               amd_sched_job_pre_schedule(sched, sched_job);
+               amd_sched_job_begin(sched_job);
+
                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
                if (fence) {
+                       s_fence->parent = fence_get(fence);
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
-                               DRM_ERROR("fence add callback failed (%d)\n", r);
+                               DRM_ERROR("fence add callback failed (%d)\n",
+                                         r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
index 070095a9433c3f9e2aac2a25484e21169e462329..7cbbbfb502ef1342caa2a5b36aa970368fed5055 100644 (file)
@@ -27,8 +27,6 @@
 #include <linux/kfifo.h>
 #include <linux/fence.h>
 
-#define AMD_SCHED_FENCE_SCHEDULED_BIT  FENCE_FLAG_USER_BITS
-
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
@@ -68,36 +66,34 @@ struct amd_sched_rq {
 };
 
 struct amd_sched_fence {
-       struct fence                    base;
+       struct fence                    scheduled;
+       struct fence                    finished;
        struct fence_cb                 cb;
-       struct list_head                scheduled_cb;
+       struct fence                    *parent;
        struct amd_gpu_scheduler        *sched;
        spinlock_t                      lock;
        void                            *owner;
-       struct amd_sched_job    *s_job;
 };
 
 struct amd_sched_job {
-       struct kref refcount;
        struct amd_gpu_scheduler        *sched;
        struct amd_sched_entity         *s_entity;
        struct amd_sched_fence          *s_fence;
-       bool    use_sched;      /* true if the job goes to scheduler */
-       struct fence_cb                cb_free_job;
-       struct work_struct             work_free_job;
-       struct list_head                           node;
-       struct delayed_work work_tdr;
-       void (*timeout_callback) (struct work_struct *work);
-       void (*free_callback)(struct kref *refcount);
+       struct fence_cb                 finish_cb;
+       struct work_struct              finish_work;
+       struct list_head                node;
+       struct delayed_work             work_tdr;
 };
 
-extern const struct fence_ops amd_sched_fence_ops;
+extern const struct fence_ops amd_sched_fence_ops_scheduled;
+extern const struct fence_ops amd_sched_fence_ops_finished;
 static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 {
-       struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+       if (f->ops == &amd_sched_fence_ops_scheduled)
+               return container_of(f, struct amd_sched_fence, scheduled);
 
-       if (__f->base.ops == &amd_sched_fence_ops)
-               return __f;
+       if (f->ops == &amd_sched_fence_ops_finished)
+               return container_of(f, struct amd_sched_fence, finished);
 
        return NULL;
 }
@@ -109,8 +105,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 struct amd_sched_backend_ops {
        struct fence *(*dependency)(struct amd_sched_job *sched_job);
        struct fence *(*run_job)(struct amd_sched_job *sched_job);
-       void (*begin_job)(struct amd_sched_job *sched_job);
-       void (*finish_job)(struct amd_sched_job *sched_job);
+       void (*timedout_job)(struct amd_sched_job *sched_job);
+       void (*free_job)(struct amd_sched_job *sched_job);
 };
 
 enum amd_sched_priority {
@@ -152,25 +148,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 struct amd_sched_fence *amd_sched_fence_create(
        struct amd_sched_entity *s_entity, void *owner);
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
-void amd_sched_fence_signal(struct amd_sched_fence *fence);
+void amd_sched_fence_finished(struct amd_sched_fence *fence);
 int amd_sched_job_init(struct amd_sched_job *job,
-                                       struct amd_gpu_scheduler *sched,
-                                       struct amd_sched_entity *entity,
-                                       void (*timeout_cb)(struct work_struct *work),
-                                       void (*free_cb)(struct kref* refcount),
-                                       void *owner, struct fence **fence);
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
-                                                               struct amd_sched_job *s_job);
-void amd_sched_job_finish(struct amd_sched_job *s_job);
-void amd_sched_job_begin(struct amd_sched_job *s_job);
-static inline void amd_sched_job_get(struct amd_sched_job *job) {
-       if (job)
-               kref_get(&job->refcount);
-}
-
-static inline void amd_sched_job_put(struct amd_sched_job *job) {
-       if (job)
-               kref_put(&job->refcount, job->free_callback);
-}
-
+                      struct amd_gpu_scheduler *sched,
+                      struct amd_sched_entity *entity,
+                      void *owner);
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
 #endif
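The backend contract is now exactly the four hooks above. A minimal sketch of a driver filling in the reworked ops table (all my_* names hypothetical):

	static const struct amd_sched_backend_ops my_sched_ops = {
		.dependency   = my_job_dependency, /* next fence to wait on, or NULL */
		.run_job      = my_job_run,        /* push to the ring, return hw fence */
		.timedout_job = my_job_timedout,   /* invoked from work_tdr on a TDR */
		.free_job     = my_job_free,       /* final cleanup, work context */
	};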
index 2a732c4903755db7d3838f9b6c8ae00ee2804309..6b63beaf75746848720f98d4eb2b5329c299267b 100644 (file)
@@ -27,7 +27,8 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+                                              void *owner)
 {
        struct amd_sched_fence *fence = NULL;
        unsigned seq;
@@ -36,46 +37,37 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
        if (fence == NULL)
                return NULL;
 
-       INIT_LIST_HEAD(&fence->scheduled_cb);
        fence->owner = owner;
-       fence->sched = s_entity->sched;
+       fence->sched = entity->sched;
        spin_lock_init(&fence->lock);
 
-       seq = atomic_inc_return(&s_entity->fence_seq);
-       fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
-                  s_entity->fence_context, seq);
+       seq = atomic_inc_return(&entity->fence_seq);
+       fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+                  &fence->lock, entity->fence_context, seq);
+       fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+                  &fence->lock, entity->fence_context + 1, seq);
 
        return fence;
 }
 
-void amd_sched_fence_signal(struct amd_sched_fence *fence)
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
 {
-       int ret = fence_signal(&fence->base);
+       int ret = fence_signal(&fence->scheduled);
+
        if (!ret)
-               FENCE_TRACE(&fence->base, "signaled from irq context\n");
+               FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
        else
-               FENCE_TRACE(&fence->base, "was already signaled\n");
-}
-
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
-                               struct amd_sched_job *s_job)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&sched->job_list_lock, flags);
-       list_add_tail(&s_job->node, &sched->ring_mirror_list);
-       sched->ops->begin_job(s_job);
-       spin_unlock_irqrestore(&sched->job_list_lock, flags);
+               FENCE_TRACE(&fence->scheduled, "was already signaled\n");
 }
 
-void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+void amd_sched_fence_finished(struct amd_sched_fence *fence)
 {
-       struct fence_cb *cur, *tmp;
+       int ret = fence_signal(&fence->finished);
 
-       set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
-       list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
-               list_del_init(&cur->node);
-               cur->func(&s_fence->base, cur);
-       }
+       if (!ret)
+               FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+       else
+               FENCE_TRACE(&fence->finished, "was already signaled\n");
 }
 
 static const char *amd_sched_fence_get_driver_name(struct fence *fence)
@@ -105,6 +97,8 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
 {
        struct fence *f = container_of(rcu, struct fence, rcu);
        struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+       fence_put(fence->parent);
        kmem_cache_free(sched_fence_slab, fence);
 }
 
@@ -116,16 +110,41 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
  * This function is called when the reference count becomes zero.
  * It just RCU schedules freeing up the fence.
  */
-static void amd_sched_fence_release(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct fence *f)
 {
-       call_rcu(&f->rcu, amd_sched_fence_free);
+       struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+       call_rcu(&fence->finished.rcu, amd_sched_fence_free);
 }
 
-const struct fence_ops amd_sched_fence_ops = {
+/**
+ * amd_sched_fence_release_finished - drop the scheduled fence reference
+ *
+ * @f: fence
+ *
+ * Drop the reference that the finished fence holds on the scheduled fence.
+ */
+static void amd_sched_fence_release_finished(struct fence *f)
+{
+       struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+       fence_put(&fence->scheduled);
+}
+
+const struct fence_ops amd_sched_fence_ops_scheduled = {
+       .get_driver_name = amd_sched_fence_get_driver_name,
+       .get_timeline_name = amd_sched_fence_get_timeline_name,
+       .enable_signaling = amd_sched_fence_enable_signaling,
+       .signaled = NULL,
+       .wait = fence_default_wait,
+       .release = amd_sched_fence_release_scheduled,
+};
+
+const struct fence_ops amd_sched_fence_ops_finished = {
        .get_driver_name = amd_sched_fence_get_driver_name,
        .get_timeline_name = amd_sched_fence_get_timeline_name,
        .enable_signaling = amd_sched_fence_enable_signaling,
        .signaled = NULL,
        .wait = fence_default_wait,
-       .release = amd_sched_fence_release,
+       .release = amd_sched_fence_release_finished,
 };
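The two release hooks chain the teardown: dropping the last reference on the finished fence puts the scheduled fence, and the scheduled fence's release does the actual RCU free, reusing the RCU head embedded in the finished fence. In short:

	/* teardown chain implied by the two fence_ops tables above:
	 *   fence_put(&f->finished)  -> release_finished  -> put(&f->scheduled)
	 *   fence_put(&f->scheduled) -> release_scheduled -> call_rcu(free)
	 *   amd_sched_fence_free()   -> fence_put(parent) + kmem_cache_free()
	 */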
index 59f2f93b6f84859ea04c9d092df3f276d2f6a7a6..b29a41218fc92d8c6c6cfe7157b061e69f29fb24 100644 (file)
@@ -186,17 +186,6 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
 {
 }
 
-static int ast_bo_move(struct ttm_buffer_object *bo,
-                      bool evict, bool interruptible,
-                      bool no_wait_gpu,
-                      struct ttm_mem_reg *new_mem)
-{
-       int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-       return r;
-}
-
-
 static void ast_ttm_backend_destroy(struct ttm_tt *tt)
 {
        ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
        .ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
        .init_mem_type = ast_bo_init_mem_type,
        .evict_flags = ast_bo_evict_flags,
-       .move = ast_bo_move,
+       .move = NULL,
        .verify_access = ast_bo_verify_access,
        .io_mem_reserve = &ast_ttm_io_mem_reserve,
        .io_mem_free = &ast_ttm_io_mem_free,
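Setting .move to NULL is safe because the TTM core falls back to its generic memcpy move when a driver provides no hook, which is exactly what the deleted ast_bo_move() open-coded. Condensed from ttm_bo.c of this era (an assumption about the core, not part of this hunk); the identical bochs and cirrus hunks below rely on the same fallback:

	if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);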
index 473a475f27b1ffb48b14bd22fa5717ec66c7e787..6119b50855013522880f4e725b9ac8eb06372b26 100644 (file)
@@ -254,9 +254,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
                if (!ret)
                        ret = atmel_hlcdc_check_endpoint(dev, &ep);
 
-               of_node_put(ep_np);
-               if (ret)
+               if (ret) {
+                       of_node_put(ep_np);
                        return ret;
+               }
        }
 
        for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
@@ -264,9 +265,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
                if (!ret)
                        ret = atmel_hlcdc_attach_endpoint(dev, &ep);
 
-               of_node_put(ep_np);
-               if (ret)
+               if (ret) {
+                       of_node_put(ep_np);
                        return ret;
+               }
        }
 
        return 0;
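The fix relies on the of_graph iterator semantics: for_each_endpoint_of_node() itself drops the reference on the endpoint it handed out when advancing to the next one, so a manual of_node_put() is only needed when leaving the loop early. Putting it on every iteration, as the old code did, released each node twice. The assumed pattern (something_failed is a placeholder):

	for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
		if (something_failed) {
			of_node_put(ep_np);	/* early exit: put manually */
			return ret;
		}
	}	/* normal termination: the iterator already put each node */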
index aef3ca8a81fae37c31c84ba13eb45ff374dab83c..016c191221f35db2246a3e014b8a4f7294d22649 100644 (file)
@@ -339,6 +339,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
 
                atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
                                             factor_reg);
+       } else {
+               atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
        }
 }
 
index 28a60a7dc2f81c14c022bc78e41a54d9519f590b..5c5638a777a12ad12c1e0fdc8d9627d7d78ea50b 100644 (file)
@@ -165,15 +165,6 @@ static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
 {
 }
 
-static int bochs_bo_move(struct ttm_buffer_object *bo,
-                        bool evict, bool interruptible,
-                        bool no_wait_gpu,
-                        struct ttm_mem_reg *new_mem)
-{
-       return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-}
-
-
 static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
 {
        ttm_tt_fini(tt);
@@ -208,7 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
        .ttm_tt_unpopulate = ttm_pool_unpopulate,
        .init_mem_type = bochs_bo_init_mem_type,
        .evict_flags = bochs_bo_evict_flags,
-       .move = bochs_bo_move,
+       .move = NULL,
        .verify_access = bochs_bo_verify_access,
        .io_mem_reserve = &bochs_ttm_io_mem_reserve,
        .io_mem_free = &bochs_ttm_io_mem_free,
index 7699597070a13d09909e64780faba937a2b13b8c..32715daf73cb70b491c808ef480be679d73db76b 100644 (file)
@@ -938,7 +938,7 @@ int analogix_dp_get_modes(struct drm_connector *connector)
                num_modes += drm_panel_get_modes(dp->plat_data->panel);
 
        if (dp->plat_data->get_modes)
-               num_modes += dp->plat_data->get_modes(dp->plat_data);
+               num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
 
        return num_modes;
 }
@@ -1208,6 +1208,7 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
 
        switch (dp->plat_data->dev_type) {
        case RK3288_DP:
+       case RK3399_EDP:
                /*
                 * The RK3288 DisplayPort TRM describes the "Main link
                 * containing 4 physical lanes of 2.7/1.62 Gbps/lane".
index f09275d40f704b5a2aa505b7e7f51493cf80d150..b45638043ec41308681902d06907786a58456522 100644 (file)
@@ -127,10 +127,10 @@ enum analog_power_block {
 };
 
 enum dp_irq_type {
-       DP_IRQ_TYPE_HP_CABLE_IN,
-       DP_IRQ_TYPE_HP_CABLE_OUT,
-       DP_IRQ_TYPE_HP_CHANGE,
-       DP_IRQ_TYPE_UNKNOWN,
+       DP_IRQ_TYPE_HP_CABLE_IN  = BIT(0),
+       DP_IRQ_TYPE_HP_CABLE_OUT = BIT(1),
+       DP_IRQ_TYPE_HP_CHANGE    = BIT(2),
+       DP_IRQ_TYPE_UNKNOWN      = BIT(3),
 };
 
 struct video_info {
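Giving each interrupt type its own bit turns the enum into a mask, so a handler can accumulate and test several hotplug events in one pass, e.g. (the getter and the hpd wiring are assumptions, not part of this hunk):

	enum dp_irq_type irq = analogix_dp_get_irq_type(dp);

	if (irq & (DP_IRQ_TYPE_HP_CABLE_IN | DP_IRQ_TYPE_HP_CABLE_OUT))
		drm_helper_hpd_irq_event(dp->drm_dev);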
index 49205ef02be3afed044a2f58d4dce13f985278a2..48030f0cf4971672df899c9ca2cf2cac41f07fad 100644 (file)
@@ -74,8 +74,12 @@ void analogix_dp_init_analog_param(struct analogix_dp_device *dp)
        reg = SEL_24M | TX_DVDD_BIT_1_0625V;
        writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2);
 
-       if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) {
-               writel(REF_CLK_24M, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
+       if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
+               reg = REF_CLK_24M;
+               if (dp->plat_data->dev_type == RK3288_DP)
+                       reg ^= REF_CLK_MASK;
+
+               writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
                writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2);
                writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3);
                writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4);
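With REF_CLK moved to bit 0 (see the register-header hunk below), the 24 MHz default is now written explicitly and RK3288 gets the value flipped through the new mask, the apparent reason being inverted REF_CLK polarity on that SoC relative to the newer users of this IP:

	/* resulting ANALOGIX_DP_PLL_REG_1 writes (polarity inversion on
	 * RK3288 is an assumption drawn from this hunk):
	 *   RK3399 etc.: reg = REF_CLK_24M;        bit 0 = 1
	 *   RK3288:      reg ^= REF_CLK_MASK;  ->  bit 0 = 0
	 */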
@@ -244,7 +248,7 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
        u32 reg;
        u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
 
-       if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+       if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
                phy_pd_addr = ANALOGIX_DP_PD;
 
        switch (block) {
@@ -448,7 +452,7 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
        analogix_dp_reset_aux(dp);
 
        /* Disable AUX transaction H/W retry */
-       if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+       if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
                reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
                      AUX_HW_RETRY_COUNT_SEL(3) |
                      AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
index 337912b0aeabc9382f9ffdbd546ecddba1b691da..cdcc6c5add5e9c27befb644520784edaec9c6a4a 100644 (file)
 #define HSYNC_POLARITY_CFG                     (0x1 << 0)
 
 /* ANALOGIX_DP_PLL_REG_1 */
-#define REF_CLK_24M                            (0x1 << 1)
-#define REF_CLK_27M                            (0x0 << 1)
+#define REF_CLK_24M                            (0x1 << 0)
+#define REF_CLK_27M                            (0x0 << 0)
+#define REF_CLK_MASK                           (0x1 << 0)
 
 /* ANALOGIX_DP_LANE_MAP */
 #define LANE3_MAP_LOGIC_LANE_0                 (0x0 << 6)
index 70b1f7d4270b7cfa93f8d1c421f605b265f4db49..77ab47341658a2e109b323ee9393fc5456bdda12 100644 (file)
@@ -1495,14 +1495,6 @@ static void dw_hdmi_connector_force(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
-       .fill_modes = drm_helper_probe_single_connector_modes,
-       .detect = dw_hdmi_connector_detect,
-       .destroy = dw_hdmi_connector_destroy,
-       .force = dw_hdmi_connector_force,
-};
-
-static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = dw_hdmi_connector_detect,
@@ -1634,14 +1626,9 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi)
        drm_connector_helper_add(&hdmi->connector,
                                 &dw_hdmi_connector_helper_funcs);
 
-       if (drm_core_check_feature(drm, DRIVER_ATOMIC))
-               drm_connector_init(drm, &hdmi->connector,
-                                  &dw_hdmi_atomic_connector_funcs,
-                                  DRM_MODE_CONNECTOR_HDMIA);
-       else
-               drm_connector_init(drm, &hdmi->connector,
-                                  &dw_hdmi_connector_funcs,
-                                  DRM_MODE_CONNECTOR_HDMIA);
+       drm_connector_init(drm, &hdmi->connector,
+                          &dw_hdmi_connector_funcs,
+                          DRM_MODE_CONNECTOR_HDMIA);
 
        drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
 
index 6768b7b1af32d98bc26c259870ac471983e4a62d..1cc9ee607128772295bf6f49c8ba9ad93f73abd1 100644 (file)
@@ -186,17 +186,6 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 {
 }
 
-static int cirrus_bo_move(struct ttm_buffer_object *bo,
-                      bool evict, bool interruptible,
-                      bool no_wait_gpu,
-                      struct ttm_mem_reg *new_mem)
-{
-       int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-       return r;
-}
-
-
 static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
 {
        ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
        .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
        .init_mem_type = cirrus_bo_init_mem_type,
        .evict_flags = cirrus_bo_evict_flags,
-       .move = cirrus_bo_move,
+       .move = NULL,
        .verify_access = cirrus_bo_verify_access,
        .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
        .io_mem_free = &cirrus_ttm_io_mem_free,
index d99ab2f6663f157670a55fefc35b6bdc5e14448a..3cee084e9d28efbefb203e3b78491084851ff153 100644 (file)
@@ -1299,14 +1299,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes);
  */
 void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
 {
+       struct drm_device *dev = state->dev;
+       unsigned crtc_mask = 0;
+       struct drm_crtc *crtc;
        int ret;
+       bool global = false;
+
+       drm_for_each_crtc(crtc, dev) {
+               if (crtc->acquire_ctx != state->acquire_ctx)
+                       continue;
+
+               crtc_mask |= drm_crtc_mask(crtc);
+               crtc->acquire_ctx = NULL;
+       }
+
+       if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
+               global = true;
+
+               dev->mode_config.acquire_ctx = NULL;
+       }
 
 retry:
        drm_modeset_backoff(state->acquire_ctx);
 
-       ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
+       ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
        if (ret)
                goto retry;
+
+       drm_for_each_crtc(crtc, dev)
+               if (drm_crtc_mask(crtc) & crtc_mask)
+                       crtc->acquire_ctx = state->acquire_ctx;
+
+       if (global)
+               dev->mode_config.acquire_ctx = state->acquire_ctx;
 }
 EXPORT_SYMBOL(drm_atomic_legacy_backoff);
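The bookkeeping above preserves lock ownership across the backoff: drm_modeset_backoff() drops every lock held by the context, so any crtc->acquire_ctx (and the global mode_config one) pointing at it must be cleared first and restored after relocking. Schematically:

	/* invariant preserved across the backoff:
	 *   1. record crtcs whose acquire_ctx == state->acquire_ctx
	 *   2. clear those pointers, then drm_modeset_backoff()
	 *   3. drm_modeset_lock_all_ctx() reacquires every lock
	 *   4. restore crtc->acquire_ctx (and the global ctx) from the mask
	 */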
 
index d61591274ff6c4bfa8aad35f07f95d73a699add9..604d3ef72ffab53552918c3cf6055ed7712f1b93 100644 (file)
@@ -531,11 +531,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
        struct drm_device *dev;
-       struct drm_crtc *new_crtc;
-       struct drm_encoder *save_encoders, *new_encoder, *encoder;
+       struct drm_crtc **save_encoder_crtcs, *new_crtc;
+       struct drm_encoder **save_connector_encoders, *new_encoder, *encoder;
        bool mode_changed = false; /* if true do a full mode set */
        bool fb_changed = false; /* if true and !mode_changed just do a flip */
-       struct drm_connector *save_connectors, *connector;
+       struct drm_connector *connector;
        int count = 0, ro, fail = 0;
        const struct drm_crtc_helper_funcs *crtc_funcs;
        struct drm_mode_set save_set;
@@ -577,15 +577,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
         * Allocate space for the backup of all (non-pointer) encoder and
         * connector data.
         */
-       save_encoders = kzalloc(dev->mode_config.num_encoder *
-                               sizeof(struct drm_encoder), GFP_KERNEL);
-       if (!save_encoders)
+       save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
+                               sizeof(struct drm_crtc *), GFP_KERNEL);
+       if (!save_encoder_crtcs)
                return -ENOMEM;
 
-       save_connectors = kzalloc(dev->mode_config.num_connector *
-                               sizeof(struct drm_connector), GFP_KERNEL);
-       if (!save_connectors) {
-               kfree(save_encoders);
+       save_connector_encoders = kzalloc(dev->mode_config.num_connector *
+                               sizeof(struct drm_encoder *), GFP_KERNEL);
+       if (!save_connector_encoders) {
+               kfree(save_encoder_crtcs);
                return -ENOMEM;
        }
 
@@ -596,12 +596,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
         */
        count = 0;
        drm_for_each_encoder(encoder, dev) {
-               save_encoders[count++] = *encoder;
+               save_encoder_crtcs[count++] = encoder->crtc;
        }
 
        count = 0;
        drm_for_each_connector(connector, dev) {
-               save_connectors[count++] = *connector;
+               save_connector_encoders[count++] = connector->encoder;
        }
 
        save_set.crtc = set->crtc;
@@ -634,8 +634,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                mode_changed = true;
        }
 
-       /* take a reference on all connectors in set */
+       /* take a reference on all unbound connectors in the set; reuse
+        * the reference already taken for bound connectors
+        */
        for (ro = 0; ro < set->num_connectors; ro++) {
+               if (set->connectors[ro]->encoder)
+                       continue;
                drm_connector_reference(set->connectors[ro]);
        }
 
@@ -757,30 +761,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                }
        }
 
-       /* after fail drop reference on all connectors in save set */
-       count = 0;
-       drm_for_each_connector(connector, dev) {
-               drm_connector_unreference(&save_connectors[count++]);
-       }
-
-       kfree(save_connectors);
-       kfree(save_encoders);
+       kfree(save_connector_encoders);
+       kfree(save_encoder_crtcs);
        return 0;
 
 fail:
        /* Restore all previous data. */
        count = 0;
        drm_for_each_encoder(encoder, dev) {
-               *encoder = save_encoders[count++];
+               encoder->crtc = save_encoder_crtcs[count++];
        }
 
        count = 0;
        drm_for_each_connector(connector, dev) {
-               *connector = save_connectors[count++];
+               connector->encoder = save_connector_encoders[count++];
        }
 
-       /* after fail drop reference on all connectors in set */
+       /* on failure, drop the reference on all unbound connectors in
+        * the set; bound connectors keep their reference
+        */
        for (ro = 0; ro < set->num_connectors; ro++) {
+               if (set->connectors[ro]->encoder)
+                       continue;
                drm_connector_unreference(set->connectors[ro]);
        }
 
@@ -790,8 +792,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                                      save_set.y, save_set.fb))
                DRM_ERROR("failed to restore config after modeset failure\n");
 
-       kfree(save_connectors);
-       kfree(save_encoders);
+       kfree(save_connector_encoders);
+       kfree(save_encoder_crtcs);
        return ret;
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);
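Saving whole encoder and connector structures by value was never safe to restore (they embed reference counts, list heads and other live state), so the backup is reduced to the only thing this function actually rewires, the routing pointers:

	/* on failure only the routing is rolled back:
	 *   encoder->crtc      = save_encoder_crtcs[i];
	 *   connector->encoder = save_connector_encoders[i];
	 * memcpy'ing full structs back would clobber refcounts/lists */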
index a13edf5de2d6daafc0e89849a6cdd1c324835345..6537908050d71b8e3efe2d5e6c60b5417957f0bf 100644 (file)
@@ -2927,11 +2927,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                drm_dp_port_teardown_pdt(port, port->pdt);
 
                if (!port->input && port->vcpi.vcpi > 0) {
-                       if (mgr->mst_state) {
-                               drm_dp_mst_reset_vcpi_slots(mgr, port);
-                               drm_dp_update_payload_part1(mgr);
-                               drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-                       }
+                       drm_dp_mst_reset_vcpi_slots(mgr, port);
+                       drm_dp_update_payload_part1(mgr);
+                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
                }
 
                kref_put(&port->kref, drm_dp_free_mst_port);
index c0b0c718994a4133e2357e8921d2c9361be6ca1b..1fd6eac1400c1c92bae8edaac6a622a322d7ace0 100644 (file)
@@ -596,3 +596,18 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
                drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
 }
 EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
+
+/**
+ * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * Calls drm_fb_helper_set_suspend(), which is a wrapper around
+ * fb_set_suspend() implemented by the fbdev core.
+ */
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
+{
+       if (fbdev_cma)
+               drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
+}
+EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
index 340d390306d893d5a61898491b1778b0a647ac5b..ffd1b32caa8d5b1b3a00b1dfbdf065b375e46496 100644 (file)
@@ -91,10 +91,8 @@ static void load_gpu(struct drm_device *dev)
                        int ret;
 
                        ret = etnaviv_gpu_init(g);
-                       if (ret) {
-                               dev_err(g->dev, "hw init failed: %d\n", ret);
+                       if (ret)
                                priv->gpu[i] = NULL;
-                       }
                }
        }
 }
index ff6aa5dfb2d7574556f9f8d97310c4c127f1c212..87ef34150d466903ea128eddf599718eb5836dcb 100644 (file)
@@ -487,6 +487,47 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
        return 0;
 }
 
+static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
+{
+       u32 pmc, ppc;
+
+       /* enable clock gating */
+       ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+       ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+
+       /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
+       if (gpu->identity.revision == 0x4301 ||
+           gpu->identity.revision == 0x4302)
+               ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
+
+       gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
+
+       pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
+
+       /* Disable PA clock gating for GC400+ except for GC420 */
+       if (gpu->identity.model >= chipModel_GC400 &&
+           gpu->identity.model != chipModel_GC420)
+               pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
+
+       /*
+        * Disable PE clock gating on revs < 5.0.0.0 when HZ is
+        * present without a bug fix.
+        */
+       if (gpu->identity.revision < 0x5000 &&
+           gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
+           !(gpu->identity.minor_features1 &
+             chipMinorFeatures1_DISABLE_PE_GATING))
+               pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
+
+       if (gpu->identity.revision < 0x5422)
+               pmc |= BIT(15); /* Unknown bit */
+
+       pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
+       pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
+
+       gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
+}
+
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
        u16 prefetch;
@@ -506,6 +547,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
                gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
        }
 
+       /* enable module-level clock gating */
+       etnaviv_gpu_enable_mlcg(gpu);
+
        /*
         * Update GPU AXI cache attribute to "cacheable, no allocate".
         * This is necessary to prevent the iMX6 SoC locking up.
@@ -553,8 +597,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        bool mmuv2;
 
        ret = pm_runtime_get_sync(gpu->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               dev_err(gpu->dev, "Failed to enable GPU power domain\n");
                return ret;
+       }
 
        etnaviv_hw_identify(gpu);
 
@@ -591,8 +637,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        }
 
        ret = etnaviv_hw_reset(gpu);
-       if (ret)
+       if (ret) {
+               dev_err(gpu->dev, "GPU reset failed\n");
                goto fail;
+       }
 
        /* Setup IOMMU.. eventually we will (I think) do this once per context
         * and have separate page tables per context.  For now, to keep things
@@ -610,12 +658,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        }
 
        if (!iommu) {
+               dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
                ret = -ENOMEM;
                goto fail;
        }
 
        gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
        if (!gpu->mmu) {
+               dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
                iommu_domain_free(iommu);
                ret = -ENOMEM;
                goto fail;
index 522cfd4478922d2ccde1f4b667ca39db761232e9..16353ee816516ef33348832ef31a5a089a95829b 100644 (file)
@@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
 
        etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
        etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+       etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
        etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
        etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
 
index 6a7de5f1454a51532c0809e2e741c53db4fba4e9..807a3d9e0dd5af0a8e0f3944fa748a41ac16c021 100644 (file)
@@ -218,6 +218,13 @@ Copyright (C) 2015
 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SH 0x00000008
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA 0x00000010
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE 0x00000020
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA 0x00000040
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX 0x00000080
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ      0x00010000
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ      0x00020000
 
 #define VIVS_PM_MODULE_STATUS                                  0x00000108
 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE            0x00000001
index 1c7e14cf2781fc62c3ef4884837456d512bf70b3..83f61c513b7e84f49e13eb037abda0a141854ed5 100644 (file)
@@ -11,7 +11,7 @@ if DRM_EXYNOS
 
 config DRM_EXYNOS_IOMMU
        bool
-       depends on EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+       depends on EXYNOS_IOMMU
        default y
 
 comment "CRTCs"
index f6223f907c158fbdfcf176da6aef13536966c5bb..7f9901b7777b36786380c447c784418f4d7e3b07 100644 (file)
@@ -31,7 +31,6 @@
 #include "exynos_drm_plane.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
 #include "exynos_drm_iommu.h"
 
 /*
index 468498e3fec15923f25476b163a3f82535a34a80..4f0850585b8e0312344757104dc35c0e793234ad 100644 (file)
@@ -34,7 +34,7 @@
 
 struct exynos_dp_device {
        struct drm_encoder         encoder;
-       struct drm_connector       connector;
+       struct drm_connector       *connector;
        struct drm_bridge          *ptn_bridge;
        struct drm_device          *drm_dev;
        struct device              *dev;
@@ -67,10 +67,10 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
        return exynos_dp_crtc_clock_enable(plat_data, false);
 }
 
-static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
+static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
+                              struct drm_connector *connector)
 {
        struct exynos_dp_device *dp = to_dp(plat_data);
-       struct drm_connector *connector = &dp->connector;
        struct drm_display_mode *mode;
        int num_modes = 0;
 
@@ -103,6 +103,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
        int ret;
 
        drm_connector_register(connector);
+       dp->connector = connector;
 
        /* Pre-empt DP connector creation if there's a bridge */
        if (dp->ptn_bridge) {
index 011211e4167d41dce17d56e8baee0b168b37e7b6..edbd98ff293ec913008e7ec60cee20d85c54fa56 100644 (file)
@@ -15,7 +15,6 @@
 #include <drm/drmP.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_fbdev.h"
 
 static LIST_HEAD(exynos_drm_subdrv_list);
 
index 13d28d4229e2b2263398f9970d26a5ef33223102..877d2efa28e2171a1d2e07f4feef9a9fdd1eac7c 100644 (file)
@@ -159,12 +159,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
        DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
                 dev_name(private->dma_dev));
 
-       /*
-        * create mapping to manage iommu table and set a pointer to iommu
-        * mapping structure to iommu_mapping of private data.
-        * also this iommu_mapping can be used to check if iommu is supported
-        * or not.
-        */
+       /* create common IOMMU mapping for all devices attached to Exynos DRM */
        ret = drm_create_iommu_mapping(dev);
        if (ret < 0) {
                DRM_ERROR("failed to create iommu mapping.\n");
index cc33ec9296e7e15b80cf110cc3f6f860a2e226de..b39d521f093d71d344427a32f120d0a83235b46b 100644 (file)
@@ -224,8 +224,6 @@ struct exynos_drm_private {
        struct drm_property *plane_zpos_property;
 
        struct device *dma_dev;
-       unsigned long da_start;
-       unsigned long da_space_size;
        void *mapping;
 
        unsigned int pipe;
index 3efe1aa89416701223745cec1ec587fb3ef0e2db..d4721648898524e9dc76ee6476dd9a068c52520a 100644 (file)
@@ -30,7 +30,6 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
 #include "exynos_drm_iommu.h"
@@ -120,7 +119,6 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
        .timing_base = 0x0,
        .has_clksel = 1,
        .has_limited_fmt = 1,
-       .has_hw_trigger = 1,
 };
 
 static struct fimd_driver_data exynos3_fimd_driver_data = {
@@ -171,14 +169,11 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
        .lcdblk_vt_shift = 24,
        .lcdblk_bypass_shift = 15,
        .lcdblk_mic_bypass_shift = 11,
-       .trg_type = I80_HW_TRG,
        .has_shadowcon = 1,
        .has_vidoutcon = 1,
        .has_vtsel = 1,
        .has_mic_bypass = 1,
        .has_dp_clk = 1,
-       .has_hw_trigger = 1,
-       .has_trigger_per_te = 1,
 };
 
 struct fimd_context {
index 493552368295288c091dff811ab02c7939dac2e0..8564c3da0d222568bdd41cf8063d097dcf665081 100644 (file)
 
 /* registers for base address */
 #define G2D_SRC_BASE_ADDR              0x0304
-#define G2D_SRC_STRIDE_REG             0x0308
+#define G2D_SRC_STRIDE                 0x0308
 #define G2D_SRC_COLOR_MODE             0x030C
 #define G2D_SRC_LEFT_TOP               0x0310
 #define G2D_SRC_RIGHT_BOTTOM           0x0314
 #define G2D_SRC_PLANE2_BASE_ADDR       0x0318
 #define G2D_DST_BASE_ADDR              0x0404
-#define G2D_DST_STRIDE_REG             0x0408
+#define G2D_DST_STRIDE                 0x0408
 #define G2D_DST_COLOR_MODE             0x040C
 #define G2D_DST_LEFT_TOP               0x0410
 #define G2D_DST_RIGHT_BOTTOM           0x0414
@@ -563,7 +563,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
 
        switch (reg_offset) {
        case G2D_SRC_BASE_ADDR:
-       case G2D_SRC_STRIDE_REG:
+       case G2D_SRC_STRIDE:
        case G2D_SRC_COLOR_MODE:
        case G2D_SRC_LEFT_TOP:
        case G2D_SRC_RIGHT_BOTTOM:
@@ -573,7 +573,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
                reg_type = REG_TYPE_SRC_PLANE2;
                break;
        case G2D_DST_BASE_ADDR:
-       case G2D_DST_STRIDE_REG:
+       case G2D_DST_STRIDE:
        case G2D_DST_COLOR_MODE:
        case G2D_DST_LEFT_TOP:
        case G2D_DST_RIGHT_BOTTOM:
@@ -968,8 +968,8 @@ static int g2d_check_reg_offset(struct device *dev,
                        } else
                                buf_info->types[reg_type] = BUF_TYPE_GEM;
                        break;
-               case G2D_SRC_STRIDE_REG:
-               case G2D_DST_STRIDE_REG:
+               case G2D_SRC_STRIDE:
+               case G2D_DST_STRIDE:
                        if (for_addr)
                                goto err;
 
index 7ca09ee196560bf9c0361371d1f0aeb652dde38b..0f373702414e32342fa98d09ac597d3996f24c58 100644 (file)
 
 #include <linux/dma-mapping.h>
 #include <linux/iommu.h>
-#include <linux/kref.h>
-
-#include <asm/dma-iommu.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_iommu.h"
 
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+       if (!dev->dma_parms)
+               dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+       if (!dev->dma_parms)
+               return -ENOMEM;
+
+       dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+       return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+       kfree(dev->dma_parms);
+       dev->dma_parms = NULL;
+}
+
 /*
  * drm_create_iommu_mapping - create a mapping structure
  *
  */
 int drm_create_iommu_mapping(struct drm_device *drm_dev)
 {
-       struct dma_iommu_mapping *mapping = NULL;
        struct exynos_drm_private *priv = drm_dev->dev_private;
 
-       if (!priv->da_start)
-               priv->da_start = EXYNOS_DEV_ADDR_START;
-       if (!priv->da_space_size)
-               priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
-
-       mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
-                                               priv->da_space_size);
-
-       if (IS_ERR(mapping))
-               return PTR_ERR(mapping);
-
-       priv->mapping = mapping;
-
-       return 0;
+       return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
+                                            EXYNOS_DEV_ADDR_SIZE);
 }
 
 /*
  * drm_release_iommu_mapping - release iommu mapping structure
  *
  * @drm_dev: DRM device
- *
- * if mapping->kref becomes 0 then all things related to iommu mapping
- * will be released
  */
 void drm_release_iommu_mapping(struct drm_device *drm_dev)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
 
-       arm_iommu_release_mapping(priv->mapping);
+       __exynos_iommu_release_mapping(priv);
 }
 
 /*
@@ -77,25 +75,19 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
        struct exynos_drm_private *priv = drm_dev->dev_private;
        int ret;
 
-       if (!priv->mapping)
-               return 0;
-
-       subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
-                                       sizeof(*subdrv_dev->dma_parms),
-                                       GFP_KERNEL);
-       if (!subdrv_dev->dma_parms)
-               return -ENOMEM;
-
-       dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
-
-       if (subdrv_dev->archdata.mapping)
-               arm_iommu_detach_device(subdrv_dev);
+       if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+               DRM_ERROR("Device %s lacks support for IOMMU\n",
+                         dev_name(subdrv_dev));
+               return -EINVAL;
+       }
 
-       ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
-       if (ret < 0) {
-               DRM_DEBUG_KMS("failed iommu attach.\n");
+       ret = configure_dma_max_seg_size(subdrv_dev);
+       if (ret)
                return ret;
-       }
+
+       ret = __exynos_iommu_attach(priv, subdrv_dev);
+       if (ret)
+               clear_dma_max_seg_size(subdrv_dev);
 
        return 0;
 }
@@ -113,10 +105,7 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
                                struct device *subdrv_dev)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
-       struct dma_iommu_mapping *mapping = priv->mapping;
-
-       if (!mapping || !mapping->domain)
-               return;
 
-       arm_iommu_detach_device(subdrv_dev);
+       __exynos_iommu_detach(priv, subdrv_dev);
+       clear_dma_max_seg_size(subdrv_dev);
 }
index 5ffebe02ee4d3ebd29dda3960307ea1ed0a5d2cb..c8de4913fdbe80a3962ebca2744236f523e768b0 100644 (file)
 
 #ifdef CONFIG_DRM_EXYNOS_IOMMU
 
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+
+static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
+                                       unsigned long start, unsigned long size)
+{
+       priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
+                                                size);
+       return IS_ERR(priv->mapping);
+}
+
+static inline void
+__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
+{
+       arm_iommu_release_mapping(priv->mapping);
+}
+
+static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
+                                       struct device *dev)
+{
+       if (dev->archdata.mapping)
+               arm_iommu_detach_device(dev);
+
+       return arm_iommu_attach_device(dev, priv->mapping);
+}
+
+static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
+                                        struct device *dev)
+{
+       arm_iommu_detach_device(dev);
+}
+
+#elif defined(CONFIG_IOMMU_DMA)
+#include <linux/dma-iommu.h>
+
+static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
+                                       unsigned long start, unsigned long size)
+{
+       struct iommu_domain *domain;
+       int ret;
+
+       domain = iommu_domain_alloc(priv->dma_dev->bus);
+       if (!domain)
+               return -ENOMEM;
+
+       ret = iommu_get_dma_cookie(domain);
+       if (ret)
+               goto free_domain;
+
+       ret = iommu_dma_init_domain(domain, start, size);
+       if (ret)
+               goto put_cookie;
+
+       priv->mapping = domain;
+       return 0;
+
+put_cookie:
+       iommu_put_dma_cookie(domain);
+free_domain:
+       iommu_domain_free(domain);
+       return ret;
+}
+
+static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
+{
+       struct iommu_domain *domain = priv->mapping;
+
+       iommu_put_dma_cookie(domain);
+       iommu_domain_free(domain);
+       priv->mapping = NULL;
+}
+
+static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
+                                       struct device *dev)
+{
+       struct iommu_domain *domain = priv->mapping;
+
+       return iommu_attach_device(domain, dev);
+}
+
+static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
+                                        struct device *dev)
+{
+       struct iommu_domain *domain = priv->mapping;
+
+       iommu_detach_device(domain, dev);
+}
+#else
+#error Unsupported architecture and IOMMU/DMA-mapping glue code
+#endif
+
 int drm_create_iommu_mapping(struct drm_device *drm_dev);
 
 void drm_release_iommu_mapping(struct drm_device *drm_dev);
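
[Note: with both back ends exporting the same four __exynos_iommu_* helpers, the common entry points declared above can stay free of #ifdefs. A sketch of how the create path is expected to be wired up, assuming EXYNOS_DEV_ADDR_START/EXYNOS_DEV_ADDR_SIZE as names for the aperture constants, which are not shown in this excerpt:

int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
        struct exynos_drm_private *priv = drm_dev->dev_private;

        /* One mapping per DRM device; sub-drivers then attach to it. */
        return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
                                             EXYNOS_DEV_ADDR_SIZE);
}
]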
index 706de3278f1c0d04dfda0857d5445667481089ee..3371635cd4d707192e39104ed1bc13671fc19c69 100644 (file)
@@ -44,6 +44,8 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 
+       drm_crtc_vblank_off(crtc);
+
        regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
                           DCU_MODE_DCU_MODE_MASK,
                           DCU_MODE_DCU_MODE(DCU_MODE_OFF));
@@ -61,6 +63,8 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
                           DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
        regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
                     DCU_UPDATE_MODE_READREG);
+
+       drm_crtc_vblank_on(crtc);
 }
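
[Note: the drm_crtc_vblank_off()/drm_crtc_vblank_on() calls added in these two hunks follow the usual atomic-helper bracketing; a minimal sketch of the contract, with illustrative function names:

static void example_crtc_disable(struct drm_crtc *crtc)
{
        /* First complete pending events and reject new vblank waiters, */
        drm_crtc_vblank_off(crtc);
        /* ... then power the controller down. */
}

static void example_crtc_enable(struct drm_crtc *crtc)
{
        /* Power the controller up first, ... */
        /* ... then re-arm vblank handling so drm_crtc_vblank_get() works. */
        drm_crtc_vblank_on(crtc);
}
]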
 
 static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
@@ -137,9 +141,10 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
 {
        struct drm_plane *primary;
        struct drm_crtc *crtc = &fsl_dev->crtc;
-       unsigned int i, j, reg_num;
        int ret;
 
+       fsl_dcu_drm_init_planes(fsl_dev->drm);
+
        primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
        if (!primary)
                return -ENOMEM;
@@ -153,19 +158,5 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
 
        drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
 
-       if (!strcmp(fsl_dev->soc->name, "ls1021a"))
-               reg_num = LS1021A_LAYER_REG_NUM;
-       else
-               reg_num = VF610_LAYER_REG_NUM;
-       for (i = 0; i < fsl_dev->soc->total_layer; i++) {
-               for (j = 1; j <= reg_num; j++)
-                       regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
-       }
-       regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-                          DCU_MODE_DCU_MODE_MASK,
-                          DCU_MODE_DCU_MODE(DCU_MODE_OFF));
-       regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-                    DCU_UPDATE_MODE_READREG);
-
        return 0;
 }
index 33727d5d826af952c926c11d886cdc5a79ee3bd3..7882387f9bffd5c661f30bc5800e58672a0a3bd8 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/console.h>
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mm.h>
@@ -22,6 +23,7 @@
 #include <linux/regmap.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -42,7 +44,6 @@ static const struct regmap_config fsl_dcu_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
        .val_bits = 32,
-       .cache_type = REGCACHE_RBTREE,
 
        .volatile_reg = fsl_dcu_drm_is_volatile_reg,
 };
@@ -228,11 +229,26 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
        if (!fsl_dev)
                return 0;
 
+       disable_irq(fsl_dev->irq);
        drm_kms_helper_poll_disable(fsl_dev->drm);
-       regcache_cache_only(fsl_dev->regmap, true);
-       regcache_mark_dirty(fsl_dev->regmap);
-       clk_disable(fsl_dev->clk);
-       clk_unprepare(fsl_dev->clk);
+
+       console_lock();
+       drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
+       console_unlock();
+
+       fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
+       if (IS_ERR(fsl_dev->state)) {
+               console_lock();
+               drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
+               console_unlock();
+
+               drm_kms_helper_poll_enable(fsl_dev->drm);
+               enable_irq(fsl_dev->irq);
+               return PTR_ERR(fsl_dev->state);
+       }
+
+       clk_disable_unprepare(fsl_dev->pix_clk);
+       clk_disable_unprepare(fsl_dev->clk);
 
        return 0;
 }
@@ -245,21 +261,27 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
        if (!fsl_dev)
                return 0;
 
-       ret = clk_enable(fsl_dev->clk);
+       ret = clk_prepare_enable(fsl_dev->clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable dcu clk\n");
-               clk_unprepare(fsl_dev->clk);
                return ret;
        }
-       ret = clk_prepare(fsl_dev->clk);
+
+       ret = clk_prepare_enable(fsl_dev->pix_clk);
        if (ret < 0) {
-               dev_err(dev, "failed to prepare dcu clk\n");
+               dev_err(dev, "failed to enable pix clk\n");
+               clk_disable_unprepare(fsl_dev->clk);
                return ret;
        }
 
+       fsl_dcu_drm_init_planes(fsl_dev->drm);
+       drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
+
+       console_lock();
+       drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
+       console_unlock();
+
        drm_kms_helper_poll_enable(fsl_dev->drm);
-       regcache_cache_only(fsl_dev->regmap, false);
-       regcache_sync(fsl_dev->regmap);
+       enable_irq(fsl_dev->irq);
 
        return 0;
 }
@@ -273,12 +295,14 @@ static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
        .name = "ls1021a",
        .total_layer = 16,
        .max_layer = 4,
+       .layer_regs = LS1021A_LAYER_REG_NUM,
 };
 
 static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
        .name = "vf610",
        .total_layer = 64,
        .max_layer = 6,
+       .layer_regs = VF610_LAYER_REG_NUM,
 };
 
 static const struct of_device_id fsl_dcu_of_match[] = {
index c275f900ff231a564256bc84c2b1252b90dde6f1..3b371fe7491e164e2f3507f7d8283a677cbdfa11 100644 (file)
@@ -175,6 +175,7 @@ struct fsl_dcu_soc_data {
        unsigned int total_layer;
        /*max layer number DCU supported*/
        unsigned int max_layer;
+       unsigned int layer_regs;
 };
 
 struct fsl_dcu_drm_device {
@@ -193,6 +194,7 @@ struct fsl_dcu_drm_device {
        struct drm_encoder encoder;
        struct fsl_dcu_drm_connector connector;
        const struct fsl_dcu_soc_data *soc;
+       struct drm_atomic_state *state;
 };
 
 void fsl_dcu_fbdev_init(struct drm_device *dev);
index 274558b3b32b2804c60695fe08ca0e4753c1bfad..e50467a0deb0451aa9c5723f8c97806f21add3d2 100644 (file)
@@ -217,6 +217,22 @@ static const u32 fsl_dcu_drm_plane_formats[] = {
        DRM_FORMAT_YUV422,
 };
 
+void fsl_dcu_drm_init_planes(struct drm_device *dev)
+{
+       struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+       int i, j;
+
+       for (i = 0; i < fsl_dev->soc->total_layer; i++) {
+               for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
+                       regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
+       }
+       regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+                          DCU_MODE_DCU_MODE_MASK,
+                          DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+       regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+                    DCU_UPDATE_MODE_READREG);
+}
+
 struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
 {
        struct drm_plane *primary;
index d657f088d859369fddc797e7c7b166210f32dc25..8ee45f813ee87cecfa60b270677ab4bfd977af73 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef __FSL_DCU_DRM_PLANE_H__
 #define __FSL_DCU_DRM_PLANE_H__
 
+void fsl_dcu_drm_init_planes(struct drm_device *dev);
 struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
 
 #endif /* __FSL_DCU_DRM_PLANE_H__ */
index ea0df6115f7e96fa52a8e295b21deb4a6b30100e..499f64405daca9729b3e00da06f4ec06e7dedc00 100644 (file)
@@ -4,6 +4,7 @@ config DRM_HISI_KIRIN
        select DRM_KMS_HELPER
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
+       select HISI_KIRIN_DW_DSI
        help
          Choose this option if you have a HiSilicon Kirin chipset (hi6220).
          If M is selected the module will be called kirin-drm.
index 805f4326fafef17db18f68d5f1a44aad94182f43..c3707d47cd89762c592f7d4f644c004bea27ac74 100644 (file)
@@ -967,21 +967,21 @@ static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
        }
 
        ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
-       if (!ctx->ade_core_clk) {
+       if (IS_ERR(ctx->ade_core_clk)) {
                DRM_ERROR("failed to parse clk ADE_CORE\n");
-               return -ENODEV;
+               return PTR_ERR(ctx->ade_core_clk);
        }
 
        ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
-       if (!ctx->media_noc_clk) {
+       if (IS_ERR(ctx->media_noc_clk)) {
                DRM_ERROR("failed to parse clk CODEC_JPEG\n");
-           return -ENODEV;
+               return PTR_ERR(ctx->media_noc_clk);
        }
 
        ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
-       if (!ctx->ade_pix_clk) {
+       if (IS_ERR(ctx->ade_pix_clk)) {
                DRM_ERROR("failed to parse clk ADE_PIX\n");
-           return -ENODEV;
+               return PTR_ERR(ctx->ade_pix_clk);
        }
 
        return 0;
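
[Note: for reference, the idiom this hunk converts to: devm_clk_get() reports failure through an ERR_PTR() cookie, never NULL, so the old checks could not trigger. A minimal sketch, reusing the clock name from above:

#include <linux/clk.h>
#include <linux/err.h>

static int example_get_clock(struct device *dev, struct clk **out)
{
        struct clk *clk = devm_clk_get(dev, "clk_ade_core");

        if (IS_ERR(clk))
                return PTR_ERR(clk);    /* e.g. -EPROBE_DEFER or -ENOENT */

        *out = clk;
        return 0;
}
]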
index 8f404103341d9e6a349e22de5e89d53d247c4daa..cee87bfd10c45f8e9bec618549e07591f81551ad 100644 (file)
@@ -18,6 +18,9 @@ config DRM_I915_WERROR
 config DRM_I915_DEBUG
         bool "Enable additional driver debugging"
         depends on DRM_I915
+        select PREEMPT_COUNT
+        select X86_MSR # used by igt/pm_rpm
+        select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
         default n
         help
           Choose this option to turn on extra driver debugging that may affect
index 276abf1cac2b90141863bc736815488fd550f34f..684fc1cd08fa90260529ca558b18118d118c8f9c 100644 (file)
@@ -10,9 +10,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
 i915-y := i915_drv.o \
          i915_irq.o \
          i915_params.o \
+         i915_pci.o \
           i915_suspend.o \
          i915_sysfs.o \
          intel_csr.o \
+         intel_device_info.o \
          intel_pm.o \
          intel_runtime_pm.o
 
@@ -37,6 +39,7 @@ i915-y += i915_cmd_parser.o \
          i915_gem_userptr.o \
          i915_gpu_error.o \
          i915_trace_points.o \
+         intel_breadcrumbs.o \
          intel_lrc.o \
          intel_mocs.o \
          intel_ringbuffer.o \
@@ -101,9 +104,6 @@ i915-y += dvo_ch7017.o \
 # virtual gpu code
 i915-y += i915_vgpu.o
 
-# legacy horrors
-i915-y += i915_dma.o
-
 ifeq ($(CONFIG_DRM_I915_GVT),y)
 i915-y += intel_gvt.o
 include $(src)/gvt/Makefile
index 5b7526697838913b01b4d4f15ebee02e29663fa2..844fea795bae1b860588fa22bc6ffc1621d12cb4 100644 (file)
@@ -265,7 +265,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        LIST_HEAD(stolen);
@@ -440,15 +440,15 @@ static void print_context_stats(struct seq_file *m,
 
        memset(&stats, 0, sizeof(stats));
 
-       mutex_lock(&dev_priv->dev->struct_mutex);
+       mutex_lock(&dev_priv->drm.struct_mutex);
        if (dev_priv->kernel_context)
                per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
 
-       list_for_each_entry(file, &dev_priv->dev->filelist, lhead) {
+       list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
                struct drm_i915_file_private *fpriv = file->driver_priv;
                idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
        }
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
        print_file_stats(m, "[k]contexts", stats);
 }
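
[Note: context for the mechanical dev->dev_private to to_i915(dev) and dev_priv->dev to &dev_priv->drm conversions throughout this file: struct drm_device is now embedded in struct drm_i915_private, so the accessor is plain pointer arithmetic. Roughly, per the definition in i915_drv.h, which is not part of this hunk:

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
        return container_of(dev, struct drm_i915_private, drm);
}
]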
@@ -591,7 +591,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        uintptr_t list = (uintptr_t) node->info_ent->data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        int count, ret;
@@ -625,7 +625,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc;
        int ret;
 
@@ -662,8 +662,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                           engine->name,
                                           i915_gem_request_get_seqno(work->flip_queued_req),
                                           dev_priv->next_seqno,
-                                          engine->get_seqno(engine),
-                                          i915_gem_request_completed(work->flip_queued_req, true));
+                                          intel_engine_get_seqno(engine),
+                                          i915_gem_request_completed(work->flip_queued_req));
                        } else
                                seq_printf(m, "Flip not associated with any ring\n");
                        seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
@@ -695,7 +695,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        int total = 0;
@@ -740,7 +740,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        struct drm_i915_gem_request *req;
        int ret, any;
@@ -788,17 +788,29 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_engine_cs *engine)
 {
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct rb_node *rb;
+
        seq_printf(m, "Current sequence (%s): %x\n",
-                  engine->name, engine->get_seqno(engine));
-       seq_printf(m, "Current user interrupts (%s): %x\n",
-                  engine->name, READ_ONCE(engine->user_interrupts));
+                  engine->name, intel_engine_get_seqno(engine));
+       seq_printf(m, "Current user interrupts (%s): %lx\n",
+                  engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups));
+
+       spin_lock(&b->lock);
+       for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+               struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+               seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
+                          engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
+       }
+       spin_unlock(&b->lock);
 }
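
[Note: the struct behind the walk above is not part of this hunk; reconstructed from the fields it uses (an assumption, layout simplified), each waiter parks itself in the engine's breadcrumb rb-tree keyed by the seqno it sleeps on:

struct intel_wait {
        struct rb_node node;            /* in engine->breadcrumbs.waiters */
        struct task_struct *tsk;        /* task to wake when seqno passes */
        u32 seqno;                      /* breadcrumb being waited for */
};
]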
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        int ret;
 
@@ -821,7 +833,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        int ret, i, pipe;
 
@@ -1012,7 +1024,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i, ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1040,7 +1052,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        const u32 *hws;
        int i;
@@ -1151,7 +1163,7 @@ static int
 i915_next_seqno_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1188,7 +1200,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
        intel_runtime_pm_get(dev_priv);
@@ -1391,7 +1403,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        u64 acthd[I915_NUM_ENGINES];
        u32 seqno[I915_NUM_ENGINES];
@@ -1408,7 +1420,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
        for_each_engine_id(engine, dev_priv, id) {
                acthd[id] = intel_ring_get_active_head(engine);
-               seqno[id] = engine->get_seqno(engine);
+               seqno[id] = intel_engine_get_seqno(engine);
        }
 
        i915_get_extra_instdone(dev_priv, instdone);
@@ -1428,9 +1440,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                           engine->hangcheck.seqno,
                           seqno[id],
                           engine->last_submitted_seqno);
-               seq_printf(m, "\tuser interrupts = %x [current %x]\n",
+               seq_printf(m, "\twaiters? %d\n",
+                          intel_engine_has_waiter(engine));
+               seq_printf(m, "\tuser interrupts = %lx [current %lx]\n",
                           engine->hangcheck.user_interrupts,
-                          READ_ONCE(engine->user_interrupts));
+                          READ_ONCE(engine->breadcrumbs.irq_wakeups));
                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)engine->hangcheck.acthd,
                           (long long)acthd[id]);
@@ -1460,7 +1474,7 @@ static int ironlake_drpc_info(struct seq_file *m)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;
        int ret;
@@ -1528,7 +1542,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_uncore_forcewake_domain *fw_domain;
 
        spin_lock_irq(&dev_priv->uncore.lock);
@@ -1546,7 +1560,7 @@ static int vlv_drpc_info(struct seq_file *m)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 rpmodectl1, rcctl1, pw_status;
 
        intel_runtime_pm_get(dev_priv);
@@ -1586,7 +1600,7 @@ static int gen6_drpc_info(struct seq_file *m)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
        unsigned forcewake_count;
        int count = 0, ret;
@@ -1698,7 +1712,7 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        seq_printf(m, "FB tracking busy bits: 0x%08x\n",
                   dev_priv->fb_tracking.busy_bits);
@@ -1713,7 +1727,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!HAS_FBC(dev)) {
                seq_puts(m, "FBC unsupported on this chipset\n");
@@ -1743,7 +1757,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 static int i915_fbc_fc_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
                return -ENODEV;
@@ -1756,7 +1770,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
 static int i915_fbc_fc_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 reg;
 
        if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
@@ -1783,7 +1797,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!HAS_IPS(dev)) {
                seq_puts(m, "not supported\n");
@@ -1813,7 +1827,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool sr_enabled = false;
 
        intel_runtime_pm_get(dev_priv);
@@ -1842,7 +1856,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long temp, chipset, gfx;
        int ret;
 
@@ -1870,7 +1884,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
        int gpu_freq, ia_freq;
        unsigned int max_gpu_freq, min_gpu_freq;
@@ -1925,7 +1939,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;
 
@@ -1946,7 +1960,7 @@ static int i915_vbt(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_opregion *opregion = &dev_priv->opregion;
 
        if (opregion->vbt)
@@ -1968,19 +1982,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                return ret;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-       if (to_i915(dev)->fbdev) {
-               fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
-
-               seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
-                         fbdev_fb->base.width,
-                         fbdev_fb->base.height,
-                         fbdev_fb->base.depth,
-                         fbdev_fb->base.bits_per_pixel,
-                         fbdev_fb->base.modifier[0],
-                         drm_framebuffer_read_refcount(&fbdev_fb->base));
-               describe_obj(m, fbdev_fb->obj);
-               seq_putc(m, '\n');
-       }
+       if (to_i915(dev)->fbdev) {
+               fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+
+               seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+                          fbdev_fb->base.width,
+                          fbdev_fb->base.height,
+                          fbdev_fb->base.depth,
+                          fbdev_fb->base.bits_per_pixel,
+                          fbdev_fb->base.modifier[0],
+                          drm_framebuffer_read_refcount(&fbdev_fb->base));
+               describe_obj(m, fbdev_fb->obj);
+               seq_putc(m, '\n');
+       }
 #endif
 
        mutex_lock(&dev->mode_config.fb_lock);
@@ -2017,7 +2031,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        int ret;
@@ -2114,7 +2128,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        int ret;
@@ -2141,7 +2155,7 @@ static int i915_execlists(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        u32 status_pointer;
        u8 read_pointer;
@@ -2244,7 +2258,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2317,7 +2331,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
 
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        int i;
@@ -2338,7 +2352,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        if (IS_GEN6(dev_priv))
@@ -2372,7 +2386,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_file *file;
 
        int ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2415,7 +2429,7 @@ static int count_irq_waiters(struct drm_i915_private *i915)
        int count = 0;
 
        for_each_engine(engine, i915)
-               count += engine->irq_refcount;
+               count += intel_engine_has_waiter(engine);
 
        return count;
 }
@@ -2424,11 +2438,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_file *file;
 
        seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
-       seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
+       seq_printf(m, "GPU busy? %s [%x]\n",
+                  yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
        seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
        seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
                   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
@@ -2469,7 +2484,7 @@ static int i915_llc(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const bool edram = INTEL_GEN(dev_priv) > 8;
 
        seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
@@ -2482,7 +2497,7 @@ static int i915_llc(struct seq_file *m, void *data)
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
-       struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
        u32 tmp, i;
 
@@ -2544,9 +2559,9 @@ static void i915_guc_client_info(struct seq_file *m,
 
        for_each_engine(engine, dev_priv) {
                seq_printf(m, "\tSubmissions: %llu %s\n",
-                               client->submissions[engine->guc_id],
+                               client->submissions[engine->id],
                                engine->name);
-               tot += client->submissions[engine->guc_id];
+               tot += client->submissions[engine->id];
        }
        seq_printf(m, "\tTotal: %llu\n", tot);
 }
@@ -2555,7 +2570,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc guc;
        struct i915_guc_client client = {};
        struct intel_engine_cs *engine;
@@ -2587,9 +2602,9 @@ static int i915_guc_info(struct seq_file *m, void *data)
        seq_printf(m, "\nGuC submissions:\n");
        for_each_engine(engine, dev_priv) {
                seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
-                       engine->name, guc.submissions[engine->guc_id],
-                       guc.last_seqno[engine->guc_id]);
-               total += guc.submissions[engine->guc_id];
+                       engine->name, guc.submissions[engine->id],
+                       guc.last_seqno[engine->id]);
+               total += guc.submissions[engine->id];
        }
        seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -2605,7 +2620,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
        u32 *log;
        int i = 0, pg;
@@ -2633,7 +2648,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 psrperf = 0;
        u32 stat[3];
        enum pipe pipe;
@@ -2701,7 +2716,6 @@ static int i915_sink_crc(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct intel_encoder *encoder;
        struct intel_connector *connector;
        struct intel_dp *intel_dp = NULL;
        int ret;
@@ -2709,18 +2723,19 @@ static int i915_sink_crc(struct seq_file *m, void *data)
 
        drm_modeset_lock_all(dev);
        for_each_intel_connector(dev, connector) {
+               struct drm_crtc *crtc;
 
-               if (connector->base.dpms != DRM_MODE_DPMS_ON)
+               if (!connector->base.state->best_encoder)
                        continue;
 
-               if (!connector->base.encoder)
+               crtc = connector->base.state->crtc;
+               if (!crtc->state->active)
                        continue;
 
-               encoder = to_intel_encoder(connector->base.encoder);
-               if (encoder->type != INTEL_OUTPUT_EDP)
+               if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
                        continue;
 
-               intel_dp = enc_to_intel_dp(&encoder->base);
+               intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
 
                ret = intel_dp_sink_crc(intel_dp, crc);
                if (ret)
@@ -2741,7 +2756,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u64 power;
        u32 units;
 
@@ -2767,12 +2782,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!HAS_RUNTIME_PM(dev_priv))
                seq_puts(m, "Runtime power management not supported\n");
 
-       seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
+       seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
        seq_printf(m, "IRQs disabled: %s\n",
                   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
@@ -2782,8 +2797,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
        seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
        seq_printf(m, "PCI device power state: %s [%d]\n",
-                  pci_power_name(dev_priv->dev->pdev->current_state),
-                  dev_priv->dev->pdev->current_state);
+                  pci_power_name(dev_priv->drm.pdev->current_state),
+                  dev_priv->drm.pdev->current_state);
 
        return 0;
 }
@@ -2792,7 +2807,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        int i;
 
@@ -2827,7 +2842,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_csr *csr;
 
        if (!HAS_CSR(dev)) {
@@ -2950,7 +2965,7 @@ static void intel_dp_info(struct seq_file *m,
 
        seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
        seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
-       if (intel_encoder->type == INTEL_OUTPUT_EDP)
+       if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
                intel_panel_info(m, &intel_connector->panel);
 }
 
@@ -2989,14 +3004,26 @@ static void intel_connector_info(struct seq_file *m,
                seq_printf(m, "\tCEA rev: %d\n",
                           connector->display_info.cea_rev);
        }
-       if (intel_encoder) {
-               if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-                   intel_encoder->type == INTEL_OUTPUT_EDP)
-                       intel_dp_info(m, intel_connector);
-               else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
-                       intel_hdmi_info(m, intel_connector);
-               else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+
+       if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+               return;
+
+       switch (connector->connector_type) {
+       case DRM_MODE_CONNECTOR_DisplayPort:
+       case DRM_MODE_CONNECTOR_eDP:
+               intel_dp_info(m, intel_connector);
+               break;
+       case DRM_MODE_CONNECTOR_LVDS:
+               if (intel_encoder->type == INTEL_OUTPUT_LVDS)
                        intel_lvds_info(m, intel_connector);
+               break;
+       case DRM_MODE_CONNECTOR_HDMIA:
+               if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
+                   intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
+                       intel_hdmi_info(m, intel_connector);
+               break;
+       default:
+               break;
        }
 
        seq_printf(m, "\tmodes:\n");
@@ -3006,7 +3033,7 @@ static void intel_connector_info(struct seq_file *m,
 
 static bool cursor_active(struct drm_device *dev, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 state;
 
        if (IS_845G(dev) || IS_I865G(dev))
@@ -3019,7 +3046,7 @@ static bool cursor_active(struct drm_device *dev, int pipe)
 
 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pos;
 
        pos = I915_READ(CURPOS(pipe));
@@ -3140,7 +3167,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc;
        struct drm_connector *connector;
 
@@ -3195,7 +3222,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
        enum intel_engine_id id;
@@ -3268,7 +3295,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        drm_modeset_lock_all(dev);
@@ -3298,7 +3325,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
        struct intel_engine_cs *engine;
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_workarounds *workarounds = &dev_priv->workarounds;
        enum intel_engine_id id;
 
@@ -3336,7 +3363,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_ddb_allocation *ddb;
        struct skl_ddb_entry *entry;
        enum pipe pipe;
@@ -3374,31 +3401,16 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 static void drrs_status_per_crtc(struct seq_file *m,
                struct drm_device *dev, struct intel_crtc *intel_crtc)
 {
-       struct intel_encoder *intel_encoder;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
        int vrefresh = 0;
+       struct drm_connector *connector;
 
-       for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
-               /* Encoder connected on this CRTC */
-               switch (intel_encoder->type) {
-               case INTEL_OUTPUT_EDP:
-                       seq_puts(m, "eDP:\n");
-                       break;
-               case INTEL_OUTPUT_DSI:
-                       seq_puts(m, "DSI:\n");
-                       break;
-               case INTEL_OUTPUT_HDMI:
-                       seq_puts(m, "HDMI:\n");
-                       break;
-               case INTEL_OUTPUT_DISPLAYPORT:
-                       seq_puts(m, "DP:\n");
-                       break;
-               default:
-                       seq_printf(m, "Other encoder (id=%d).\n",
-                                               intel_encoder->type);
-                       return;
-               }
+       drm_for_each_connector(connector, dev) {
+               if (connector->state->crtc != &intel_crtc->base)
+                       continue;
+
+               seq_printf(m, "%s:\n", connector->name);
        }
 
        if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
@@ -3461,18 +3473,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
        struct intel_crtc *intel_crtc;
        int active_crtc_cnt = 0;
 
+       drm_modeset_lock_all(dev);
        for_each_intel_crtc(dev, intel_crtc) {
-               drm_modeset_lock(&intel_crtc->base.mutex, NULL);
-
                if (intel_crtc->base.state->active) {
                        active_crtc_cnt++;
                        seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
 
                        drrs_status_per_crtc(m, dev, intel_crtc);
                }
-
-               drm_modeset_unlock(&intel_crtc->base.mutex);
        }
+       drm_modeset_unlock_all(dev);
 
        if (!active_crtc_cnt)
                seq_puts(m, "No active crtc found\n");
@@ -3490,17 +3500,23 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_encoder *encoder;
        struct intel_encoder *intel_encoder;
        struct intel_digital_port *intel_dig_port;
+       struct drm_connector *connector;
+
        drm_modeset_lock_all(dev);
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               intel_encoder = to_intel_encoder(encoder);
-               if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+       drm_for_each_connector(connector, dev) {
+               if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+                       continue;
+
+               intel_encoder = intel_attached_encoder(connector);
+               if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;
-               intel_dig_port = enc_to_dig_port(encoder);
+
+               intel_dig_port = enc_to_dig_port(&intel_encoder->base);
                if (!intel_dig_port->dp.can_mst)
                        continue;
+
                seq_printf(m, "MST Source Port %c\n",
                           port_name(intel_dig_port->port));
                drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
@@ -3512,7 +3528,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 {
        struct pipe_crc_info *info = inode->i_private;
-       struct drm_i915_private *dev_priv = info->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(info->dev);
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
        if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
@@ -3536,7 +3552,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
 {
        struct pipe_crc_info *info = inode->i_private;
-       struct drm_i915_private *dev_priv = info->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(info->dev);
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
        spin_lock_irq(&pipe_crc->lock);
@@ -3564,7 +3580,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
 {
        struct pipe_crc_info *info = filep->private_data;
        struct drm_device *dev = info->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
        char buf[PIPE_CRC_BUFFER_LEN];
        int n_entries;
@@ -3697,7 +3713,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
 static int display_crc_ctl_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3758,7 +3774,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
                case INTEL_OUTPUT_TVOUT:
                        *source = INTEL_PIPE_CRC_SOURCE_TV;
                        break;
-               case INTEL_OUTPUT_DISPLAYPORT:
+               case INTEL_OUTPUT_DP:
                case INTEL_OUTPUT_EDP:
                        dig_port = enc_to_dig_port(&encoder->base);
                        switch (dig_port->port) {
@@ -3791,7 +3807,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
                                enum intel_pipe_crc_source *source,
                                uint32_t *val)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool need_stable_symbols = false;
 
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3862,7 +3878,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
                                 enum intel_pipe_crc_source *source,
                                 uint32_t *val)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool need_stable_symbols = false;
 
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3936,7 +3952,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
                                         enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
        switch (pipe) {
@@ -3961,7 +3977,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
                                         enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
        if (pipe == PIPE_A)
@@ -4004,7 +4020,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 
 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
        struct intel_crtc_state *pipe_config;
@@ -4072,7 +4088,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                               enum intel_pipe_crc_source source)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
                                                                        pipe));
@@ -4579,7 +4595,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4595,7 +4611,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4611,7 +4627,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4702,7 +4718,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4718,7 +4734,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4734,7 +4750,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4776,7 +4792,7 @@ static int
 i915_wedged_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        *val = i915_terminally_wedged(&dev_priv->gpu_error);
 
@@ -4787,7 +4803,7 @@ static int
 i915_wedged_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /*
         * There is no safeguard against this debugfs entry colliding
@@ -4814,45 +4830,11 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
                        i915_wedged_get, i915_wedged_set,
                        "%llu\n");
 
-static int
-i915_ring_stop_get(void *data, u64 *val)
-{
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       *val = dev_priv->gpu_error.stop_rings;
-
-       return 0;
-}
-
-static int
-i915_ring_stop_set(void *data, u64 val)
-{
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       dev_priv->gpu_error.stop_rings = val;
-       mutex_unlock(&dev->struct_mutex);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
-                       i915_ring_stop_get, i915_ring_stop_set,
-                       "0x%08llx\n");
-
 static int
 i915_ring_missed_irq_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        *val = dev_priv->gpu_error.missed_irq_rings;
        return 0;
@@ -4862,7 +4844,7 @@ static int
 i915_ring_missed_irq_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        /* Lock against concurrent debugfs callers */
@@ -4883,7 +4865,7 @@ static int
 i915_ring_test_irq_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        *val = dev_priv->gpu_error.test_irq_rings;
 
@@ -4894,18 +4876,11 @@ static int
 i915_ring_test_irq_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
+       val &= INTEL_INFO(dev_priv)->ring_mask;
        DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
-
-       /* Lock against concurrent debugfs callers */
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
        dev_priv->gpu_error.test_irq_rings = val;
-       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
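
[Note: these wedged/missed-irq/test-irq controls are all exposed via DEFINE_SIMPLE_ATTRIBUTE(); a simplified sketch of the open hook that macro generates for, e.g., i915_wedged_fops:

static int i915_wedged_open(struct inode *inode, struct file *file)
{
        /* simple_attr_read()/simple_attr_write() then print/parse a u64
         * with "%llu\n" and call the matching get/set callback. */
        return simple_attr_open(inode, file, i915_wedged_get,
                                i915_wedged_set, "%llu\n");
}
]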
@@ -4934,7 +4909,7 @@ static int
 i915_drop_caches_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4946,7 +4921,7 @@ i915_drop_caches_set(void *data, u64 val)
                return ret;
 
        if (val & DROP_ACTIVE) {
-               ret = i915_gpu_idle(dev);
+               ret = i915_gem_wait_for_idle(dev_priv);
                if (ret)
                        goto unlock;
        }
@@ -4974,7 +4949,7 @@ static int
 i915_max_freq_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        if (INTEL_INFO(dev)->gen < 6)
@@ -4996,7 +4971,7 @@ static int
 i915_max_freq_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 hw_max, hw_min;
        int ret;
 
@@ -5041,7 +5016,7 @@ static int
 i915_min_freq_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        if (INTEL_INFO(dev)->gen < 6)
@@ -5063,7 +5038,7 @@ static int
 i915_min_freq_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 hw_max, hw_min;
        int ret;
 
@@ -5108,7 +5083,7 @@ static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 snpcr;
        int ret;
 
@@ -5123,7 +5098,7 @@ i915_cache_sharing_get(void *data, u64 *val)
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
        intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
        *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -5134,7 +5109,7 @@ static int
 i915_cache_sharing_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 snpcr;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -5171,7 +5146,7 @@ struct sseu_dev_status {
 static void cherryview_sseu_device_status(struct drm_device *dev,
                                          struct sseu_dev_status *stat)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ss_max = 2;
        int ss;
        u32 sig1[ss_max], sig2[ss_max];
@@ -5203,7 +5178,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
 static void gen9_sseu_device_status(struct drm_device *dev,
                                    struct sseu_dev_status *stat)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int s_max = 3, ss_max = 4;
        int s, ss;
        u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
@@ -5268,7 +5243,7 @@ static void gen9_sseu_device_status(struct drm_device *dev,
 static void broadwell_sseu_device_status(struct drm_device *dev,
                                         struct sseu_dev_status *stat)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int s;
        u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
 
@@ -5347,7 +5322,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
        struct drm_device *dev = inode->i_private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen < 6)
                return 0;
@@ -5361,7 +5336,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
        struct drm_device *dev = inode->i_private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen < 6)
                return 0;
@@ -5477,7 +5452,6 @@ static const struct i915_debugfs_files {
        {"i915_max_freq", &i915_max_freq_fops},
        {"i915_min_freq", &i915_min_freq_fops},
        {"i915_cache_sharing", &i915_cache_sharing_fops},
-       {"i915_ring_stop", &i915_ring_stop_fops},
        {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
        {"i915_ring_test_irq", &i915_ring_test_irq_fops},
        {"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -5495,7 +5469,7 @@ static const struct i915_debugfs_files {
 
 void intel_display_crc_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
 
        for_each_pipe(dev_priv, pipe) {
@@ -5507,8 +5481,9 @@ void intel_display_crc_init(struct drm_device *dev)
        }
 }
 
-int i915_debugfs_init(struct drm_minor *minor)
+int i915_debugfs_register(struct drm_i915_private *dev_priv)
 {
+       struct drm_minor *minor = dev_priv->drm.primary;
        int ret, i;
 
        ret = i915_forcewake_create(minor->debugfs_root, minor);
@@ -5534,8 +5509,9 @@ int i915_debugfs_init(struct drm_minor *minor)
                                        minor->debugfs_root, minor);
 }
 
-void i915_debugfs_cleanup(struct drm_minor *minor)
+void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
 {
+       struct drm_minor *minor = dev_priv->drm.primary;
        int i;
 
        drm_debugfs_remove_files(i915_debugfs_list,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
deleted file mode 100644 (file)
index d15a461..0000000
+++ /dev/null
@@ -1,1692 +0,0 @@
-/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_legacy.h>
-#include "intel_drv.h"
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_vgpu.h"
-#include "i915_trace.h"
-#include <linux/pci.h>
-#include <linux/console.h>
-#include <linux/vt.h>
-#include <linux/vgaarb.h>
-#include <linux/acpi.h>
-#include <linux/pnp.h>
-#include <linux/vga_switcheroo.h>
-#include <linux/slab.h>
-#include <acpi/video.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/oom.h>
-
-static unsigned int i915_load_fail_count;
-
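-/*
- * Fault-injection helper: each call advances a counter, and the
- * checkpoint that makes the counter reach the i915.inject_load_failure
- * module parameter reports failure exactly once, so the matching
- * error/unwind path can be exercised.
- */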
-bool __i915_inject_load_failure(const char *func, int line)
-{
-       if (i915_load_fail_count >= i915.inject_load_failure)
-               return false;
-
-       if (++i915_load_fail_count == i915.inject_load_failure) {
-               DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
-                        i915.inject_load_failure, func, line);
-               return true;
-       }
-
-       return false;
-}
-
-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
-                   "providing the dmesg log by booting with drm.debug=0xf"
-
-void
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
-             const char *fmt, ...)
-{
-       static bool shown_bug_once;
-       struct device *dev = dev_priv->dev->dev;
-       bool is_error = level[1] <= KERN_ERR[1];
-       bool is_debug = level[1] == KERN_DEBUG[1];
-       struct va_format vaf;
-       va_list args;
-
-       if (is_debug && !(drm_debug & DRM_UT_DRIVER))
-               return;
-
-       va_start(args, fmt);
-
-       vaf.fmt = fmt;
-       vaf.va = &args;
-
-       dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
-                  __builtin_return_address(0), &vaf);
-
-       if (is_error && !shown_bug_once) {
-               dev_notice(dev, "%s", FDO_BUG_MSG);
-               shown_bug_once = true;
-       }
-
-       va_end(args);
-}
-
-static bool i915_error_injected(struct drm_i915_private *dev_priv)
-{
-       return i915.inject_load_failure &&
-              i915_load_fail_count == i915.inject_load_failure;
-}
-
-#define i915_load_error(dev_priv, fmt, ...)                                 \
-       __i915_printk(dev_priv,                                              \
-                     i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
-                     fmt, ##__VA_ARGS__)
-
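-/*
- * GETPARAM ioctl handler: reports driver and hardware capabilities to
- * userspace as a single int, rejecting the obsolete UMS/DRI-era
- * parameters with -ENODEV.
- */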
-static int i915_getparam(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       drm_i915_getparam_t *param = data;
-       int value;
-
-       switch (param->param) {
-       case I915_PARAM_IRQ_ACTIVE:
-       case I915_PARAM_ALLOW_BATCHBUFFER:
-       case I915_PARAM_LAST_DISPATCH:
-               /* Reject all old ums/dri params. */
-               return -ENODEV;
-       case I915_PARAM_CHIPSET_ID:
-               value = dev->pdev->device;
-               break;
-       case I915_PARAM_REVISION:
-               value = dev->pdev->revision;
-               break;
-       case I915_PARAM_HAS_GEM:
-               value = 1;
-               break;
-       case I915_PARAM_NUM_FENCES_AVAIL:
-               value = dev_priv->num_fence_regs;
-               break;
-       case I915_PARAM_HAS_OVERLAY:
-               value = dev_priv->overlay ? 1 : 0;
-               break;
-       case I915_PARAM_HAS_PAGEFLIPPING:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXECBUF2:
-               /* depends on GEM */
-               value = 1;
-               break;
-       case I915_PARAM_HAS_BSD:
-               value = intel_engine_initialized(&dev_priv->engine[VCS]);
-               break;
-       case I915_PARAM_HAS_BLT:
-               value = intel_engine_initialized(&dev_priv->engine[BCS]);
-               break;
-       case I915_PARAM_HAS_VEBOX:
-               value = intel_engine_initialized(&dev_priv->engine[VECS]);
-               break;
-       case I915_PARAM_HAS_BSD2:
-               value = intel_engine_initialized(&dev_priv->engine[VCS2]);
-               break;
-       case I915_PARAM_HAS_RELAXED_FENCING:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_COHERENT_RINGS:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXEC_CONSTANTS:
-               value = INTEL_INFO(dev)->gen >= 4;
-               break;
-       case I915_PARAM_HAS_RELAXED_DELTA:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_GEN7_SOL_RESET:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_LLC:
-               value = HAS_LLC(dev);
-               break;
-       case I915_PARAM_HAS_WT:
-               value = HAS_WT(dev);
-               break;
-       case I915_PARAM_HAS_ALIASING_PPGTT:
-               value = USES_PPGTT(dev);
-               break;
-       case I915_PARAM_HAS_WAIT_TIMEOUT:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_SEMAPHORES:
-               value = i915_semaphore_is_enabled(dev_priv);
-               break;
-       case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_SECURE_BATCHES:
-               value = capable(CAP_SYS_ADMIN);
-               break;
-       case I915_PARAM_HAS_PINNED_BATCHES:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXEC_NO_RELOC:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXEC_HANDLE_LUT:
-               value = 1;
-               break;
-       case I915_PARAM_CMD_PARSER_VERSION:
-               value = i915_cmd_parser_get_version(dev_priv);
-               break;
-       case I915_PARAM_HAS_COHERENT_PHYS_GTT:
-               value = 1;
-               break;
-       case I915_PARAM_MMAP_VERSION:
-               value = 1;
-               break;
-       case I915_PARAM_SUBSLICE_TOTAL:
-               value = INTEL_INFO(dev)->subslice_total;
-               if (!value)
-                       return -ENODEV;
-               break;
-       case I915_PARAM_EU_TOTAL:
-               value = INTEL_INFO(dev)->eu_total;
-               if (!value)
-                       return -ENODEV;
-               break;
-       case I915_PARAM_HAS_GPU_RESET:
-               value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
-               break;
-       case I915_PARAM_HAS_RESOURCE_STREAMER:
-               value = HAS_RESOURCE_STREAMER(dev);
-               break;
-       case I915_PARAM_HAS_EXEC_SOFTPIN:
-               value = 1;
-               break;
-       default:
-               DRM_DEBUG("Unknown parameter %d\n", param->param);
-               return -EINVAL;
-       }
-
-       if (copy_to_user(param->value, &value, sizeof(int))) {
-               DRM_ERROR("copy_to_user failed\n");
-               return -EFAULT;
-       }
-
-       return 0;
-}
-
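-/*
- * Cache a reference to the PCI host bridge (bus 0, device 0, function
- * 0); its config space is used later for MCHBAR setup and teardown.
- */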
-static int i915_get_bridge_dev(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
-       if (!dev_priv->bridge_dev) {
-               DRM_ERROR("bridge device not found\n");
-               return -1;
-       }
-       return 0;
-}
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-       u32 temp_lo, temp_hi = 0;
-       u64 mchbar_addr;
-       int ret;
-
-       if (INTEL_INFO(dev)->gen >= 4)
-               pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
-       pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
-       mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
-       /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
-       if (mchbar_addr &&
-           pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
-               return 0;
-#endif
-
-       /* Get some space for it */
-       dev_priv->mch_res.name = "i915 MCHBAR";
-       dev_priv->mch_res.flags = IORESOURCE_MEM;
-       ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
-                                    &dev_priv->mch_res,
-                                    MCHBAR_SIZE, MCHBAR_SIZE,
-                                    PCIBIOS_MIN_MEM,
-                                    0, pcibios_align_resource,
-                                    dev_priv->bridge_dev);
-       if (ret) {
-               DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
-               dev_priv->mch_res.start = 0;
-               return ret;
-       }
-
-       if (INTEL_INFO(dev)->gen >= 4)
-               pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
-                                      upper_32_bits(dev_priv->mch_res.start));
-
-       pci_write_config_dword(dev_priv->bridge_dev, reg,
-                              lower_32_bits(dev_priv->mch_res.start));
-       return 0;
-}
-
-/* Set up MCHBAR if possible; note in mchbar_need_disable whether we must disable it again on teardown */
-static void
-intel_setup_mchbar(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-       u32 temp;
-       bool enabled;
-
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               return;
-
-       dev_priv->mchbar_need_disable = false;
-
-       if (IS_I915G(dev) || IS_I915GM(dev)) {
-               pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
-               enabled = !!(temp & DEVEN_MCHBAR_EN);
-       } else {
-               pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-               enabled = temp & 1;
-       }
-
-       /* If it's already enabled, we don't have to do anything */
-       if (enabled)
-               return;
-
-       if (intel_alloc_mchbar_resource(dev))
-               return;
-
-       dev_priv->mchbar_need_disable = true;
-
-       /* Space is allocated or reserved, so enable it. */
-       if (IS_I915G(dev) || IS_I915GM(dev)) {
-               pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
-                                      temp | DEVEN_MCHBAR_EN);
-       } else {
-               pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-               pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
-       }
-}
-
-static void
-intel_teardown_mchbar(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-
-       if (dev_priv->mchbar_need_disable) {
-               if (IS_I915G(dev) || IS_I915GM(dev)) {
-                       u32 deven_val;
-
-                       pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
-                                             &deven_val);
-                       deven_val &= ~DEVEN_MCHBAR_EN;
-                       pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
-                                              deven_val);
-               } else {
-                       u32 mchbar_val;
-
-                       pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
-                                             &mchbar_val);
-                       mchbar_val &= ~1;
-                       pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
-                                              mchbar_val);
-               }
-       }
-
-       if (dev_priv->mch_res.start)
-               release_resource(&dev_priv->mch_res);
-}
-
-/* true = enable decode, false = disable decode */
-static unsigned int i915_vga_set_decode(void *cookie, bool state)
-{
-       struct drm_device *dev = cookie;
-
-       intel_modeset_vga_set_state(dev, state);
-       if (state)
-               return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
-                      VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-       else
-               return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-}
-
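-/*
- * vga_switcheroo callback: powers the GPU up (forcing D0 and resuming)
- * or down (suspending) when the mux hands the display over.
- */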
-static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
-{
-       struct drm_device *dev = pci_get_drvdata(pdev);
-       pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
-
-       if (state == VGA_SWITCHEROO_ON) {
-               pr_info("switched on\n");
-               dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               /* i915 resume handler doesn't set to D0 */
-               pci_set_power_state(dev->pdev, PCI_D0);
-               i915_resume_switcheroo(dev);
-               dev->switch_power_state = DRM_SWITCH_POWER_ON;
-       } else {
-               pr_info("switched off\n");
-               dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               i915_suspend_switcheroo(dev, pmm);
-               dev->switch_power_state = DRM_SWITCH_POWER_OFF;
-       }
-}
-
-static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
-{
-       struct drm_device *dev = pci_get_drvdata(pdev);
-
-       /*
-        * FIXME: open_count is protected by drm_global_mutex but that would lead to
-        * locking inversion with the driver load path. And the access here is
-        * completely racy anyway. So don't bother with locking for now.
-        */
-       return dev->open_count == 0;
-}
-
-static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
-       .set_gpu_state = i915_switcheroo_set_state,
-       .reprobe = NULL,
-       .can_switch = i915_switcheroo_can_switch,
-};
-
-static void i915_gem_fini(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       /*
-        * Neither the BIOS, ourselves nor any other kernel
-        * expects the system to be in execlists mode on startup,
-        * so we need to reset the GPU back to legacy mode. And the only
-        * known way to disable logical contexts is through a GPU reset.
-        *
-        * So in order to leave the system in a known default configuration,
-        * always reset the GPU upon unload. Afterwards we then clean up the
-        * GEM state tracking, flushing off the requests and leaving the
-        * system in a known idle state.
-        *
-        * Note that it is of the utmost importance that the GPU is idle and
-        * all stray writes are flushed *before* we dismantle the backing
-        * storage for the pinned objects.
-        *
-        * However, since we are uncertain that resetting the GPU on older
-        * machines is a good idea, we don't - just in case it leaves the
-        * machine in an unusable condition.
-        */
-       if (HAS_HW_CONTEXTS(dev)) {
-               int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
-               WARN_ON(reset && reset != -ENODEV);
-       }
-
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_reset(dev);
-       i915_gem_cleanup_engines(dev);
-       i915_gem_context_fini(dev);
-       mutex_unlock(&dev->struct_mutex);
-
-       WARN_ON(!list_empty(&to_i915(dev)->context_list));
-}
-
-static int i915_load_modeset_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       if (i915_inject_load_failure())
-               return -ENODEV;
-
-       ret = intel_bios_init(dev_priv);
-       if (ret)
-               DRM_INFO("failed to find VBIOS tables\n");
-
-       /* If we have more than one VGA card, then we need to arbitrate access
-        * to the common VGA resources.
-        *
-        * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
-        * then we do not take part in VGA arbitration and the
-        * vga_client_register() fails with -ENODEV.
-        */
-       ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-       if (ret && ret != -ENODEV)
-               goto out;
-
-       intel_register_dsm_handler();
-
-       ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
-       if (ret)
-               goto cleanup_vga_client;
-
-       /* must happen before intel_power_domains_init_hw() on VLV/CHV */
-       intel_update_rawclk(dev_priv);
-
-       intel_power_domains_init_hw(dev_priv, false);
-
-       intel_csr_ucode_init(dev_priv);
-
-       ret = intel_irq_install(dev_priv);
-       if (ret)
-               goto cleanup_csr;
-
-       intel_setup_gmbus(dev);
-
-       /* Important: The output setup functions called by modeset_init need
-        * working irqs for e.g. gmbus and dp aux transfers. */
-       intel_modeset_init(dev);
-
-       intel_guc_init(dev);
-
-       ret = i915_gem_init(dev);
-       if (ret)
-               goto cleanup_irq;
-
-       intel_modeset_gem_init(dev);
-
-       if (INTEL_INFO(dev)->num_pipes == 0)
-               return 0;
-
-       ret = intel_fbdev_init(dev);
-       if (ret)
-               goto cleanup_gem;
-
-       /* Only enable hotplug handling once the fbdev is fully set up. */
-       intel_hpd_init(dev_priv);
-
-       /*
-        * Some ports require correctly set-up hpd registers for detection to
-        * work properly (otherwise they report a ghost "connected" connector
-        * status), e.g. VGA on gm45.  Hence we can only set up the initial
-        * fbdev config after hpd irqs are fully enabled. Ideally we would
-        * scan for the initial config only once hotplug handling is enabled,
-        * but due to screwed-up locking around kms/fbdev init we can't
-        * protect the fbdev initial config scanning against hotplug events.
-        * Hence do this first and ignore the tiny window where we will
-        * lose hotplug notifications.
-        */
-       intel_fbdev_initial_config_async(dev);
-
-       drm_kms_helper_poll_init(dev);
-
-       return 0;
-
-cleanup_gem:
-       i915_gem_fini(dev);
-cleanup_irq:
-       intel_guc_fini(dev);
-       drm_irq_uninstall(dev);
-       intel_teardown_gmbus(dev);
-cleanup_csr:
-       intel_csr_ucode_fini(dev_priv);
-       intel_power_domains_fini(dev_priv);
-       vga_switcheroo_unregister_client(dev->pdev);
-cleanup_vga_client:
-       vga_client_register(dev->pdev, NULL, NULL, NULL);
-out:
-       return ret;
-}
-
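-/*
- * Remove firmware framebuffer drivers (e.g. efifb/vesafb) that claim
- * the GGTT aperture before we take it over; stubbed out when CONFIG_FB
- * is disabled.
- */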
-#if IS_ENABLED(CONFIG_FB)
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
-{
-       struct apertures_struct *ap;
-       struct pci_dev *pdev = dev_priv->dev->pdev;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       bool primary;
-       int ret;
-
-       ap = alloc_apertures(1);
-       if (!ap)
-               return -ENOMEM;
-
-       ap->ranges[0].base = ggtt->mappable_base;
-       ap->ranges[0].size = ggtt->mappable_end;
-
-       primary =
-               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-
-       ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
-
-       kfree(ap);
-
-       return ret;
-}
-#else
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
-{
-       return 0;
-}
-#endif
-
-#if !defined(CONFIG_VGA_CONSOLE)
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
-       return 0;
-}
-#elif !defined(CONFIG_DUMMY_CONSOLE)
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
-       return -ENODEV;
-}
-#else
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
-       int ret = 0;
-
-       DRM_INFO("Replacing VGA console driver\n");
-
-       console_lock();
-       if (con_is_bound(&vga_con))
-               ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
-       if (ret == 0) {
-               ret = do_unregister_con_driver(&vga_con);
-
-               /* Ignore "already unregistered". */
-               if (ret == -ENODEV)
-                       ret = 0;
-       }
-       console_unlock();
-
-       return ret;
-}
-#endif
-
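-/* Log gen, PCI id/revision and all device info flags at driver debug level. */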
-static void i915_dump_device_info(struct drm_i915_private *dev_priv)
-{
-       const struct intel_device_info *info = &dev_priv->info;
-
-#define PRINT_S(name) "%s"
-#define SEP_EMPTY
-#define PRINT_FLAG(name) info->name ? #name "," : ""
-#define SEP_COMMA ,
-       DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
-                        DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
-                        info->gen,
-                        dev_priv->dev->pdev->device,
-                        dev_priv->dev->pdev->revision,
-                        DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
-#undef PRINT_S
-#undef SEP_EMPTY
-#undef PRINT_FLAG
-#undef SEP_COMMA
-}
-
-static void cherryview_sseu_info_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_device_info *info;
-       u32 fuse, eu_dis;
-
-       info = (struct intel_device_info *)&dev_priv->info;
-       fuse = I915_READ(CHV_FUSE_GT);
-
-       info->slice_total = 1;
-
-       if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-               info->subslice_per_slice++;
-               eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
-                                CHV_FGT_EU_DIS_SS0_R1_MASK);
-               info->eu_total += 8 - hweight32(eu_dis);
-       }
-
-       if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-               info->subslice_per_slice++;
-               eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
-                                CHV_FGT_EU_DIS_SS1_R1_MASK);
-               info->eu_total += 8 - hweight32(eu_dis);
-       }
-
-       info->subslice_total = info->subslice_per_slice;
-       /*
-        * CHV is expected to always have a uniform distribution of EU
-        * across subslices.
-        */
-       info->eu_per_subslice = info->subslice_total ?
-                               info->eu_total / info->subslice_total :
-                               0;
-       /*
-        * CHV supports subslice power gating on devices with more than
-        * one subslice, and supports EU power gating on devices with
-        * more than one EU pair per subslice.
-        */
-       info->has_slice_pg = 0;
-       info->has_subslice_pg = (info->subslice_total > 1);
-       info->has_eu_pg = (info->eu_per_subslice > 2);
-}
-
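-/*
- * Derive the gen9 slice/subslice/EU topology from the fuse registers:
- * count enabled slices and subslices, total the EUs that survived
- * fusing, and set the power-gating capabilities accordingly.
- */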
-static void gen9_sseu_info_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_device_info *info;
-       int s_max = 3, ss_max = 4, eu_max = 8;
-       int s, ss;
-       u32 fuse2, s_enable, ss_disable, eu_disable;
-       u8 eu_mask = 0xff;
-
-       info = (struct intel_device_info *)&dev_priv->info;
-       fuse2 = I915_READ(GEN8_FUSE2);
-       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
-                  GEN8_F2_S_ENA_SHIFT;
-       ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
-                    GEN9_F2_SS_DIS_SHIFT;
-
-       info->slice_total = hweight32(s_enable);
-       /*
-        * The subslice disable field is global, i.e. it applies
-        * to each of the enabled slices.
-        */
-       info->subslice_per_slice = ss_max - hweight32(ss_disable);
-       info->subslice_total = info->slice_total *
-                              info->subslice_per_slice;
-
-       /*
-        * Iterate through enabled slices and subslices to
-        * count the total enabled EU.
-        */
-       for (s = 0; s < s_max; s++) {
-               if (!(s_enable & (0x1 << s)))
-                       /* skip disabled slice */
-                       continue;
-
-               eu_disable = I915_READ(GEN9_EU_DISABLE(s));
-               for (ss = 0; ss < ss_max; ss++) {
-                       int eu_per_ss;
-
-                       if (ss_disable & (0x1 << ss))
-                               /* skip disabled subslice */
-                               continue;
-
-                       eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
-                                                     eu_mask);
-
-                       /*
-                        * Record which subslice(s) have 7 EUs. We
-                        * can tune the hash used to spread work among
-                        * subslices if they are unbalanced.
-                        */
-                       if (eu_per_ss == 7)
-                               info->subslice_7eu[s] |= 1 << ss;
-
-                       info->eu_total += eu_per_ss;
-               }
-       }
-
-       /*
-        * SKL is expected to always have a uniform distribution
-        * of EU across subslices with the exception that any one
-        * EU in any one subslice may be fused off for die
-        * recovery. BXT is expected to be perfectly uniform in EU
-        * distribution.
-        */
-       info->eu_per_subslice = info->subslice_total ?
-                               DIV_ROUND_UP(info->eu_total,
-                                            info->subslice_total) : 0;
-       /*
-        * SKL supports slice power gating on devices with more than
-        * one slice, and supports EU power gating on devices with
-        * more than one EU pair per subslice. BXT supports subslice
-        * power gating on devices with more than one subslice, and
-        * supports EU power gating on devices with more than one EU
-        * pair per subslice.
-        */
-       info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
-                              (info->slice_total > 1));
-       info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
-       info->has_eu_pg = (info->eu_per_subslice > 2);
-
-       if (IS_BROXTON(dev)) {
-#define IS_SS_DISABLED(_ss_disable, ss)    (_ss_disable & (0x1 << ss))
-               /*
-                * There is a HW issue in 2x6 fused down parts that requires
-                * Pooled EU to be enabled as a WA. The pool configuration
-                * changes depending upon which subslice is fused down. This
-                * doesn't apply if the device has all 3 subslices enabled.
-                */
-               /* WaEnablePooledEuFor2x6:bxt */
-               info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
-                                      (info->subslice_per_slice == 2 &&
-                                       INTEL_REVID(dev) < BXT_REVID_C0));
-
-               info->min_eu_in_pool = 0;
-               if (info->has_pooled_eu) {
-                       if (IS_SS_DISABLED(ss_disable, 0) ||
-                           IS_SS_DISABLED(ss_disable, 2))
-                               info->min_eu_in_pool = 3;
-                       else if (IS_SS_DISABLED(ss_disable, 1))
-                               info->min_eu_in_pool = 6;
-                       else
-                               info->min_eu_in_pool = 9;
-               }
-#undef IS_SS_DISABLED
-       }
-}
-
-static void broadwell_sseu_info_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_device_info *info;
-       const int s_max = 3, ss_max = 3, eu_max = 8;
-       int s, ss;
-       u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
-
-       fuse2 = I915_READ(GEN8_FUSE2);
-       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-       ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
-
-       eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
-       eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
-                       ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
-                        (32 - GEN8_EU_DIS0_S1_SHIFT));
-       eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
-                       ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
-                        (32 - GEN8_EU_DIS1_S2_SHIFT));
-
-
-       info = (struct intel_device_info *)&dev_priv->info;
-       info->slice_total = hweight32(s_enable);
-
-       /*
-        * The subslice disable field is global, i.e. it applies
-        * to each of the enabled slices.
-        */
-       info->subslice_per_slice = ss_max - hweight32(ss_disable);
-       info->subslice_total = info->slice_total * info->subslice_per_slice;
-
-       /*
-        * Iterate through enabled slices and subslices to
-        * count the total enabled EU.
-        */
-       for (s = 0; s < s_max; s++) {
-               if (!(s_enable & (0x1 << s)))
-                       /* skip disabled slice */
-                       continue;
-
-               for (ss = 0; ss < ss_max; ss++) {
-                       u32 n_disabled;
-
-                       if (ss_disable & (0x1 << ss))
-                               /* skip disabled subslice */
-                               continue;
-
-                       n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
-
-                       /*
-                        * Record which subslices have 7 EUs.
-                        */
-                       if (eu_max - n_disabled == 7)
-                               info->subslice_7eu[s] |= 1 << ss;
-
-                       info->eu_total += eu_max - n_disabled;
-               }
-       }
-
-       /*
-        * BDW is expected to always have a uniform distribution of EU across
-        * subslices with the exception that any one EU in any one subslice may
-        * be fused off for die recovery.
-        */
-       info->eu_per_subslice = info->subslice_total ?
-               DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
-
-       /*
-        * BDW supports slice power gating on devices with more than
-        * one slice.
-        */
-       info->has_slice_pg = (info->slice_total > 1);
-       info->has_subslice_pg = 0;
-       info->has_eu_pg = 0;
-}
-
-/*
- * Determine various intel_device_info fields at runtime.
- *
- * Use it when either:
- *   - it's judged too laborious to encode the limit into n static
- *     structures when a simple if statement does the job,
- *   - run-time checks (e.g. reading fuse/strap registers) are needed.
- *
- * This function needs to be called:
- *   - after the MMIO has been setup as we are reading registers,
- *   - after the PCH has been detected,
- *   - before the first usage of the fields it can tweak.
- */
-static void intel_device_info_runtime_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_device_info *info;
-       enum pipe pipe;
-
-       info = (struct intel_device_info *)&dev_priv->info;
-
-       /*
-        * Skylake and Broxton currently don't expose the topmost plane as its
-        * use is exclusive with the legacy cursor and we only want to expose
-        * one of those, not both. Until we can safely expose the topmost plane
-        * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
-        * we don't expose the topmost plane at all to prevent ABI breakage
-        * down the line.
-        */
-       if (IS_BROXTON(dev)) {
-               info->num_sprites[PIPE_A] = 2;
-               info->num_sprites[PIPE_B] = 2;
-               info->num_sprites[PIPE_C] = 1;
-       } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 2;
-       else
-               for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 1;
-
-       if (i915.disable_display) {
-               DRM_INFO("Display disabled (module parameter)\n");
-               info->num_pipes = 0;
-       } else if (info->num_pipes > 0 &&
-                  (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
-                  HAS_PCH_SPLIT(dev)) {
-               u32 fuse_strap = I915_READ(FUSE_STRAP);
-               u32 sfuse_strap = I915_READ(SFUSE_STRAP);
-
-               /*
-                * SFUSE_STRAP is supposed to have a bit signalling the display
-                * is fused off. Unfortunately it seems that, at least in
-                * certain cases, fused off display means that PCH display
-                * reads don't land anywhere. In that case, we read 0s.
-                *
-                * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
-                * should be set when taking over after the firmware.
-                */
-               if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
-                   sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
-                   (dev_priv->pch_type == PCH_CPT &&
-                    !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
-                       DRM_INFO("Display fused off, disabling\n");
-                       info->num_pipes = 0;
-               } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
-                       DRM_INFO("PipeC fused off\n");
-                       info->num_pipes -= 1;
-               }
-       } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
-               u32 dfsm = I915_READ(SKL_DFSM);
-               u8 disabled_mask = 0;
-               bool invalid;
-               int num_bits;
-
-               if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
-                       disabled_mask |= BIT(PIPE_A);
-               if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
-                       disabled_mask |= BIT(PIPE_B);
-               if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
-                       disabled_mask |= BIT(PIPE_C);
-
-               num_bits = hweight8(disabled_mask);
-
-               switch (disabled_mask) {
-               case BIT(PIPE_A):
-               case BIT(PIPE_B):
-               case BIT(PIPE_A) | BIT(PIPE_B):
-               case BIT(PIPE_A) | BIT(PIPE_C):
-                       invalid = true;
-                       break;
-               default:
-                       invalid = false;
-               }
-
-               if (num_bits > info->num_pipes || invalid)
-                       DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
-                                 disabled_mask);
-               else
-                       info->num_pipes -= num_bits;
-       }
-
-       /* Initialize slice/subslice/EU info */
-       if (IS_CHERRYVIEW(dev))
-               cherryview_sseu_info_init(dev);
-       else if (IS_BROADWELL(dev))
-               broadwell_sseu_info_init(dev);
-       else if (INTEL_INFO(dev)->gen >= 9)
-               gen9_sseu_info_init(dev);
-
-       info->has_snoop = !info->has_llc;
-
-       /* Snooping is broken on BXT A stepping. */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
-               info->has_snoop = false;
-
-       DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
-       DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
-       DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
-       DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
-       DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
-       DRM_DEBUG_DRIVER("Has Pooled EU: %s\n", HAS_POOLED_EU(dev) ? "y" : "n");
-       if (HAS_POOLED_EU(dev))
-               DRM_DEBUG_DRIVER("Min EU in pool: %u\n", info->min_eu_in_pool);
-       DRM_DEBUG_DRIVER("has slice power gating: %s\n",
-                        info->has_slice_pg ? "y" : "n");
-       DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
-                        info->has_subslice_pg ? "y" : "n");
-       DRM_DEBUG_DRIVER("has EU power gating: %s\n",
-                        info->has_eu_pg ? "y" : "n");
-
-       i915.enable_execlists =
-               intel_sanitize_enable_execlists(dev_priv,
-                                               i915.enable_execlists);
-
-       /*
-        * i915.enable_ppgtt is read-only, so do an early pass to validate the
-        * user's requested state against the hardware/driver capabilities.  We
-        * do this now so that we can print out any log messages once rather
-        * than every time we check intel_enable_ppgtt().
-        */
-       i915.enable_ppgtt =
-               intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
-       DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
-}
-
-static void intel_init_dpio(struct drm_i915_private *dev_priv)
-{
-       /*
-        * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
-        * CHV x1 PHY (DP/HDMI D)
-        * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
-        */
-       if (IS_CHERRYVIEW(dev_priv)) {
-               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
-               DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
-       } else if (IS_VALLEYVIEW(dev_priv)) {
-               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
-       }
-}
-
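-/*
- * Allocate the driver's ordered workqueues (general, DP hotplug,
- * hangcheck), unwinding whatever was allocated if any of them fails.
- */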
-static int i915_workqueues_init(struct drm_i915_private *dev_priv)
-{
-       /*
-        * The i915 workqueue is primarily used for batched retirement of
-        * requests (and thus managing bo) once the task has been completed
-        * by the GPU. i915_gem_retire_requests() is called directly when we
-        * need high-priority retirement, such as waiting for an explicit
-        * bo.
-        *
-        * It is also used for periodic low-priority events, such as
-        * idle-timers and recording error state.
-        *
-        * All tasks on the workqueue are expected to acquire the dev mutex
-        * so there is no point in running more than one instance of the
-        * workqueue at any time.  Use an ordered one.
-        */
-       dev_priv->wq = alloc_ordered_workqueue("i915", 0);
-       if (dev_priv->wq == NULL)
-               goto out_err;
-
-       dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-       if (dev_priv->hotplug.dp_wq == NULL)
-               goto out_free_wq;
-
-       dev_priv->gpu_error.hangcheck_wq =
-               alloc_ordered_workqueue("i915-hangcheck", 0);
-       if (dev_priv->gpu_error.hangcheck_wq == NULL)
-               goto out_free_dp_wq;
-
-       return 0;
-
-out_free_dp_wq:
-       destroy_workqueue(dev_priv->hotplug.dp_wq);
-out_free_wq:
-       destroy_workqueue(dev_priv->wq);
-out_err:
-       DRM_ERROR("Failed to allocate workqueues.\n");
-
-       return -ENOMEM;
-}
-
-static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
-{
-       destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
-       destroy_workqueue(dev_priv->hotplug.dp_wq);
-       destroy_workqueue(dev_priv->wq);
-}
-
-/**
- * i915_driver_init_early - setup state not requiring device access
- * @dev_priv: device private
- *
- * Initialize everything that is a "SW-only" state, that is state not
- * requiring accessing the device or exposing the driver via kernel internal
- * or userspace interfaces. Example steps belonging here: lock initialization,
- * system memory allocation, setting up device specific attributes and
- * function hooks not requiring accessing the device.
- */
-static int i915_driver_init_early(struct drm_i915_private *dev_priv,
-                                 struct drm_device *dev,
-                                 struct intel_device_info *info)
-{
-       struct intel_device_info *device_info;
-       int ret = 0;
-
-       if (i915_inject_load_failure())
-               return -ENODEV;
-
-       /* Set up the write-once "constant" device info */
-       device_info = (struct intel_device_info *)&dev_priv->info;
-       memcpy(device_info, info, sizeof(dev_priv->info));
-       device_info->device_id = dev->pdev->device;
-
-       BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
-       device_info->gen_mask = BIT(device_info->gen - 1);
-
-       spin_lock_init(&dev_priv->irq_lock);
-       spin_lock_init(&dev_priv->gpu_error.lock);
-       mutex_init(&dev_priv->backlight_lock);
-       spin_lock_init(&dev_priv->uncore.lock);
-       spin_lock_init(&dev_priv->mm.object_stat_lock);
-       spin_lock_init(&dev_priv->mmio_flip_lock);
-       mutex_init(&dev_priv->sb_lock);
-       mutex_init(&dev_priv->modeset_restore_lock);
-       mutex_init(&dev_priv->av_mutex);
-       mutex_init(&dev_priv->wm.wm_mutex);
-       mutex_init(&dev_priv->pps_mutex);
-
-       ret = i915_workqueues_init(dev_priv);
-       if (ret < 0)
-               return ret;
-
-       ret = intel_gvt_init(dev_priv);
-       if (ret < 0)
-               goto err_workqueues;
-
-       /* This must be called before any calls to HAS_PCH_* */
-       intel_detect_pch(dev);
-
-       intel_pm_setup(dev);
-       intel_init_dpio(dev_priv);
-       intel_power_domains_init(dev_priv);
-       intel_irq_init(dev_priv);
-       intel_init_display_hooks(dev_priv);
-       intel_init_clock_gating_hooks(dev_priv);
-       intel_init_audio_hooks(dev_priv);
-       i915_gem_load_init(dev);
-
-       intel_display_crc_init(dev);
-
-       i915_dump_device_info(dev_priv);
-
-       /* Not all pre-production machines fall into this category, only the
-        * very first ones. Almost everything should work, except for maybe
-        * suspend/resume. And we don't implement workarounds that affect only
-        * pre-production machines. */
-       if (IS_HSW_EARLY_SDV(dev))
-               DRM_INFO("This is an early pre-production Haswell machine. "
-                        "It may not be fully functional.\n");
-
-       return 0;
-
-err_workqueues:
-       i915_workqueues_cleanup(dev_priv);
-       return ret;
-}
-
-/**
- * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
- * @dev_priv: device private
- */
-static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
-{
-       i915_gem_load_cleanup(dev_priv->dev);
-       i915_workqueues_cleanup(dev_priv);
-}
-
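-/*
- * Map the register BAR (sized per generation so the GTT half of the
- * shared gen4+ BAR is left for ioremap_wc) and enable MCHBAR.
- */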
-static int i915_mmio_setup(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int mmio_bar;
-       int mmio_size;
-
-       mmio_bar = IS_GEN2(dev) ? 1 : 0;
-       /*
-        * Before gen4, the registers and the GTT are behind different BARs.
-        * However, from gen4 onwards, the registers and the GTT are shared
-        * in the same BAR, so we want to restrict this ioremap so that it
-        * does not clobber the GTT, which we map with ioremap_wc instead.
-        * Fortunately,
-        * the register BAR remains the same size for all the earlier
-        * generations up to Ironlake.
-        */
-       if (INTEL_INFO(dev)->gen < 5)
-               mmio_size = 512 * 1024;
-       else
-               mmio_size = 2 * 1024 * 1024;
-       dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
-       if (dev_priv->regs == NULL) {
-               DRM_ERROR("failed to map registers\n");
-
-               return -EIO;
-       }
-
-       /* Try to make sure MCHBAR is enabled before poking at it */
-       intel_setup_mchbar(dev);
-
-       return 0;
-}
-
-static void i915_mmio_cleanup(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       intel_teardown_mchbar(dev);
-       pci_iounmap(dev->pdev, dev_priv->regs);
-}
-
-/**
- * i915_driver_init_mmio - setup device MMIO
- * @dev_priv: device private
- *
- * Set up minimal device state necessary for MMIO accesses later in the
- * initialization sequence. The setup here should avoid any other device-wide
- * side effects or exposing the driver via kernel internal or user space
- * interfaces.
- */
-static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       int ret;
-
-       if (i915_inject_load_failure())
-               return -ENODEV;
-
-       if (i915_get_bridge_dev(dev))
-               return -EIO;
-
-       ret = i915_mmio_setup(dev);
-       if (ret < 0)
-               goto put_bridge;
-
-       intel_uncore_init(dev_priv);
-
-       return 0;
-
-put_bridge:
-       pci_dev_put(dev_priv->bridge_dev);
-
-       return ret;
-}
-
-/**
- * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
- * @dev_priv: device private
- */
-static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-
-       intel_uncore_fini(dev_priv);
-       i915_mmio_cleanup(dev);
-       pci_dev_put(dev_priv->bridge_dev);
-}
-
-/**
- * i915_driver_init_hw - setup state requiring device access
- * @dev_priv: device private
- *
- * Set up state that requires accessing the device, but doesn't require
- * exposing the driver via kernel internal or userspace interfaces.
- */
-static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       uint32_t aperture_size;
-       int ret;
-
-       if (i915_inject_load_failure())
-               return -ENODEV;
-
-       intel_device_info_runtime_init(dev);
-
-       ret = i915_ggtt_init_hw(dev);
-       if (ret)
-               return ret;
-
-       ret = i915_ggtt_enable_hw(dev);
-       if (ret) {
-               DRM_ERROR("failed to enable GGTT\n");
-               goto out_ggtt;
-       }
-
-       /* WARNING: Apparently we must kick fbdev drivers before vgacon,
-        * otherwise the vga fbdev driver falls over. */
-       ret = i915_kick_out_firmware_fb(dev_priv);
-       if (ret) {
-               DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-               goto out_ggtt;
-       }
-
-       ret = i915_kick_out_vgacon(dev_priv);
-       if (ret) {
-               DRM_ERROR("failed to remove conflicting VGA console\n");
-               goto out_ggtt;
-       }
-
-       pci_set_master(dev->pdev);
-
-       /* overlay on gen2 is broken and can't address above 1G */
-       if (IS_GEN2(dev)) {
-               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
-               if (ret) {
-                       DRM_ERROR("failed to set DMA mask\n");
-
-                       goto out_ggtt;
-               }
-       }
-
-
-       /* 965GM sometimes incorrectly writes to hardware status page (HWS)
-        * using 32bit addressing, overwriting memory if HWS is located
-        * above 4GB.
-        *
-        * The documentation also mentions an issue with undefined
-        * behaviour if any general state is accessed within a page above 4GB,
-        * which also needs to be handled carefully.
-        */
-       if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
-               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
-
-               if (ret) {
-                       DRM_ERROR("failed to set DMA mask\n");
-
-                       goto out_ggtt;
-               }
-       }
-
-       aperture_size = ggtt->mappable_end;
-
-       ggtt->mappable =
-               io_mapping_create_wc(ggtt->mappable_base,
-                                    aperture_size);
-       if (!ggtt->mappable) {
-               ret = -EIO;
-               goto out_ggtt;
-       }
-
-       ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
-                                             aperture_size);
-
-       pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
-                          PM_QOS_DEFAULT_VALUE);
-
-       intel_uncore_sanitize(dev_priv);
-
-       intel_opregion_setup(dev_priv);
-
-       i915_gem_load_init_fences(dev_priv);
-
-       /* On the 945G/GM, the chipset reports the MSI capability on the
-        * integrated graphics even though the support isn't actually there
-        * according to the published specs.  It doesn't appear to function
-        * correctly in testing on 945G.
-        * This may be a side effect of MSI having been made available for PEG
-        * and the registers being closely associated.
-        *
-        * According to chipset errata, on the 965GM, MSI interrupts may
-        * be lost or delayed, but we use them anyway to avoid
-        * stuck interrupts on some machines.
-        */
-       if (!IS_I945G(dev) && !IS_I945GM(dev)) {
-               if (pci_enable_msi(dev->pdev) < 0)
-                       DRM_DEBUG_DRIVER("can't enable MSI");
-       }
-
-       return 0;
-
-out_ggtt:
-       i915_ggtt_cleanup_hw(dev);
-
-       return ret;
-}
-
-/**
- * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
- * @dev_priv: device private
- */
-static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-       if (dev->pdev->msi_enabled)
-               pci_disable_msi(dev->pdev);
-
-       pm_qos_remove_request(&dev_priv->pm_qos);
-       arch_phys_wc_del(ggtt->mtrr);
-       io_mapping_free(ggtt->mappable);
-       i915_ggtt_cleanup_hw(dev);
-}
-
-/**
- * i915_driver_register - register the driver with the rest of the system
- * @dev_priv: device private
- *
- * Perform any steps necessary to make the driver available via kernel
- * internal or userspace interfaces.
- */
-static void i915_driver_register(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-
-       i915_gem_shrinker_init(dev_priv);
-       /*
-        * Notify a valid surface after modesetting,
-        * when running inside a VM.
-        */
-       if (intel_vgpu_active(dev_priv))
-               I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
-
-       i915_setup_sysfs(dev);
-
-       if (INTEL_INFO(dev_priv)->num_pipes) {
-               /* Must be done after probing outputs */
-               intel_opregion_register(dev_priv);
-               acpi_video_register();
-       }
-
-       if (IS_GEN5(dev_priv))
-               intel_gpu_ips_init(dev_priv);
-
-       i915_audio_component_init(dev_priv);
-}
-
-/**
- * i915_driver_unregister - cleanup the registration done in i915_driver_register()
- * @dev_priv: device private
- */
-static void i915_driver_unregister(struct drm_i915_private *dev_priv)
-{
-       i915_audio_component_cleanup(dev_priv);
-       intel_gpu_ips_teardown();
-       acpi_video_unregister();
-       intel_opregion_unregister(dev_priv);
-       i915_teardown_sysfs(dev_priv->dev);
-       i915_gem_shrinker_cleanup(dev_priv);
-}
-
-/**
- * i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
- *
- * The driver load routine has to do several things:
- *   - drive output discovery via intel_modeset_init()
- *   - initialize the memory manager
- *   - allocate initial config memory
- *   - setup the DRM framebuffer with the allocated memory
- */
-int i915_driver_load(struct drm_device *dev, unsigned long flags)
-{
-       struct drm_i915_private *dev_priv;
-       int ret = 0;
-
-       dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-       if (dev_priv == NULL)
-               return -ENOMEM;
-
-       dev->dev_private = dev_priv;
-       /* Must be set before calling __i915_printk */
-       dev_priv->dev = dev;
-
-       ret = i915_driver_init_early(dev_priv, dev,
-                                    (struct intel_device_info *)flags);
-
-       if (ret < 0)
-               goto out_free_priv;
-
-       intel_runtime_pm_get(dev_priv);
-
-       ret = i915_driver_init_mmio(dev_priv);
-       if (ret < 0)
-               goto out_runtime_pm_put;
-
-       ret = i915_driver_init_hw(dev_priv);
-       if (ret < 0)
-               goto out_cleanup_mmio;
-
-       /*
-        * TODO: move the vblank init and parts of modeset init steps into one
-        * of the i915_driver_init_/i915_driver_register functions according
-        * to the role/effect of the given init step.
-        */
-       if (INTEL_INFO(dev)->num_pipes) {
-               ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
-               if (ret)
-                       goto out_cleanup_hw;
-       }
-
-       ret = i915_load_modeset_init(dev);
-       if (ret < 0)
-               goto out_cleanup_vblank;
-
-       i915_driver_register(dev_priv);
-
-       intel_runtime_pm_enable(dev_priv);
-
-       intel_runtime_pm_put(dev_priv);
-
-       return 0;
-
-out_cleanup_vblank:
-       drm_vblank_cleanup(dev);
-out_cleanup_hw:
-       i915_driver_cleanup_hw(dev_priv);
-out_cleanup_mmio:
-       i915_driver_cleanup_mmio(dev_priv);
-out_runtime_pm_put:
-       intel_runtime_pm_put(dev_priv);
-       i915_driver_cleanup_early(dev_priv);
-out_free_priv:
-       i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
-
-       kfree(dev_priv);
-
-       return ret;
-}
-
-int i915_driver_unload(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       intel_fbdev_fini(dev);
-
-       intel_gvt_cleanup(dev_priv);
-
-       ret = i915_gem_suspend(dev);
-       if (ret) {
-               DRM_ERROR("failed to idle hardware: %d\n", ret);
-               return ret;
-       }
-
-       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
-       i915_driver_unregister(dev_priv);
-
-       drm_vblank_cleanup(dev);
-
-       intel_modeset_cleanup(dev);
-
-       /*
-        * free the memory space allocated for the child device
-        * config parsed from VBT
-        */
-       if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-               kfree(dev_priv->vbt.child_dev);
-               dev_priv->vbt.child_dev = NULL;
-               dev_priv->vbt.child_dev_num = 0;
-       }
-       kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-       kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-       dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
-
-       vga_switcheroo_unregister_client(dev->pdev);
-       vga_client_register(dev->pdev, NULL, NULL, NULL);
-
-       intel_csr_ucode_fini(dev_priv);
-
-       /* Free error state after interrupts are fully disabled. */
-       cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-       i915_destroy_error_state(dev);
-
-       /* Flush any outstanding unpin_work. */
-       flush_workqueue(dev_priv->wq);
-
-       intel_guc_fini(dev);
-       i915_gem_fini(dev);
-       intel_fbc_cleanup_cfb(dev_priv);
-
-       intel_power_domains_fini(dev_priv);
-
-       i915_driver_cleanup_hw(dev_priv);
-       i915_driver_cleanup_mmio(dev_priv);
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
-       i915_driver_cleanup_early(dev_priv);
-       kfree(dev_priv);
-
-       return 0;
-}
-
-int i915_driver_open(struct drm_device *dev, struct drm_file *file)
-{
-       int ret;
-
-       ret = i915_gem_open(dev, file);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-/**
- * i915_driver_lastclose - clean up after all DRM clients have exited
- * @dev: DRM device
- *
- * Take care of cleaning up after all DRM clients have exited.  In the
- * mode setting case, we want to restore the kernel's initial mode (just
- * in case the last client left us in a bad state).
- *
- * Additionally, in the non-mode setting case, we'll tear down the GTT
- * and DMA structures, since the kernel won't be using them, and clean
- * up any GEM state.
- */
-void i915_driver_lastclose(struct drm_device *dev)
-{
-       intel_fbdev_restore_mode(dev);
-       vga_switcheroo_process_delayed_switch();
-}
-
-void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
-{
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_context_close(dev, file);
-       i915_gem_release(dev, file);
-       mutex_unlock(&dev->struct_mutex);
-}
-
-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
-       struct drm_i915_file_private *file_priv = file->driver_priv;
-
-       kfree(file_priv);
-}
-
-static int
-i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
-                         struct drm_file *file)
-{
-       return -ENODEV;
-}
-
-const struct drm_ioctl_desc i915_ioctls[] = {
-       DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
-};
-
-int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
index 3eb47fbcea73c634bdba9a7c857a91909f520c1c..b9a811750ca855f3ab9331c8abf6a88d1b9d8882 100644 (file)
  *
  */
 
-#include <linux/device.h>
 #include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/oom.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/vt.h>
+#include <acpi/video.h>
+
 #include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
 #include <drm/i915_drm.h>
+
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include "i915_vgpu.h"
 #include "intel_drv.h"
 
-#include <linux/console.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/vga_switcheroo.h>
-#include <drm/drm_crtc_helper.h>
-
 static struct drm_driver driver;
 
-#define GEN_DEFAULT_PIPEOFFSETS \
-       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-                         PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
-       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-                          TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
-       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
-
-#define GEN_CHV_PIPEOFFSETS \
-       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-                         CHV_PIPE_C_OFFSET }, \
-       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-                          CHV_TRANSCODER_C_OFFSET, }, \
-       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
-                            CHV_PALETTE_C_OFFSET }
-
-#define CURSOR_OFFSETS \
-       .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
-
-#define IVB_CURSOR_OFFSETS \
-       .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
-
-#define BDW_COLORS \
-       .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
-#define CHV_COLORS \
-       .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
-
-static const struct intel_device_info intel_i830_info = {
-       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+static unsigned int i915_load_fail_count;
 
-static const struct intel_device_info intel_845g_info = {
-       .gen = 2, .num_pipes = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+bool __i915_inject_load_failure(const char *func, int line)
+{
+       if (i915_load_fail_count >= i915.inject_load_failure)
+               return false;
 
-static const struct intel_device_info intel_i85x_info = {
-       .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
-       .cursor_needs_physical = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       if (++i915_load_fail_count == i915.inject_load_failure) {
+               DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
+                        i915.inject_load_failure, func, line);
+               return true;
+       }
+
+       return false;
+}
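+
+/*
+ * Hedged sketch: the i915_inject_load_failure() checkpoints sprinkled
+ * through the load path below are assumed to reach this helper via a
+ * call-site-recording wrapper in i915_drv.h, roughly:
+ *
+ *     #define i915_inject_load_failure() \
+ *             __i915_inject_load_failure(__func__, __LINE__)
+ *
+ * Booting with i915.inject_load_failure=N then makes the Nth checkpoint
+ * report failure, exercising that step's unwind path.
+ */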
+
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+                   "providing the dmesg log by booting with drm.debug=0xf"
+
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+             const char *fmt, ...)
+{
+       static bool shown_bug_once;
+       struct device *dev = dev_priv->drm.dev;
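+       /* printk levels are two-char strings (KERN_SOH plus an ASCII digit,
+        * e.g. KERN_ERR is "\0013"), so comparing the digit at index 1
+        * orders severities numerically ('0' emerg ... '7' debug). */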
+       bool is_error = level[1] <= KERN_ERR[1];
+       bool is_debug = level[1] == KERN_DEBUG[1];
+       struct va_format vaf;
+       va_list args;
+
+       if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+               return;
+
+       va_start(args, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+                  __builtin_return_address(0), &vaf);
+
+       if (is_error && !shown_bug_once) {
+               dev_notice(dev, "%s", FDO_BUG_MSG);
+               shown_bug_once = true;
+       }
+
+       va_end(args);
+}
+
+static bool i915_error_injected(struct drm_i915_private *dev_priv)
+{
+       return i915.inject_load_failure &&
+              i915_load_fail_count == i915.inject_load_failure;
+}
+
+#define i915_load_error(dev_priv, fmt, ...)                                 \
+       __i915_printk(dev_priv,                                              \
+                     i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+                     fmt, ##__VA_ARGS__)
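+
+/*
+ * Example call site (as used in the load path elsewhere in this file):
+ *
+ *     i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+ *
+ * A deliberately injected failure is demoted to KERN_DEBUG so that fault
+ * injection runs do not fill the log with scary-looking errors.
+ */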
+
+static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+{
+       enum intel_pch ret = PCH_NOP;
+
+       /*
+        * In a virtualized passthrough environment we can end up in a
+        * setup where the ISA bridge cannot be passed through.
+        * In this case, a south bridge can be emulated and we have to
+        * make an educated guess as to which PCH is really there.
+        */
+
+       if (IS_GEN5(dev)) {
+               ret = PCH_IBX;
+               DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
+       } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+               ret = PCH_CPT;
+               DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
+       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+               ret = PCH_LPT;
+               DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
+       } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+               ret = PCH_SPT;
+               DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
+       }
+
+       return ret;
+}
+
+static void intel_detect_pch(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pch = NULL;
 
-static const struct intel_device_info intel_i865g_info = {
-       .gen = 2, .num_pipes = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
+        * (which really amounts to a PCH but no South Display).
+        */
+       if (INTEL_INFO(dev)->num_pipes == 0) {
+               dev_priv->pch_type = PCH_NOP;
+               return;
+       }
+
+       /*
+        * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+        * make graphics device passthrough easy for the VMM, which then
+        * only needs to expose the ISA bridge to let the driver know the
+        * real hardware underneath. This is a requirement from the
+        * virtualization team.
+        *
+        * In some virtualized environments (e.g. XEN), there may be an
+        * irrelevant ISA bridge in the system. To work reliably, we should
+        * scan through all the ISA bridge devices and check for the first
+        * match, instead of only checking the first one.
+        */
+       while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+               if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+                       unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+                       dev_priv->pch_id = id;
+
+                       if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_IBX;
+                               DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+                               WARN_ON(!IS_GEN5(dev));
+                       } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_CPT;
+                               DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+                       } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+                               /* PantherPoint is CPT compatible */
+                               dev_priv->pch_type = PCH_CPT;
+                               DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+                       } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_LPT;
+                               DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+                               WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
+                       } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_LPT;
+                               DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+                               WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
+                       } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_SPT;
+                               DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+                               WARN_ON(!IS_SKYLAKE(dev) &&
+                                       !IS_KABYLAKE(dev));
+                       } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_SPT;
+                               DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+                               WARN_ON(!IS_SKYLAKE(dev) &&
+                                       !IS_KABYLAKE(dev));
+                       } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_KBP;
+                               DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+                               WARN_ON(!IS_KABYLAKE(dev));
+                       } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+                                  (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
+                                  ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+                                   pch->subsystem_vendor ==
+                                           PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+                                   pch->subsystem_device ==
+                                           PCI_SUBDEVICE_ID_QEMU)) {
+                               dev_priv->pch_type = intel_virt_detect_pch(dev);
+                       } else {
+                               continue;
+                       }
+
+                       break;
+               }
+       }
+       if (!pch)
+               DRM_DEBUG_KMS("No PCH found.\n");
+
+       pci_dev_put(pch);
+}
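+
+/*
+ * Note on refcounting: pci_get_class() drops the reference on the device
+ * it was handed and takes a reference on the device it returns, so the
+ * scan loop above holds at most one reference at a time and the final
+ * pci_dev_put() releases whichever device broke the loop.
+ */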
+
+bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_GEN(dev_priv) < 6)
+               return false;
+
+       if (i915.semaphores >= 0)
+               return i915.semaphores;
+
+       /* TODO: make semaphores and Execlists play nicely together */
+       if (i915.enable_execlists)
+               return false;
+
+#ifdef CONFIG_INTEL_IOMMU
+       /* Enable semaphores on SNB when IO remapping is off */
+       if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
+               return false;
+#endif
+
+       return true;
+}
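+
+/*
+ * Usage note (assuming the stock i915 module parameters): i915.semaphores
+ * is a tri-state knob, -1 = per-chip default, 0 = off, 1 = on; only an
+ * explicit 0 or 1 survives the "i915.semaphores >= 0" early return above,
+ * otherwise the heuristics that follow decide.
+ */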
+
+static int i915_getparam(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       drm_i915_getparam_t *param = data;
+       int value;
+
+       switch (param->param) {
+       case I915_PARAM_IRQ_ACTIVE:
+       case I915_PARAM_ALLOW_BATCHBUFFER:
+       case I915_PARAM_LAST_DISPATCH:
+               /* Reject all old ums/dri params. */
+               return -ENODEV;
+       case I915_PARAM_CHIPSET_ID:
+               value = dev->pdev->device;
+               break;
+       case I915_PARAM_REVISION:
+               value = dev->pdev->revision;
+               break;
+       case I915_PARAM_HAS_GEM:
+               value = 1;
+               break;
+       case I915_PARAM_NUM_FENCES_AVAIL:
+               value = dev_priv->num_fence_regs;
+               break;
+       case I915_PARAM_HAS_OVERLAY:
+               value = dev_priv->overlay ? 1 : 0;
+               break;
+       case I915_PARAM_HAS_PAGEFLIPPING:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_EXECBUF2:
+               /* depends on GEM */
+               value = 1;
+               break;
+       case I915_PARAM_HAS_BSD:
+               value = intel_engine_initialized(&dev_priv->engine[VCS]);
+               break;
+       case I915_PARAM_HAS_BLT:
+               value = intel_engine_initialized(&dev_priv->engine[BCS]);
+               break;
+       case I915_PARAM_HAS_VEBOX:
+               value = intel_engine_initialized(&dev_priv->engine[VECS]);
+               break;
+       case I915_PARAM_HAS_BSD2:
+               value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+               break;
+       case I915_PARAM_HAS_RELAXED_FENCING:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_COHERENT_RINGS:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_EXEC_CONSTANTS:
+               value = INTEL_INFO(dev)->gen >= 4;
+               break;
+       case I915_PARAM_HAS_RELAXED_DELTA:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_GEN7_SOL_RESET:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_LLC:
+               value = HAS_LLC(dev);
+               break;
+       case I915_PARAM_HAS_WT:
+               value = HAS_WT(dev);
+               break;
+       case I915_PARAM_HAS_ALIASING_PPGTT:
+               value = USES_PPGTT(dev);
+               break;
+       case I915_PARAM_HAS_WAIT_TIMEOUT:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_SEMAPHORES:
+               value = i915_semaphore_is_enabled(dev_priv);
+               break;
+       case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_SECURE_BATCHES:
+               value = capable(CAP_SYS_ADMIN);
+               break;
+       case I915_PARAM_HAS_PINNED_BATCHES:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_EXEC_NO_RELOC:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+               value = 1;
+               break;
+       case I915_PARAM_CMD_PARSER_VERSION:
+               value = i915_cmd_parser_get_version(dev_priv);
+               break;
+       case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+               value = 1;
+               break;
+       case I915_PARAM_MMAP_VERSION:
+               value = 1;
+               break;
+       case I915_PARAM_SUBSLICE_TOTAL:
+               value = INTEL_INFO(dev)->subslice_total;
+               if (!value)
+                       return -ENODEV;
+               break;
+       case I915_PARAM_EU_TOTAL:
+               value = INTEL_INFO(dev)->eu_total;
+               if (!value)
+                       return -ENODEV;
+               break;
+       case I915_PARAM_HAS_GPU_RESET:
+               value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+               break;
+       case I915_PARAM_HAS_RESOURCE_STREAMER:
+               value = HAS_RESOURCE_STREAMER(dev);
+               break;
+       case I915_PARAM_HAS_EXEC_SOFTPIN:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_POOLED_EU:
+               value = HAS_POOLED_EU(dev);
+               break;
+       case I915_PARAM_MIN_EU_IN_POOL:
+               value = INTEL_INFO(dev)->min_eu_in_pool;
+               break;
+       default:
+               DRM_DEBUG("Unknown parameter %d\n", param->param);
+               return -EINVAL;
+       }
+
+       if (put_user(value, param->value))
+               return -EFAULT;
+
+       return 0;
+}
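+
+/*
+ * Hedged userspace-side sketch (not part of this patch): the handler above
+ * backs DRM_IOCTL_I915_GETPARAM, which a libdrm client reaches roughly as:
+ *
+ *     int value = 0;
+ *     drm_i915_getparam_t gp = {
+ *             .param = I915_PARAM_CHIPSET_ID,
+ *             .value = &value,
+ *     };
+ *     if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
+ *             printf("PCI device id: 0x%04x\n", value);
+ */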
+
+static int i915_get_bridge_dev(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+       if (!dev_priv->bridge_dev) {
+               DRM_ERROR("bridge device not found\n");
+               return -1;
+       }
+       return 0;
+}
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+       u32 temp_lo, temp_hi = 0;
+       u64 mchbar_addr;
+       int ret;
+
+       if (INTEL_INFO(dev)->gen >= 4)
+               pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+       pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+       mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+       /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+       if (mchbar_addr &&
+           pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+               return 0;
+#endif
+
+       /* Get some space for it */
+       dev_priv->mch_res.name = "i915 MCHBAR";
+       dev_priv->mch_res.flags = IORESOURCE_MEM;
+       ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+                                    &dev_priv->mch_res,
+                                    MCHBAR_SIZE, MCHBAR_SIZE,
+                                    PCIBIOS_MIN_MEM,
+                                    0, pcibios_align_resource,
+                                    dev_priv->bridge_dev);
+       if (ret) {
+               DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+               dev_priv->mch_res.start = 0;
+               return ret;
+       }
+
+       if (INTEL_INFO(dev)->gen >= 4)
+               pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+                                      upper_32_bits(dev_priv->mch_res.start));
+
+       pci_write_config_dword(dev_priv->bridge_dev, reg,
+                              lower_32_bits(dev_priv->mch_res.start));
+       return 0;
+}
+
+/* Setup MCHBAR if possible; remember whether it must be disabled again on teardown */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+       u32 temp;
+       bool enabled;
+
+       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+               return;
+
+       dev_priv->mchbar_need_disable = false;
+
+       if (IS_I915G(dev) || IS_I915GM(dev)) {
+               pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
+               enabled = !!(temp & DEVEN_MCHBAR_EN);
+       } else {
+               pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+               enabled = temp & 1;
+       }
+
+       /* If it's already enabled, we don't have to do anything */
+       if (enabled)
+               return;
+
+       if (intel_alloc_mchbar_resource(dev))
+               return;
+
+       dev_priv->mchbar_need_disable = true;
+
+       /* Space is allocated or reserved, so enable it. */
+       if (IS_I915G(dev) || IS_I915GM(dev)) {
+               pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+                                      temp | DEVEN_MCHBAR_EN);
+       } else {
+               pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+               pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+       }
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+       if (dev_priv->mchbar_need_disable) {
+               if (IS_I915G(dev) || IS_I915GM(dev)) {
+                       u32 deven_val;
+
+                       pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
+                                             &deven_val);
+                       deven_val &= ~DEVEN_MCHBAR_EN;
+                       pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+                                              deven_val);
+               } else {
+                       u32 mchbar_val;
+
+                       pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
+                                             &mchbar_val);
+                       mchbar_val &= ~1;
+                       pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
+                                              mchbar_val);
+               }
+       }
+
+       if (dev_priv->mch_res.start)
+               release_resource(&dev_priv->mch_res);
+}
+
+/* true = enable decode, false = disable decode */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+       struct drm_device *dev = cookie;
+
+       intel_modeset_vga_set_state(dev, state);
+       if (state)
+               return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+                      VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+       else
+               return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+       if (state == VGA_SWITCHEROO_ON) {
+               pr_info("switched on\n");
+               dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+               /* i915 resume handler doesn't set to D0 */
+               pci_set_power_state(dev->pdev, PCI_D0);
+               i915_resume_switcheroo(dev);
+               dev->switch_power_state = DRM_SWITCH_POWER_ON;
+       } else {
+               pr_info("switched off\n");
+               dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+               i915_suspend_switcheroo(dev, pmm);
+               dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+       }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       /*
+        * FIXME: open_count is protected by drm_global_mutex but that would lead to
+        * locking inversion with the driver load path. And the access here is
+        * completely racy anyway. So don't bother with locking for now.
+        */
+       return dev->open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+       .set_gpu_state = i915_switcheroo_set_state,
+       .reprobe = NULL,
+       .can_switch = i915_switcheroo_can_switch,
 };
 
-static const struct intel_device_info intel_i915g_info = {
-       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i915gm_info = {
-       .gen = 3, .is_mobile = 1, .num_pipes = 2,
-       .cursor_needs_physical = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .supports_tv = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i945g_info = {
-       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i945gm_info = {
-       .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
-       .has_hotplug = 1, .cursor_needs_physical = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .supports_tv = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+static void i915_gem_fini(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       /*
+        * Neither the BIOS, ourselves, nor any other kernel
+        * expects the system to be in execlists mode on startup,
+        * so we need to reset the GPU back to legacy mode. And the only
+        * known way to disable logical contexts is through a GPU reset.
+        *
+        * So in order to leave the system in a known default configuration,
+        * always reset the GPU upon unload. Afterwards we then clean up the
+        * GEM state tracking, flushing off the requests and leaving the
+        * system in a known idle state.
+        *
+        * Note that it is of the utmost importance that the GPU is idle and
+        * all stray writes are flushed *before* we dismantle the backing
+        * storage for the pinned objects.
+        *
+        * However, since we are uncertain that resetting the GPU on older
+        * machines is a good idea, we don't - just in case it leaves the
+        * machine in an unusable condition.
+        */
+       if (HAS_HW_CONTEXTS(dev)) {
+               int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+               WARN_ON(reset && reset != -ENODEV);
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_reset(dev);
+       i915_gem_cleanup_engines(dev);
+       i915_gem_context_fini(dev);
+       mutex_unlock(&dev->struct_mutex);
+
+       WARN_ON(!list_empty(&to_i915(dev)->context_list));
+}
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int ret;
+
+       if (i915_inject_load_failure())
+               return -ENODEV;
+
+       ret = intel_bios_init(dev_priv);
+       if (ret)
+               DRM_INFO("failed to find VBIOS tables\n");
+
+       /* If we have more than one VGA card, then we need to arbitrate
+        * access to the common VGA resources.
+        *
+        * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+        * then we do not take part in VGA arbitration and
+        * vga_client_register() fails with -ENODEV.
+        */
+       ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+       if (ret && ret != -ENODEV)
+               goto out;
+
+       intel_register_dsm_handler();
+
+       ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+       if (ret)
+               goto cleanup_vga_client;
+
+       /* must happen before intel_power_domains_init_hw() on VLV/CHV */
+       intel_update_rawclk(dev_priv);
+
+       intel_power_domains_init_hw(dev_priv, false);
+
+       intel_csr_ucode_init(dev_priv);
+
+       ret = intel_irq_install(dev_priv);
+       if (ret)
+               goto cleanup_csr;
+
+       intel_setup_gmbus(dev);
+
+       /* Important: The output setup functions called by modeset_init need
+        * working irqs for e.g. gmbus and dp aux transfers. */
+       intel_modeset_init(dev);
+
+       intel_guc_init(dev);
+
+       ret = i915_gem_init(dev);
+       if (ret)
+               goto cleanup_irq;
+
+       intel_modeset_gem_init(dev);
+
+       if (INTEL_INFO(dev)->num_pipes == 0)
+               return 0;
+
+       ret = intel_fbdev_init(dev);
+       if (ret)
+               goto cleanup_gem;
+
+       /* Only enable hotplug handling once the fbdev is fully set up. */
+       intel_hpd_init(dev_priv);
+
+       drm_kms_helper_poll_init(dev);
+
+       return 0;
+
+cleanup_gem:
+       i915_gem_fini(dev);
+cleanup_irq:
+       intel_guc_fini(dev);
+       drm_irq_uninstall(dev);
+       intel_teardown_gmbus(dev);
+cleanup_csr:
+       intel_csr_ucode_fini(dev_priv);
+       intel_power_domains_fini(dev_priv);
+       vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+       vga_client_register(dev->pdev, NULL, NULL, NULL);
+out:
+       return ret;
+}
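+
+/*
+ * Note: the unwind labels above run in exact reverse order of the setup
+ * steps (gem, irq, csr, vga client), the usual kernel goto-cleanup idiom;
+ * each new init step gets a matching label one slot up the chain.
+ */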
+
+#if IS_ENABLED(CONFIG_FB)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       struct apertures_struct *ap;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       bool primary;
+       int ret;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return -ENOMEM;
+
+       ap->ranges[0].base = ggtt->mappable_base;
+       ap->ranges[0].size = ggtt->mappable_end;
+
+       primary =
+               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+       ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+       kfree(ap);
+
+       return ret;
+}
+#else
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       return 0;
+}
+#endif
+
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       int ret = 0;
+
+       DRM_INFO("Replacing VGA console driver\n");
+
+       console_lock();
+       if (con_is_bound(&vga_con))
+               ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+       if (ret == 0) {
+               ret = do_unregister_con_driver(&vga_con);
+
+               /* Ignore "already unregistered". */
+               if (ret == -ENODEV)
+                       ret = 0;
+       }
+       console_unlock();
+
+       return ret;
+}
+#endif
+
+static void intel_init_dpio(struct drm_i915_private *dev_priv)
+{
+       /*
+        * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+        * CHV x1 PHY (DP/HDMI D)
+        * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+        */
+       if (IS_CHERRYVIEW(dev_priv)) {
+               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+               DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+       } else if (IS_VALLEYVIEW(dev_priv)) {
+               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+       }
+}
+
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+       /*
+        * The i915 workqueue is primarily used for batched retirement of
+        * requests (and thus managing bo) once the task has been completed
+        * by the GPU. i915_gem_retire_requests() is called directly when we
+        * need high-priority retirement, such as waiting for an explicit
+        * bo.
+        *
+        * It is also used for periodic low-priority events, such as
+        * idle-timers and recording error state.
+        *
+        * All tasks on the workqueue are expected to acquire the dev mutex
+        * so there is no point in running more than one instance of the
+        * workqueue at any time.  Use an ordered one.
+        */
+       dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+       if (dev_priv->wq == NULL)
+               goto out_err;
+
+       dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+       if (dev_priv->hotplug.dp_wq == NULL)
+               goto out_free_wq;
+
+       return 0;
+
+out_free_wq:
+       destroy_workqueue(dev_priv->wq);
+out_err:
+       DRM_ERROR("Failed to allocate workqueues.\n");
+
+       return -ENOMEM;
+}
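+
+/*
+ * Note: alloc_ordered_workqueue() yields a queue that executes at most one
+ * work item at a time, in queueing order, which is what the "use an
+ * ordered one" comment above relies on.
+ */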
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+       destroy_workqueue(dev_priv->hotplug.dp_wq);
+       destroy_workqueue(dev_priv->wq);
+}
+
+/**
+ * i915_driver_init_early - setup state not requiring device access
+ * @dev_priv: device private
+ *
+ * Initialize everything that is a "SW-only" state, that is state not
+ * requiring accessing the device or exposing the driver via kernel internal
+ * or userspace interfaces. Example steps belonging here: lock initialization,
+ * system memory allocation, setting up device specific attributes and
+ * function hooks not requiring accessing the device.
+ */
+static int i915_driver_init_early(struct drm_i915_private *dev_priv,
+                                 const struct pci_device_id *ent)
+{
+       const struct intel_device_info *match_info =
+               (struct intel_device_info *)ent->driver_data;
+       struct intel_device_info *device_info;
+       int ret = 0;
+
+       if (i915_inject_load_failure())
+               return -ENODEV;
+
+       /* Setup the write-once "constant" device info */
+       device_info = mkwrite_device_info(dev_priv);
+       memcpy(device_info, match_info, sizeof(*device_info));
+       device_info->device_id = dev_priv->drm.pdev->device;
+
+       BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+       device_info->gen_mask = BIT(device_info->gen - 1);
+
+       spin_lock_init(&dev_priv->irq_lock);
+       spin_lock_init(&dev_priv->gpu_error.lock);
+       mutex_init(&dev_priv->backlight_lock);
+       spin_lock_init(&dev_priv->uncore.lock);
+       spin_lock_init(&dev_priv->mm.object_stat_lock);
+       spin_lock_init(&dev_priv->mmio_flip_lock);
+       mutex_init(&dev_priv->sb_lock);
+       mutex_init(&dev_priv->modeset_restore_lock);
+       mutex_init(&dev_priv->av_mutex);
+       mutex_init(&dev_priv->wm.wm_mutex);
+       mutex_init(&dev_priv->pps_mutex);
+
+       ret = i915_workqueues_init(dev_priv);
+       if (ret < 0)
+               return ret;
+
+       ret = intel_gvt_init(dev_priv);
+       if (ret < 0)
+               goto err_workqueues;
+
+       /* This must be called before any calls to HAS_PCH_* */
+       intel_detect_pch(&dev_priv->drm);
+
+       intel_pm_setup(&dev_priv->drm);
+       intel_init_dpio(dev_priv);
+       intel_power_domains_init(dev_priv);
+       intel_irq_init(dev_priv);
+       intel_init_display_hooks(dev_priv);
+       intel_init_clock_gating_hooks(dev_priv);
+       intel_init_audio_hooks(dev_priv);
+       i915_gem_load_init(&dev_priv->drm);
+
+       intel_display_crc_init(&dev_priv->drm);
+
+       intel_device_info_dump(dev_priv);
+
+       /* Not all pre-production machines fall into this category, only the
+        * very first ones. Almost everything should work, except for maybe
+        * suspend/resume. And we don't implement workarounds that affect only
+        * pre-production machines. */
+       if (IS_HSW_EARLY_SDV(dev_priv))
+               DRM_INFO("This is an early pre-production Haswell machine. "
+                        "It may not be fully functional.\n");
+
+       return 0;
+
+err_workqueues:
+       i915_workqueues_cleanup(dev_priv);
+       return ret;
+}
+
+/**
+ * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+{
+       i915_gem_load_cleanup(&dev_priv->drm);
+       i915_workqueues_cleanup(dev_priv);
+}
+
+static int i915_mmio_setup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int mmio_bar;
+       int mmio_size;
+
+       mmio_bar = IS_GEN2(dev) ? 1 : 0;
+       /*
+        * Before gen4, the registers and the GTT are behind different BARs.
+        * However, from gen4 onwards, the registers and the GTT are shared
+        * in the same BAR, so we want to restrict this ioremap from
+        * clobbering the GTT, which we want mapped with ioremap_wc instead.
+        * Fortunately, the register BAR remains the same size for all the
+        * earlier generations up to Ironlake.
+        */
+       if (INTEL_INFO(dev)->gen < 5)
+               mmio_size = 512 * 1024;
+       else
+               mmio_size = 2 * 1024 * 1024;
+       dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+       if (dev_priv->regs == NULL) {
+               DRM_ERROR("failed to map registers\n");
+
+               return -EIO;
+       }
+
+       /* Try to make sure MCHBAR is enabled before poking at it */
+       intel_setup_mchbar(dev);
+
+       return 0;
+}
+
+static void i915_mmio_cleanup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       intel_teardown_mchbar(dev);
+       pci_iounmap(dev->pdev, dev_priv->regs);
+}
+
+/**
+ * i915_driver_init_mmio - setup device MMIO
+ * @dev_priv: device private
+ *
+ * Setup minimal device state necessary for MMIO accesses later in the
+ * initialization sequence. The setup here should avoid any other device-wide
+ * side effects or exposing the driver via kernel internal or user space
+ * interfaces.
+ */
+static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       int ret;
+
+       if (i915_inject_load_failure())
+               return -ENODEV;
+
+       if (i915_get_bridge_dev(dev))
+               return -EIO;
+
+       ret = i915_mmio_setup(dev);
+       if (ret < 0)
+               goto put_bridge;
+
+       intel_uncore_init(dev_priv);
+
+       return 0;
+
+put_bridge:
+       pci_dev_put(dev_priv->bridge_dev);
+
+       return ret;
+}
+
+/**
+ * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+
+       intel_uncore_fini(dev_priv);
+       i915_mmio_cleanup(dev);
+       pci_dev_put(dev_priv->bridge_dev);
+}
+
+static void intel_sanitize_options(struct drm_i915_private *dev_priv)
+{
+       i915.enable_execlists =
+               intel_sanitize_enable_execlists(dev_priv,
+                                               i915.enable_execlists);
+
+       /*
+        * i915.enable_ppgtt is read-only, so do an early pass to validate the
+        * user's requested state against the hardware/driver capabilities.  We
+        * do this now so that we can print out any log messages once rather
+        * than every time we check intel_enable_ppgtt().
+        */
+       i915.enable_ppgtt =
+               intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
+       DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+}
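+
+/*
+ * Usage note (assuming the stock i915 module parameters): enable_ppgtt is
+ * documented as -1 = auto, 0 = disabled, 1 = aliasing ppgtt, 2 = full
+ * ppgtt, 3 = full ppgtt with extended address space where supported; the
+ * sanitized value computed above is what the rest of the driver sees.
+ */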
+
+/**
+ * i915_driver_init_hw - setup state requiring device access
+ * @dev_priv: device private
+ *
+ * Setup state that requires accessing the device, but doesn't require
+ * exposing the driver via kernel internal or userspace interfaces.
+ */
+static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       uint32_t aperture_size;
+       int ret;
+
+       if (i915_inject_load_failure())
+               return -ENODEV;
+
+       intel_device_info_runtime_init(dev_priv);
+
+       intel_sanitize_options(dev_priv);
+
+       ret = i915_ggtt_init_hw(dev);
+       if (ret)
+               return ret;
+
+       ret = i915_ggtt_enable_hw(dev);
+       if (ret) {
+               DRM_ERROR("failed to enable GGTT\n");
+               goto out_ggtt;
+       }
+
+       /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+        * otherwise the vga fbdev driver falls over. */
+       ret = i915_kick_out_firmware_fb(dev_priv);
+       if (ret) {
+               DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+               goto out_ggtt;
+       }
+
+       ret = i915_kick_out_vgacon(dev_priv);
+       if (ret) {
+               DRM_ERROR("failed to remove conflicting VGA console\n");
+               goto out_ggtt;
+       }
+
+       pci_set_master(dev->pdev);
+
+       /* overlay on gen2 is broken and can't address above 1G */
+       if (IS_GEN2(dev)) {
+               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+               if (ret) {
+                       DRM_ERROR("failed to set DMA mask\n");
+
+                       goto out_ggtt;
+               }
+       }
+
+       /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+        * using 32bit addressing, overwriting memory if HWS is located
+        * above 4GB.
+        *
+        * The documentation also mentions an issue with undefined
+        * behaviour if any general state is accessed within a page above 4GB,
+        * which also needs to be handled carefully.
+        */
+       if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
+               if (ret) {
+                       DRM_ERROR("failed to set DMA mask\n");
+
+                       goto out_ggtt;
+               }
+       }
+
+       aperture_size = ggtt->mappable_end;
+
+       ggtt->mappable =
+               io_mapping_create_wc(ggtt->mappable_base,
+                                    aperture_size);
+       if (!ggtt->mappable) {
+               ret = -EIO;
+               goto out_ggtt;
+       }
+
+       ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
+                                             aperture_size);
+
+       pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+                          PM_QOS_DEFAULT_VALUE);
+
+       intel_uncore_sanitize(dev_priv);
+
+       intel_opregion_setup(dev_priv);
+
+       i915_gem_load_init_fences(dev_priv);
+
+       /* On the 945G/GM, the chipset reports the MSI capability on the
+        * integrated graphics even though the support isn't actually there
+        * according to the published specs.  It doesn't appear to function
+        * correctly in testing on 945G.
+        * This may be a side effect of MSI having been made available for PEG
+        * and the registers being closely associated.
+        *
+        * According to chipset errata, on the 965GM, MSI interrupts may
+        * be lost or delayed, but we use them anyway to avoid
+        * stuck interrupts on some machines.
+        */
+       if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+               if (pci_enable_msi(dev->pdev) < 0)
+                       DRM_DEBUG_DRIVER("can't enable MSI");
+       }
+
+       return 0;
+
+out_ggtt:
+       i915_ggtt_cleanup_hw(dev);
+
+       return ret;
+}
+
+/**
+ * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+       if (dev->pdev->msi_enabled)
+               pci_disable_msi(dev->pdev);
+
+       pm_qos_remove_request(&dev_priv->pm_qos);
+       arch_phys_wc_del(ggtt->mtrr);
+       io_mapping_free(ggtt->mappable);
+       i915_ggtt_cleanup_hw(dev);
+}
+
+/**
+ * i915_driver_register - register the driver with the rest of the system
+ * @dev_priv: device private
+ *
+ * Perform any steps necessary to make the driver available via kernel
+ * internal or userspace interfaces.
+ */
+static void i915_driver_register(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+
+       i915_gem_shrinker_init(dev_priv);
 
-static const struct intel_device_info intel_i965g_info = {
-       .gen = 4, .is_broadwater = 1, .num_pipes = 2,
-       .has_hotplug = 1,
-       .has_overlay = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       /*
+        * Notify a valid surface after modesetting,
+        * when running inside a VM.
+        */
+       if (intel_vgpu_active(dev_priv))
+               I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+
+       /* Reveal our presence to userspace */
+       if (drm_dev_register(dev, 0) == 0) {
+               i915_debugfs_register(dev_priv);
+               i915_setup_sysfs(dev);
+       } else {
+               DRM_ERROR("Failed to register driver for userspace access!\n");
+       }
+
+       if (INTEL_INFO(dev_priv)->num_pipes) {
+               /* Must be done after probing outputs */
+               intel_opregion_register(dev_priv);
+               acpi_video_register();
+       }
 
-static const struct intel_device_info intel_i965gm_info = {
-       .gen = 4, .is_crestline = 1, .num_pipes = 2,
-       .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
-       .has_overlay = 1,
-       .supports_tv = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       if (IS_GEN5(dev_priv))
+               intel_gpu_ips_init(dev_priv);
 
-static const struct intel_device_info intel_g33_info = {
-       .gen = 3, .is_g33 = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_overlay = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       i915_audio_component_init(dev_priv);
 
-static const struct intel_device_info intel_g45_info = {
-       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
-       .has_pipe_cxsr = 1, .has_hotplug = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       /*
+        * Some ports require correctly set-up hpd registers for detection to
+        * work properly (otherwise we get a ghost "connected" connector
+        * status), e.g. VGA on gm45.  Hence we can only set up the initial
+        * fbdev config after hpd irqs are fully enabled. We do it last so
+        * that the async config
+        * cannot run before the connectors are registered.
+        */
+       intel_fbdev_initial_config_async(dev);
+}
 
-static const struct intel_device_info intel_gm45_info = {
-       .gen = 4, .is_g4x = 1, .num_pipes = 2,
-       .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
-       .has_pipe_cxsr = 1, .has_hotplug = 1,
-       .supports_tv = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+/**
+ * i915_driver_unregister - cleanup the registration done in i915_driver_register()
+ * @dev_priv: device private
+ */
+static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+{
+       i915_audio_component_cleanup(dev_priv);
 
-static const struct intel_device_info intel_pineview_info = {
-       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_overlay = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       intel_gpu_ips_teardown();
+       acpi_video_unregister();
+       intel_opregion_unregister(dev_priv);
 
-static const struct intel_device_info intel_ironlake_d_info = {
-       .gen = 5, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       i915_teardown_sysfs(&dev_priv->drm);
+       i915_debugfs_unregister(dev_priv);
+       drm_dev_unregister(&dev_priv->drm);
 
-static const struct intel_device_info intel_ironlake_m_info = {
-       .gen = 5, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       i915_gem_shrinker_cleanup(dev_priv);
+}
 
-static const struct intel_device_info intel_sandybridge_d_info = {
-       .gen = 6, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-       .has_llc = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ *   - drive output discovery via intel_modeset_init()
+ *   - initialize the memory manager
+ *   - allocate initial config memory
+ *   - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct drm_i915_private *dev_priv;
+       int ret;
 
-static const struct intel_device_info intel_sandybridge_m_info = {
-       .gen = 6, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-       .has_llc = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       if (i915.nuclear_pageflip)
+               driver.driver_features |= DRIVER_ATOMIC;
 
-#define GEN7_FEATURES  \
-       .gen = 7, .num_pipes = 3, \
-       .need_gfx_hws = 1, .has_hotplug = 1, \
-       .has_fbc = 1, \
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
-       .has_llc = 1, \
-       GEN_DEFAULT_PIPEOFFSETS, \
-       IVB_CURSOR_OFFSETS
-
-static const struct intel_device_info intel_ivybridge_d_info = {
-       GEN7_FEATURES,
-       .is_ivybridge = 1,
-};
+       ret = -ENOMEM;
+       dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+       if (dev_priv)
+               ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
+       if (ret) {
+               dev_printk(KERN_ERR, &pdev->dev,
+                          "[" DRM_NAME ":%s] allocation failed\n", __func__);
+               kfree(dev_priv);
+               return ret;
+       }
 
-static const struct intel_device_info intel_ivybridge_m_info = {
-       GEN7_FEATURES,
-       .is_ivybridge = 1,
-       .is_mobile = 1,
-};
+       dev_priv->drm.pdev = pdev;
+       dev_priv->drm.dev_private = dev_priv;
 
-static const struct intel_device_info intel_ivybridge_q_info = {
-       GEN7_FEATURES,
-       .is_ivybridge = 1,
-       .num_pipes = 0, /* legal, last one wins */
-};
+       ret = pci_enable_device(pdev);
+       if (ret)
+               goto out_free_priv;
 
-#define VLV_FEATURES  \
-       .gen = 7, .num_pipes = 2, \
-       .need_gfx_hws = 1, .has_hotplug = 1, \
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
-       .display_mmio_offset = VLV_DISPLAY_BASE, \
-       GEN_DEFAULT_PIPEOFFSETS, \
-       CURSOR_OFFSETS
-
-static const struct intel_device_info intel_valleyview_m_info = {
-       VLV_FEATURES,
-       .is_valleyview = 1,
-       .is_mobile = 1,
-};
+       pci_set_drvdata(pdev, &dev_priv->drm);
 
-static const struct intel_device_info intel_valleyview_d_info = {
-       VLV_FEATURES,
-       .is_valleyview = 1,
-};
+       ret = i915_driver_init_early(dev_priv, ent);
+       if (ret < 0)
+               goto out_pci_disable;
 
-#define HSW_FEATURES  \
-       GEN7_FEATURES, \
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
-       .has_ddi = 1, \
-       .has_fpga_dbg = 1
+       intel_runtime_pm_get(dev_priv);
 
-static const struct intel_device_info intel_haswell_d_info = {
-       HSW_FEATURES,
-       .is_haswell = 1,
-};
+       ret = i915_driver_init_mmio(dev_priv);
+       if (ret < 0)
+               goto out_runtime_pm_put;
 
-static const struct intel_device_info intel_haswell_m_info = {
-       HSW_FEATURES,
-       .is_haswell = 1,
-       .is_mobile = 1,
-};
+       ret = i915_driver_init_hw(dev_priv);
+       if (ret < 0)
+               goto out_cleanup_mmio;
 
-#define BDW_FEATURES \
-       HSW_FEATURES, \
-       BDW_COLORS
+       /*
+        * TODO: move the vblank init and parts of modeset init steps into one
+        * of the i915_driver_init_/i915_driver_register functions according
+        * to the role/effect of the given init step.
+        */
+       if (INTEL_INFO(dev_priv)->num_pipes) {
+               ret = drm_vblank_init(&dev_priv->drm,
+                                     INTEL_INFO(dev_priv)->num_pipes);
+               if (ret)
+                       goto out_cleanup_hw;
+       }
 
-static const struct intel_device_info intel_broadwell_d_info = {
-       BDW_FEATURES,
-       .gen = 8,
-       .is_broadwell = 1,
-};
+       ret = i915_load_modeset_init(&dev_priv->drm);
+       if (ret < 0)
+               goto out_cleanup_vblank;
 
-static const struct intel_device_info intel_broadwell_m_info = {
-       BDW_FEATURES,
-       .gen = 8, .is_mobile = 1,
-       .is_broadwell = 1,
-};
+       i915_driver_register(dev_priv);
 
-static const struct intel_device_info intel_broadwell_gt3d_info = {
-       BDW_FEATURES,
-       .gen = 8,
-       .is_broadwell = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+       intel_runtime_pm_enable(dev_priv);
 
-static const struct intel_device_info intel_broadwell_gt3m_info = {
-       BDW_FEATURES,
-       .gen = 8, .is_mobile = 1,
-       .is_broadwell = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+       intel_runtime_pm_put(dev_priv);
 
-static const struct intel_device_info intel_cherryview_info = {
-       .gen = 8, .num_pipes = 3,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-       .is_cherryview = 1,
-       .display_mmio_offset = VLV_DISPLAY_BASE,
-       GEN_CHV_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-       CHV_COLORS,
-};
+       return 0;
 
-static const struct intel_device_info intel_skylake_info = {
-       BDW_FEATURES,
-       .is_skylake = 1,
-       .gen = 9,
-};
+out_cleanup_vblank:
+       drm_vblank_cleanup(&dev_priv->drm);
+out_cleanup_hw:
+       i915_driver_cleanup_hw(dev_priv);
+out_cleanup_mmio:
+       i915_driver_cleanup_mmio(dev_priv);
+out_runtime_pm_put:
+       intel_runtime_pm_put(dev_priv);
+       i915_driver_cleanup_early(dev_priv);
+out_pci_disable:
+       pci_disable_device(pdev);
+out_free_priv:
+       i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+       drm_dev_unref(&dev_priv->drm);
+       return ret;
+}
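
The error path above is the kernel's standard goto-based unwind ladder: each successfully completed init step gets a label, placed in reverse order of acquisition, so a failure at any step releases exactly what the earlier steps acquired and nothing more. A minimal, self-contained sketch of the same shape (hypothetical resources, not the i915 APIs):

        #include <errno.h>
        #include <stdlib.h>

        /* Two-step init mirroring the unwind ladder in i915_driver_load(). */
        static int example_load(void)
        {
                void *a, *b;
                int ret;

                a = malloc(16);                 /* step 1 */
                if (!a)
                        return -ENOMEM;

                b = malloc(16);                 /* step 2 */
                if (!b) {
                        ret = -ENOMEM;
                        goto out_free_a;        /* undo step 1 only */
                }

                /* further steps would add labels above out_free_a ... */
                free(b);
                free(a);
                return 0;

        out_free_a:
                free(a);
                return ret;
        }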
 
-static const struct intel_device_info intel_skylake_gt3_info = {
-       BDW_FEATURES,
-       .is_skylake = 1,
-       .gen = 9,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+void i915_driver_unload(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
-static const struct intel_device_info intel_broxton_info = {
-       .is_preliminary = 1,
-       .is_broxton = 1,
-       .gen = 9,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-       .num_pipes = 3,
-       .has_ddi = 1,
-       .has_fpga_dbg = 1,
-       .has_fbc = 1,
-       .has_pooled_eu = 0,
-       GEN_DEFAULT_PIPEOFFSETS,
-       IVB_CURSOR_OFFSETS,
-       BDW_COLORS,
-};
+       intel_fbdev_fini(dev);
 
-static const struct intel_device_info intel_kabylake_info = {
-       BDW_FEATURES,
-       .is_kabylake = 1,
-       .gen = 9,
-};
+       if (i915_gem_suspend(dev))
+               DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 
-static const struct intel_device_info intel_kabylake_gt3_info = {
-       BDW_FEATURES,
-       .is_kabylake = 1,
-       .gen = 9,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
-/*
- * Make sure any device matches here are from most specific to most
- * general.  For example, since the Quanta match is based on the subsystem
- * and subvendor IDs, we need it to come before the more general IVB
- * PCI ID matches, otherwise we'll use the wrong info struct above.
- */
-static const struct pci_device_id pciidlist[] = {
-       INTEL_I830_IDS(&intel_i830_info),
-       INTEL_I845G_IDS(&intel_845g_info),
-       INTEL_I85X_IDS(&intel_i85x_info),
-       INTEL_I865G_IDS(&intel_i865g_info),
-       INTEL_I915G_IDS(&intel_i915g_info),
-       INTEL_I915GM_IDS(&intel_i915gm_info),
-       INTEL_I945G_IDS(&intel_i945g_info),
-       INTEL_I945GM_IDS(&intel_i945gm_info),
-       INTEL_I965G_IDS(&intel_i965g_info),
-       INTEL_G33_IDS(&intel_g33_info),
-       INTEL_I965GM_IDS(&intel_i965gm_info),
-       INTEL_GM45_IDS(&intel_gm45_info),
-       INTEL_G45_IDS(&intel_g45_info),
-       INTEL_PINEVIEW_IDS(&intel_pineview_info),
-       INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
-       INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
-       INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
-       INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
-       INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
-       INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
-       INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
-       INTEL_HSW_D_IDS(&intel_haswell_d_info),
-       INTEL_HSW_M_IDS(&intel_haswell_m_info),
-       INTEL_VLV_M_IDS(&intel_valleyview_m_info),
-       INTEL_VLV_D_IDS(&intel_valleyview_d_info),
-       INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
-       INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
-       INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
-       INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
-       INTEL_CHV_IDS(&intel_cherryview_info),
-       INTEL_SKL_GT1_IDS(&intel_skylake_info),
-       INTEL_SKL_GT2_IDS(&intel_skylake_info),
-       INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
-       INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
-       INTEL_BXT_IDS(&intel_broxton_info),
-       INTEL_KBL_GT1_IDS(&intel_kabylake_info),
-       INTEL_KBL_GT2_IDS(&intel_kabylake_info),
-       INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
-       INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
-       {0, 0, 0}
-};
+       i915_driver_unregister(dev_priv);
 
-MODULE_DEVICE_TABLE(pci, pciidlist);
+       drm_vblank_cleanup(dev);
 
-static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
-{
-       enum intel_pch ret = PCH_NOP;
+       intel_modeset_cleanup(dev);
 
        /*
-        * In a virtualized passthrough environment we can be in a
-        * setup where the ISA bridge is not able to be passed through.
-        * In this case, a south bridge can be emulated and we have to
-        * make an educated guess as to which PCH is really there.
+        * free the memory space allocated for the child device
+        * config parsed from VBT
         */
-
-       if (IS_GEN5(dev)) {
-               ret = PCH_IBX;
-               DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
-       } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
-               ret = PCH_CPT;
-               DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
-       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-               ret = PCH_LPT;
-               DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
-       } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
-               ret = PCH_SPT;
-               DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
+       if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+               kfree(dev_priv->vbt.child_dev);
+               dev_priv->vbt.child_dev = NULL;
+               dev_priv->vbt.child_dev_num = 0;
        }
+       kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+       kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+       dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
 
-       return ret;
-}
+       vga_switcheroo_unregister_client(dev->pdev);
+       vga_client_register(dev->pdev, NULL, NULL, NULL);
 
-void intel_detect_pch(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct pci_dev *pch = NULL;
+       intel_csr_ucode_fini(dev_priv);
 
-       /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
-        * (which really amounts to a PCH but no South Display).
-        */
-       if (INTEL_INFO(dev)->num_pipes == 0) {
-               dev_priv->pch_type = PCH_NOP;
-               return;
-       }
+       /* Free error state after interrupts are fully disabled. */
+       cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+       i915_destroy_error_state(dev);
 
-       /*
-        * The reason to probe ISA bridge instead of Dev31:Fun0 is to
-        * make graphics device passthrough work easy for VMM, that only
-        * need to expose ISA bridge to let driver know the real hardware
-        * underneath. This is a requirement from virtualization team.
-        *
-        * In some virtualized environments (e.g. XEN), there is irrelevant
-        * ISA bridge in the system. To work reliably, we should scan through
-        * all the ISA bridge devices and check for the first match, instead
-        * of only checking the first one.
-        */
-       while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
-               if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-                       unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-                       dev_priv->pch_id = id;
+       /* Flush any outstanding unpin_work. */
+       flush_workqueue(dev_priv->wq);
 
-                       if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_IBX;
-                               DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-                               WARN_ON(!IS_GEN5(dev));
-                       } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_CPT;
-                               DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
-                       } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
-                               /* PantherPoint is CPT compatible */
-                               dev_priv->pch_type = PCH_CPT;
-                               DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
-                       } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_LPT;
-                               DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
-                               WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
-                       } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_LPT;
-                               DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
-                               WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
-                       } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_SPT;
-                               DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
-                               WARN_ON(!IS_SKYLAKE(dev) &&
-                                       !IS_KABYLAKE(dev));
-                       } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_SPT;
-                               DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
-                               WARN_ON(!IS_SKYLAKE(dev) &&
-                                       !IS_KABYLAKE(dev));
-                       } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
-                                  (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
-                                  ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
-                                   pch->subsystem_vendor ==
-                                           PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
-                                   pch->subsystem_device ==
-                                           PCI_SUBDEVICE_ID_QEMU)) {
-                               dev_priv->pch_type = intel_virt_detect_pch(dev);
-                       } else
-                               continue;
+       intel_guc_fini(dev);
+       i915_gem_fini(dev);
+       intel_fbc_cleanup_cfb(dev_priv);
 
-                       break;
-               }
-       }
-       if (!pch)
-               DRM_DEBUG_KMS("No PCH found.\n");
+       intel_power_domains_fini(dev_priv);
 
-       pci_dev_put(pch);
+       i915_driver_cleanup_hw(dev_priv);
+       i915_driver_cleanup_mmio(dev_priv);
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+       i915_driver_cleanup_early(dev_priv);
 }
 
-bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
+static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-       if (INTEL_GEN(dev_priv) < 6)
-               return false;
+       int ret;
 
-       if (i915.semaphores >= 0)
-               return i915.semaphores;
+       ret = i915_gem_open(dev, file);
+       if (ret)
+               return ret;
 
-       /* TODO: make semaphores and Execlists play nicely together */
-       if (i915.enable_execlists)
-               return false;
+       return 0;
+}
 
-#ifdef CONFIG_INTEL_IOMMU
-       /* Enable semaphores on SNB when IO remapping is off */
-       if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
-               return false;
-#endif
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited.  In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
+static void i915_driver_lastclose(struct drm_device *dev)
+{
+       intel_fbdev_restore_mode(dev);
+       vga_switcheroo_process_delayed_switch();
+}
 
-       return true;
+static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
+{
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_context_close(dev, file);
+       i915_gem_release(dev, file);
+       mutex_unlock(&dev->struct_mutex);
+}
+
+static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+       struct drm_i915_file_private *file_priv = file->driver_priv;
+
+       kfree(file_priv);
 }
 
 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *encoder;
 
        drm_modeset_lock_all(dev);
@@ -583,7 +1430,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
 
 static int i915_drm_suspend(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        pci_power_t opregion_target_state;
        int error;
 
@@ -650,7 +1497,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 {
-       struct drm_i915_private *dev_priv = drm_dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(drm_dev);
        bool fw_csr;
        int ret;
 
@@ -712,7 +1559,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
 {
        int error;
 
-       if (!dev || !dev->dev_private) {
+       if (!dev) {
                DRM_ERROR("dev: %p\n", dev);
                DRM_ERROR("DRM not initialized, aborting suspend.\n");
                return -ENODEV;
@@ -734,7 +1581,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
 
 static int i915_drm_resume(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        disable_rpm_wakeref_asserts(dev_priv);
@@ -768,7 +1615,7 @@ static int i915_drm_resume(struct drm_device *dev)
        mutex_lock(&dev->struct_mutex);
        if (i915_gem_init_hw(dev)) {
                DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-                       atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+               atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
        }
        mutex_unlock(&dev->struct_mutex);
 
@@ -814,7 +1661,7 @@ static int i915_drm_resume(struct drm_device *dev)
 
 static int i915_drm_resume_early(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        /*
@@ -926,7 +1773,7 @@ int i915_resume_switcheroo(struct drm_device *dev)
  */
 int i915_reset(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_gpu_error *error = &dev_priv->gpu_error;
        unsigned reset_counter;
        int ret;
@@ -945,24 +1792,11 @@ int i915_reset(struct drm_i915_private *dev_priv)
                goto error;
        }
 
+       pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
        i915_gem_reset(dev);
 
        ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
-
-       /* Also reset the gpu hangman. */
-       if (error->stop_rings != 0) {
-               DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-               error->stop_rings = 0;
-               if (ret == -ENODEV) {
-                       DRM_INFO("Reset not implemented, but ignoring "
-                                "error for simulated gpu hangs\n");
-                       ret = 0;
-               }
-       }
-
-       if (i915_stop_ring_allow_warn(dev_priv))
-               pr_notice("drm/i915: Resetting chip after gpu hang\n");
-
        if (ret) {
                if (ret != -ENODEV)
                        DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1012,45 +1846,12 @@ int i915_reset(struct drm_i915_private *dev_priv)
        return ret;
 }
 
-static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct intel_device_info *intel_info =
-               (struct intel_device_info *) ent->driver_data;
-
-       if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
-               DRM_INFO("This hardware requires preliminary hardware support.\n"
-                        "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
-               return -ENODEV;
-       }
-
-       /* Only bind to function 0 of the device. Early generations
-        * used function 1 as a placeholder for multi-head. This causes
-        * us confusion instead, especially on the systems where both
-        * functions have the same PCI-ID!
-        */
-       if (PCI_FUNC(pdev->devfn))
-               return -ENODEV;
-
-       if (vga_switcheroo_client_probe_defer(pdev))
-               return -EPROBE_DEFER;
-
-       return drm_get_pci_dev(pdev, ent, &driver);
-}
-
-static void
-i915_pci_remove(struct pci_dev *pdev)
-{
-       struct drm_device *dev = pci_get_drvdata(pdev);
-
-       drm_put_dev(dev);
-}
-
 static int i915_pm_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-       if (!drm_dev || !drm_dev->dev_private) {
+       if (!drm_dev) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }
@@ -1063,7 +1864,7 @@ static int i915_pm_suspend(struct device *dev)
 
 static int i915_pm_suspend_late(struct device *dev)
 {
-       struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
        /*
         * We have a suspend ordering issue with the snd-hda driver also
@@ -1082,7 +1883,7 @@ static int i915_pm_suspend_late(struct device *dev)
 
 static int i915_pm_poweroff_late(struct device *dev)
 {
-       struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
@@ -1092,7 +1893,7 @@ static int i915_pm_poweroff_late(struct device *dev)
 
 static int i915_pm_resume_early(struct device *dev)
 {
-       struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
@@ -1102,7 +1903,7 @@ static int i915_pm_resume_early(struct device *dev)
 
 static int i915_pm_resume(struct device *dev)
 {
-       struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
@@ -1352,8 +2153,6 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
        u32 val;
        int err;
 
-#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
-
        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
        if (force_on)
@@ -1363,13 +2162,16 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
        if (!force_on)
                return 0;
 
-       err = wait_for(COND, 20);
+       err = intel_wait_for_register(dev_priv,
+                                     VLV_GTLC_SURVIVABILITY_REG,
+                                     VLV_GFX_CLK_STATUS_BIT,
+                                     VLV_GFX_CLK_STATUS_BIT,
+                                     20);
        if (err)
                DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
                          I915_READ(VLV_GTLC_SURVIVABILITY_REG));
 
        return err;
-#undef COND
 }
 
 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
@@ -1384,13 +2186,15 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
        POSTING_READ(VLV_GTLC_WAKE_CTRL);
 
-#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
-             allow)
-       err = wait_for(COND, 1);
+       err = intel_wait_for_register(dev_priv,
+                                     VLV_GTLC_PW_STATUS,
+                                     VLV_GTLC_ALLOWWAKEACK,
+                                     allow,
+                                     1);
        if (err)
                DRM_ERROR("timeout disabling GT waking\n");
+
        return err;
-#undef COND
 }
 
 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
@@ -1402,8 +2206,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
 
        mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
        val = wait_for_on ? mask : 0;
-#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
-       if (COND)
+       if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
                return 0;
 
        DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
@@ -1414,13 +2217,14 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
         * RC6 transitioning can be delayed up to 2 msec (see
         * valleyview_enable_rps), use 3 msec for safety.
         */
-       err = wait_for(COND, 3);
+       err = intel_wait_for_register(dev_priv,
+                                     VLV_GTLC_PW_STATUS, mask, val,
+                                     3);
        if (err)
                DRM_ERROR("timeout waiting for GT wells to go %s\n",
                          onoff(wait_for_on));
 
        return err;
-#undef COND
 }
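
The three hunks above perform the same mechanical conversion: each open-coded COND macro polled via wait_for() becomes a call to the new intel_wait_for_register() helper (declared later in this diff), which polls until (I915_READ(reg) & mask) == value or the millisecond timeout expires. A userspace sketch of that polling contract, with a toy register read standing in for I915_READ() and a loop-count bound instead of a real timeout:

        #include <stdint.h>
        #include <stdio.h>

        /* Toy register: bit 2 becomes visible after a few reads. */
        static uint32_t reads;
        static uint32_t toy_read(void) { return ++reads >= 3 ? 0x4 : 0x0; }

        /* Poll until (read() & mask) == value, or give up after 'tries'.
         * The in-kernel helper sleeps between reads and reports timeout
         * with a negative error code; this sketch just bounds the loop.
         */
        static int wait_for_register(uint32_t (*read)(void), uint32_t mask,
                                     uint32_t value, int tries)
        {
                while (tries--)
                        if ((read() & mask) == value)
                                return 0;
                return -1;
        }

        int main(void)
        {
                printf("%d\n", wait_for_register(toy_read, 0x4, 0x4, 10));
                return 0;
        }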
 
 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@ -1477,7 +2281,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                                bool rpm_resume)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        int err;
        int ret;
 
@@ -1513,7 +2317,7 @@ static int intel_runtime_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
@@ -1551,11 +2355,8 @@ static int intel_runtime_suspend(struct device *device)
        i915_gem_release_all_mmaps(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
-       cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-
        intel_guc_suspend(dev);
 
-       intel_suspend_gt_powersave(dev_priv);
        intel_runtime_pm_disable_interrupts(dev_priv);
 
        ret = 0;
@@ -1620,7 +2421,7 @@ static int intel_runtime_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1670,8 +2471,6 @@ static int intel_runtime_resume(struct device *device)
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_init(dev_priv);
 
-       intel_enable_gt_powersave(dev_priv);
-
        enable_rpm_wakeref_asserts(dev_priv);
 
        if (ret)
@@ -1682,7 +2481,7 @@ static int intel_runtime_resume(struct device *device)
        return ret;
 }
 
-static const struct dev_pm_ops i915_pm_ops = {
+const struct dev_pm_ops i915_pm_ops = {
        /*
         * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
         * PMSG_RESUME]
@@ -1741,6 +2540,68 @@ static const struct file_operations i915_driver_fops = {
        .llseek = noop_llseek,
 };
 
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file)
+{
+       return -ENODEV;
+}
+
+static const struct drm_ioctl_desc i915_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+};
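
Moving the ioctl table into this file lets the driver struct below take its size directly with ARRAY_SIZE() instead of exporting the old i915_max_ioctl count from another file. ARRAY_SIZE() is the kernel's compile-time element count; a simplified form is shown here (the kernel's version adds a type check that rejects plain pointers):

        /* Simplified ARRAY_SIZE(): element count of a true array. */
        #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

        /* e.g. with the table above: .num_ioctls = ARRAY_SIZE(i915_ioctls) */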
+
 static struct drm_driver driver = {
        /* Don't use MTRRs here; the Xserver or userspace app should
         * deal with them for Intel hardware.
@@ -1748,18 +2609,12 @@ static struct drm_driver driver = {
        .driver_features =
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
            DRIVER_RENDER | DRIVER_MODESET,
-       .load = i915_driver_load,
-       .unload = i915_driver_unload,
        .open = i915_driver_open,
        .lastclose = i915_driver_lastclose,
        .preclose = i915_driver_preclose,
        .postclose = i915_driver_postclose,
        .set_busid = drm_pci_set_busid,
 
-#if defined(CONFIG_DEBUG_FS)
-       .debugfs_init = i915_debugfs_init,
-       .debugfs_cleanup = i915_debugfs_cleanup,
-#endif
        .gem_free_object = i915_gem_free_object,
        .gem_vm_ops = &i915_gem_vm_ops,
 
@@ -1772,6 +2627,7 @@ static struct drm_driver driver = {
        .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls = i915_ioctls,
+       .num_ioctls = ARRAY_SIZE(i915_ioctls),
        .fops = &i915_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
@@ -1780,56 +2636,3 @@ static struct drm_driver driver = {
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
 };
-
-static struct pci_driver i915_pci_driver = {
-       .name = DRIVER_NAME,
-       .id_table = pciidlist,
-       .probe = i915_pci_probe,
-       .remove = i915_pci_remove,
-       .driver.pm = &i915_pm_ops,
-};
-
-static int __init i915_init(void)
-{
-       driver.num_ioctls = i915_max_ioctl;
-
-       /*
-        * Enable KMS by default, unless explicitly overridden by
-        * either the i915.modeset parameter or by the
-        * vga_text_mode_force boot option.
-        */
-
-       if (i915.modeset == 0)
-               driver.driver_features &= ~DRIVER_MODESET;
-
-       if (vgacon_text_force() && i915.modeset == -1)
-               driver.driver_features &= ~DRIVER_MODESET;
-
-       if (!(driver.driver_features & DRIVER_MODESET)) {
-               /* Silently fail loading to not upset userspace. */
-               DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
-               return 0;
-       }
-
-       if (i915.nuclear_pageflip)
-               driver.driver_features |= DRIVER_ATOMIC;
-
-       return drm_pci_init(&driver, &i915_pci_driver);
-}
-
-static void __exit i915_exit(void)
-{
-       if (!(driver.driver_features & DRIVER_MODESET))
-               return; /* Never loaded a driver. */
-
-       drm_pci_exit(&driver, &i915_pci_driver);
-}
-
-module_init(i915_init);
-module_exit(i915_exit);
-
-MODULE_AUTHOR("Tungsten Graphics, Inc.");
-MODULE_AUTHOR("Intel Corporation");
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
index 24a86c64d22ef6f0b1d0ef7db16460d0f772a21c..03e1bfaa5a41da5326d41b24f12b58cf0ddb1340 100644
@@ -69,7 +69,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20160620"
+#define DRIVER_DATE            "20160711"
 
 #undef WARN_ON
 /* Many gcc versions seem to not see through this and fall over :( */
@@ -320,15 +320,16 @@ struct i915_hotplug {
                for_each_if ((__ports_mask) & (1 << (__port)))
 
 #define for_each_crtc(dev, crtc) \
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+       list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
 
 #define for_each_intel_plane(dev, intel_plane) \
        list_for_each_entry(intel_plane,                        \
-                           &dev->mode_config.plane_list,       \
+                           &(dev)->mode_config.plane_list,     \
                            base.head)
 
 #define for_each_intel_plane_mask(dev, intel_plane, plane_mask)                \
-       list_for_each_entry(intel_plane, &dev->mode_config.plane_list,  \
+       list_for_each_entry(intel_plane,                                \
+                           &(dev)->mode_config.plane_list,             \
                            base.head)                                  \
                for_each_if ((plane_mask) &                             \
                             (1 << drm_plane_index(&intel_plane->base)))
@@ -339,11 +340,15 @@ struct i915_hotplug {
                            base.head)                                  \
                for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
 
-#define for_each_intel_crtc(dev, intel_crtc) \
-       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+#define for_each_intel_crtc(dev, intel_crtc)                           \
+       list_for_each_entry(intel_crtc,                                 \
+                           &(dev)->mode_config.crtc_list,              \
+                           base.head)
 
-#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
-       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask)           \
+       list_for_each_entry(intel_crtc,                                 \
+                           &(dev)->mode_config.crtc_list,              \
+                           base.head)                                  \
                for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
 
 #define for_each_intel_encoder(dev, intel_encoder)             \
@@ -353,7 +358,7 @@ struct i915_hotplug {
 
 #define for_each_intel_connector(dev, intel_connector)         \
        list_for_each_entry(intel_connector,                    \
-                           &dev->mode_config.connector_list,   \
+                           &(dev)->mode_config.connector_list, \
                            base.head)
 
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
@@ -475,6 +480,7 @@ struct drm_i915_error_state {
        struct timeval time;
 
        char error_msg[128];
+       bool simulated;
        int iommu;
        u32 reset_count;
        u32 suspend_count;
@@ -506,6 +512,7 @@ struct drm_i915_error_state {
                bool valid;
                /* Software tracked state */
                bool waiting;
+               int num_waiters;
                int hangcheck_score;
                enum intel_ring_hangcheck_action hangcheck_action;
                int num_requests;
@@ -551,6 +558,12 @@ struct drm_i915_error_state {
                        u32 tail;
                } *requests;
 
+               struct drm_i915_error_waiter {
+                       char comm[TASK_COMM_LEN];
+                       pid_t pid;
+                       u32 seqno;
+               } *waiters;
+
                struct {
                        u32 gfx_mode;
                        union {
@@ -868,9 +881,12 @@ struct i915_gem_context {
 
        /* Unique identifier for this context, used by the hw for tracking */
        unsigned long flags;
+#define CONTEXT_NO_ZEROMAP             BIT(0)
+#define CONTEXT_NO_ERROR_CAPTURE       BIT(1)
        unsigned hw_id;
        u32 user_handle;
-#define CONTEXT_NO_ZEROMAP             (1<<0)
+
+       u32 ggtt_alignment;
 
        struct intel_context {
                struct drm_i915_gem_object *state;
@@ -1011,6 +1027,7 @@ enum intel_pch {
        PCH_CPT,        /* Cougarpoint PCH */
        PCH_LPT,        /* Lynxpoint PCH */
        PCH_SPT,        /* Sunrisepoint PCH */
+       PCH_KBP,        /* Kabypoint PCH */
        PCH_NOP,
 };
 
@@ -1304,38 +1321,12 @@ struct i915_gem_mm {
        /** LRU list of objects with fence regs on them. */
        struct list_head fence_list;
 
-       /**
-        * We leave the user IRQ off as much as possible,
-        * but this means that requests will finish and never
-        * be retired once the system goes idle. Set a timer to
-        * fire periodically while the ring is running. When it
-        * fires, go retire requests.
-        */
-       struct delayed_work retire_work;
-
-       /**
-        * When we detect an idle GPU, we want to turn on
-        * powersaving features. So once we see that there
-        * are no more requests outstanding and no more
-        * arrive within a small period of time, we fire
-        * off the idle_work.
-        */
-       struct delayed_work idle_work;
-
        /**
         * Are we in a non-interruptible section of code like
         * modesetting?
         */
        bool interruptible;
 
-       /**
-        * Is the GPU currently considered idle, or busy executing userspace
-        * requests?  Whilst idle, we attempt to power down the hardware and
-        * display clocks. In order to reduce the effect on performance, there
-        * is a slight delay before we do so.
-        */
-       bool busy;
-
        /* the indicator for dispatch video commands on two BSD rings */
        unsigned int bsd_ring_dispatch_index;
 
@@ -1372,7 +1363,6 @@ struct i915_gpu_error {
        /* Hang gpu twice in this window and your context gets banned */
 #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
 
-       struct workqueue_struct *hangcheck_wq;
        struct delayed_work hangcheck_work;
 
        /* For reset and error_state handling. */
@@ -1408,21 +1398,20 @@ struct i915_gpu_error {
 #define I915_RESET_IN_PROGRESS_FLAG    1
 #define I915_WEDGED                    (1 << 31)
 
+       /**
+        * Waitqueue to signal when a hang is detected. Used for waiters
+        * to release the struct_mutex for the reset to proceed.
+        */
+       wait_queue_head_t wait_queue;
+
        /**
         * Waitqueue to signal when the reset has completed. Used by clients
         * that wait for dev_priv->mm.wedged to settle.
         */
        wait_queue_head_t reset_queue;
 
-       /* Userspace knobs for gpu hang simulation;
-        * combines both a ring mask, and extra flags
-        */
-       u32 stop_rings;
-#define I915_STOP_RING_ALLOW_BAN       (1 << 31)
-#define I915_STOP_RING_ALLOW_WARN      (1 << 30)
-
        /* For missed irq/seqno simulation. */
-       unsigned int test_irq_rings;
+       unsigned long test_irq_rings;
 };
 
 enum modeset_restore {
@@ -1733,7 +1722,8 @@ struct intel_wm_config {
 };
 
 struct drm_i915_private {
-       struct drm_device *dev;
+       struct drm_device drm;
+
        struct kmem_cache *objects;
        struct kmem_cache *vmas;
        struct kmem_cache *requests;
@@ -2029,6 +2019,34 @@ struct drm_i915_private {
                int (*init_engines)(struct drm_device *dev);
                void (*cleanup_engine)(struct intel_engine_cs *engine);
                void (*stop_engine)(struct intel_engine_cs *engine);
+
+               /**
+                * Is the GPU currently considered idle, or busy executing
+                * userspace requests? Whilst idle, we allow runtime power
+                * management to power down the hardware and display clocks.
+                * In order to reduce the effect on performance, there
+                * is a slight delay before we do so.
+                */
+               unsigned int active_engines;
+               bool awake;
+
+               /**
+                * We leave the user IRQ off as much as possible,
+                * but this means that requests will finish and never
+                * be retired once the system goes idle. Set a timer to
+                * fire periodically while the ring is running. When it
+                * fires, go retire requests.
+                */
+               struct delayed_work retire_work;
+
+               /**
+                * When we detect an idle GPU, we want to turn on
+                * powersaving features. So once we see that there
+                * are no more requests outstanding and no more
+                * arrive within a small period of time, we fire
+                * off the idle_work.
+                */
+               struct delayed_work idle_work;
        } gt;
 
        /* perform PHY state sanity checks? */
@@ -2044,7 +2062,7 @@ struct drm_i915_private {
 
 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
 {
-       return dev->dev_private;
+       return container_of(dev, struct drm_i915_private, drm);
 }
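
This works because struct drm_device is now embedded directly in struct drm_i915_private (the 'drm' member added later in this diff), so container_of() can recover the private structure from any drm_device pointer by pure pointer arithmetic instead of chasing dev_private. A self-contained userspace sketch of the idiom, with toy stand-ins for the drm types and a simplified container_of() (the kernel macro adds type checks and const handling):

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct drm_device { int dummy; };       /* toy stand-in */
        struct drm_i915_private {
                struct drm_device drm;          /* embedded, as in the diff */
                int pch_type;
        };

        static struct drm_i915_private *to_i915(struct drm_device *dev)
        {
                return container_of(dev, struct drm_i915_private, drm);
        }

        int main(void)
        {
                struct drm_i915_private priv = { .pch_type = 42 };
                struct drm_device *dev = &priv.drm;

                /* Recovers the enclosing private from the embedded member. */
                printf("%d\n", to_i915(dev)->pch_type);
                return 0;
        }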
 
 static inline struct drm_i915_private *dev_to_i915(struct device *dev)
@@ -2215,6 +2233,7 @@ struct drm_i915_gem_object {
 
        unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
+       unsigned int has_wc_mmap;
        unsigned int pin_display;
 
        struct sg_table *pages;
@@ -2267,6 +2286,12 @@ struct drm_i915_gem_object {
 };
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+       return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
 /*
  * Optimised SGL iterator for GEM objects
  */
@@ -2357,7 +2382,7 @@ struct drm_i915_gem_request {
        /** On Which ring this request was generated */
        struct drm_i915_private *i915;
        struct intel_engine_cs *engine;
-       unsigned reset_counter;
+       struct intel_signal_node signaling;
 
         /** GEM sequence number associated with the previous request,
          * when the HWS breadcrumb is equal to this the GPU is processing
@@ -2613,7 +2638,7 @@ struct drm_i915_cmd_table {
 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
 
 #define REVID_FOREVER          0xff
-#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
+#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
 
 #define GEN_FOREVER (0)
 /*
@@ -2743,29 +2768,34 @@ struct drm_i915_cmd_table {
  * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
  * chips, etc.).
  */
-#define IS_GEN2(dev)   (INTEL_INFO(dev)->gen_mask & BIT(1))
-#define IS_GEN3(dev)   (INTEL_INFO(dev)->gen_mask & BIT(2))
-#define IS_GEN4(dev)   (INTEL_INFO(dev)->gen_mask & BIT(3))
-#define IS_GEN5(dev)   (INTEL_INFO(dev)->gen_mask & BIT(4))
-#define IS_GEN6(dev)   (INTEL_INFO(dev)->gen_mask & BIT(5))
-#define IS_GEN7(dev)   (INTEL_INFO(dev)->gen_mask & BIT(6))
-#define IS_GEN8(dev)   (INTEL_INFO(dev)->gen_mask & BIT(7))
-#define IS_GEN9(dev)   (INTEL_INFO(dev)->gen_mask & BIT(8))
-
-#define RENDER_RING            (1<<RCS)
-#define BSD_RING               (1<<VCS)
-#define BLT_RING               (1<<BCS)
-#define VEBOX_RING             (1<<VECS)
-#define BSD2_RING              (1<<VCS2)
-#define ALL_ENGINES            (~0)
-
-#define HAS_BSD(dev)           (INTEL_INFO(dev)->ring_mask & BSD_RING)
-#define HAS_BSD2(dev)          (INTEL_INFO(dev)->ring_mask & BSD2_RING)
-#define HAS_BLT(dev)           (INTEL_INFO(dev)->ring_mask & BLT_RING)
-#define HAS_VEBOX(dev)         (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
+#define IS_GEN2(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
+#define IS_GEN3(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
+#define IS_GEN4(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
+#define IS_GEN5(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
+#define IS_GEN6(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
+#define IS_GEN7(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
+#define IS_GEN8(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
+#define IS_GEN9(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+
+#define ENGINE_MASK(id)        BIT(id)
+#define RENDER_RING    ENGINE_MASK(RCS)
+#define BSD_RING       ENGINE_MASK(VCS)
+#define BLT_RING       ENGINE_MASK(BCS)
+#define VEBOX_RING     ENGINE_MASK(VECS)
+#define BSD2_RING      ENGINE_MASK(VCS2)
+#define ALL_ENGINES    (~0)
+
+#define HAS_ENGINE(dev_priv, id) \
+       (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
+
+#define HAS_BSD(dev_priv)      HAS_ENGINE(dev_priv, VCS)
+#define HAS_BSD2(dev_priv)     HAS_ENGINE(dev_priv, VCS2)
+#define HAS_BLT(dev_priv)      HAS_ENGINE(dev_priv, BCS)
+#define HAS_VEBOX(dev_priv)    HAS_ENGINE(dev_priv, VECS)
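
HAS_ENGINE() centralises the ring_mask test that the old per-engine defines each open-coded, and the !! normalises the masked bit to a strict 0/1. A runnable miniature of the pattern (toy engine ordering and ring_mask value, not the driver's enum):

        #include <stdio.h>

        #define BIT(n)          (1UL << (n))
        #define ENGINE_MASK(id) BIT(id)
        enum { RCS, VCS, BCS, VECS, VCS2 };     /* toy engine ids */

        /* Toy stand-in for INTEL_INFO(p)->ring_mask with RCS|VECS set. */
        static unsigned long ring_mask = ENGINE_MASK(RCS) | ENGINE_MASK(VECS);
        #define HAS_ENGINE(id)  (!!(ring_mask & ENGINE_MASK(id)))

        int main(void)
        {
                printf("%d %d\n", HAS_ENGINE(VECS), HAS_ENGINE(VCS2)); /* 1 0 */
                return 0;
        }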
+
 #define HAS_LLC(dev)           (INTEL_INFO(dev)->has_llc)
 #define HAS_SNOOP(dev)         (INTEL_INFO(dev)->has_snoop)
-#define HAS_EDRAM(dev)         (__I915__(dev)->edram_cap & EDRAM_ENABLED)
+#define HAS_EDRAM(dev)         (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
 #define HAS_WT(dev)            ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
                                 HAS_EDRAM(dev))
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
@@ -2783,9 +2813,10 @@ struct drm_i915_cmd_table {
 #define HAS_BROKEN_CS_TLB(dev)         (IS_I830(dev) || IS_845G(dev))
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-                                                IS_SKL_GT3(dev) || \
-                                                IS_SKL_GT4(dev))
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
+       (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
+        IS_SKL_GT3(dev_priv) || \
+        IS_SKL_GT4(dev_priv))
 
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -2832,7 +2863,7 @@ struct drm_i915_cmd_table {
  * command submission once loaded. But these are logically independent
  * properties, so we have separate macros to test them.
  */
-#define HAS_GUC(dev)           (IS_GEN9(dev) && !IS_KABYLAKE(dev))
+#define HAS_GUC(dev)           (IS_GEN9(dev))
 #define HAS_GUC_UCODE(dev)     (HAS_GUC(dev))
 #define HAS_GUC_SCHED(dev)     (HAS_GUC(dev))
 
@@ -2853,11 +2884,13 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE                0x9c00
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE           0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE                0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE           0xA200
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE           0x7100
 #define INTEL_PCH_P3X_DEVICE_ID_TYPE           0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE          0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
@@ -2879,8 +2912,14 @@ struct drm_i915_cmd_table {
 
 #include "i915_trace.h"
 
-extern const struct drm_ioctl_desc i915_ioctls[];
-extern int i915_max_ioctl;
+static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+       if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
+               return true;
+#endif
+       return false;
+}
 
 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
@@ -2888,7 +2927,7 @@ extern int i915_resume_switcheroo(struct drm_device *dev);
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                                int enable_ppgtt);
 
-/* i915_dma.c */
+/* i915_drv.c */
 void __printf(3, 4)
 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
              const char *fmt, ...);
@@ -2896,14 +2935,6 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 #define i915_report_error(dev_priv, fmt, ...)                             \
        __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
 
-extern int i915_driver_load(struct drm_device *, unsigned long flags);
-extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
-extern void i915_driver_lastclose(struct drm_device * dev);
-extern void i915_driver_preclose(struct drm_device *dev,
-                                struct drm_file *file);
-extern void i915_driver_postclose(struct drm_device *dev,
-                                 struct drm_file *file);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
@@ -2928,7 +2959,23 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
 
 /* i915_irq.c */
-void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
+static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
+{
+       unsigned long delay;
+
+       if (unlikely(!i915.enable_hangcheck))
+               return;
+
+       /* Don't continually defer the hangcheck so that it is always run at
+        * least once after work has been scheduled on any ring. Otherwise,
+        * we will ignore a hung ring if a second ring is kept busy.
+        */
+
+       delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
+       queue_delayed_work(system_long_wq,
+                          &dev_priv->gpu_error.hangcheck_work, delay);
+}
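
The "don't continually defer" property falls out of workqueue semantics: queue_delayed_work() is a no-op when the work item is already pending, so a hangcheck armed by an earlier submission still fires on time even while subsequent submissions keep calling this helper. mod_delayed_work() is the variant that would re-arm the timer on every call:

        /* Both are real workqueue APIs; the hangcheck relies on the first:
         *   queue_delayed_work(wq, &dwork, delay); // no-op if already pending
         *   mod_delayed_work(wq, &dwork, delay);   // re-arms timer each call
         */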
+
 __printf(3, 4)
 void i915_handle_error(struct drm_i915_private *dev_priv,
                       u32 engine_mask,
@@ -2963,6 +3010,17 @@ u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
 
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+                           i915_reg_t reg,
+                           const u32 mask,
+                           const u32 value,
+                           const unsigned long timeout_ms);
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+                              i915_reg_t reg,
+                              const u32 mask,
+                              const u32 value,
+                              const unsigned long timeout_ms);
+
 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
 {
        return dev_priv->gvt.initialized;
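
intel_wait_for_register() and its _fw variant, declared above, poll a register until (read & mask) == value or the timeout expires. A hedged usage sketch; the register, bit and function names here are placeholders invented for illustration:

#define FAKE_STATUS_REG _MMIO(0x12340)  /* placeholder register */
#define FAKE_READY_BIT  (1 << 0)        /* placeholder ready bit */

static int wait_device_ready(struct drm_i915_private *dev_priv)
{
        /* 0 once the bit is set, a negative error code on timeout. */
        return intel_wait_for_register(dev_priv,
                                       FAKE_STATUS_REG,
                                       FAKE_READY_BIT, FAKE_READY_BIT,
                                       100 /* ms */);
}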
@@ -3027,7 +3085,6 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
        ibx_display_interrupt_update(dev_priv, bits, 0);
 }
 
-
 /* i915_gem.c */
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
@@ -3244,31 +3301,34 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
        return (int32_t)(seq1 - seq2) >= 0;
 }
 
-static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
-                                          bool lazy_coherency)
+static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
 {
-       if (!lazy_coherency && req->engine->irq_seqno_barrier)
-               req->engine->irq_seqno_barrier(req->engine);
-       return i915_seqno_passed(req->engine->get_seqno(req->engine),
+       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
                                 req->previous_seqno);
 }
 
-static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
-                                             bool lazy_coherency)
+static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
 {
-       if (!lazy_coherency && req->engine->irq_seqno_barrier)
-               req->engine->irq_seqno_barrier(req->engine);
-       return i915_seqno_passed(req->engine->get_seqno(req->engine),
+       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
                                 req->seqno);
 }
 
+bool __i915_spin_request(const struct drm_i915_gem_request *request,
+                        int state, unsigned long timeout_us);
+static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
+                                    int state, unsigned long timeout_us)
+{
+       return (i915_gem_request_started(request) &&
+               __i915_spin_request(request, state, timeout_us));
+}
+
 int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
-bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3311,18 +3371,6 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
        return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
 }
 
-static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
-{
-       return dev_priv->gpu_error.stop_rings == 0 ||
-               dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
-}
-
-static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
-{
-       return dev_priv->gpu_error.stop_rings == 0 ||
-               dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
-}
-
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
@@ -3330,7 +3378,7 @@ int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
                        struct drm_i915_gem_object *batch_obj,
@@ -3484,7 +3532,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 {
        struct i915_gem_context *ctx;
 
-       lockdep_assert_held(&file_priv->dev_priv->dev->struct_mutex);
+       lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
 
        ctx = idr_find(&file_priv->context_idr, id);
        if (!ctx)
@@ -3500,7 +3548,7 @@ static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
 
 static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
 {
-       lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        kref_put(&ctx->ref, i915_gem_context_free);
 }
 
@@ -3576,7 +3624,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj->tiling_mode != I915_TILING_NONE;
@@ -3590,12 +3638,14 @@ int i915_verify_lists(struct drm_device *dev);
 #endif
 
 /* i915_debugfs.c */
-int i915_debugfs_init(struct drm_minor *minor);
-void i915_debugfs_cleanup(struct drm_minor *minor);
 #ifdef CONFIG_DEBUG_FS
+int i915_debugfs_register(struct drm_i915_private *dev_priv);
+void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
 int i915_debugfs_connector_add(struct drm_connector *connector);
 void intel_display_crc_init(struct drm_device *dev);
 #else
+static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
+static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}

 static inline int i915_debugfs_connector_add(struct drm_connector *connector)
 { return 0; }
 static inline void intel_display_crc_init(struct drm_device *dev) {}
@@ -3686,8 +3736,8 @@ extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
 extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
 #else
 static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
-static inline void intel_opregion_init(struct drm_i915_private *dev) { }
-static inline void intel_opregion_fini(struct drm_i915_private *dev) { }
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
 static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 {
 }
@@ -3716,11 +3766,22 @@ static inline void intel_register_dsm_handler(void) { return; }
 static inline void intel_unregister_dsm_handler(void) { return; }
 #endif /* CONFIG_ACPI */
 
+/* intel_device_info.c */
+static inline struct intel_device_info *
+mkwrite_device_info(struct drm_i915_private *dev_priv)
+{
+       return (struct intel_device_info *)&dev_priv->info;
+}
+
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
+void intel_device_info_dump(struct drm_i915_private *dev_priv);
+
 /* modesetting */
 extern void intel_modeset_init_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_connector_register(struct drm_connector *);
 extern void intel_connector_unregister(struct drm_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_display_resume(struct drm_device *dev);
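
dev_priv->info is const precisely so that feature flags cannot be flipped after load; mkwrite_device_info() above is the one sanctioned cast for early init, used by intel_device_info_runtime_init(). A small sketch of the intended pattern, modelled on the driver's own disable-display path (the wrapper name is an assumption):

static void sketch_disable_display(struct drm_i915_private *dev_priv)
{
        struct intel_device_info *info = mkwrite_device_info(dev_priv);

        /* Legitimate only during load, before the info is relied upon:
         * pretend the hardware has no display pipes.
         */
        info->num_pipes = 0;
}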
@@ -3731,7 +3792,6 @@ extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                                  bool enable);
-extern void intel_detect_pch(struct drm_device *dev);
 
 extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
@@ -3864,6 +3924,7 @@ __raw_write(64, q)
  */
 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
 
 /* "Broadcast RGB" property */
@@ -3927,12 +3988,80 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
                            schedule_timeout_uninterruptible(remaining_jiffies);
        }
 }
-
-static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
-                                     struct drm_i915_gem_request *req)
+static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
 {
-       if (engine->trace_irq_req == NULL && engine->irq_get(engine))
-               i915_gem_request_assign(&engine->trace_irq_req, req);
+       struct intel_engine_cs *engine = req->engine;
+
+       /* Before we do the heavier coherent read of the seqno,
+        * check the value (hopefully) in the CPU cacheline.
+        */
+       if (i915_gem_request_completed(req))
+               return true;
+
+       /* Ensure our read of the seqno is coherent so that we
+        * do not "miss an interrupt" (i.e. if this is the last
+        * request and the seqno write from the GPU is not visible
+        * by the time the interrupt fires, we will see that the
+        * request is incomplete and go back to sleep awaiting
+        * another interrupt that will never come.)
+        *
+        * Strictly, we only need to do this once after an interrupt,
+        * but it is easier and safer to do it every time the waiter
+        * is woken.
+        */
+       if (engine->irq_seqno_barrier &&
+           READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
+           cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
+               struct task_struct *tsk;
+
+               /* The ordering of irq_posted versus applying the barrier
+                * is crucial. The clearing of the current irq_posted must
+                * be visible before we perform the barrier operation,
+                * such that if a subsequent interrupt arrives, irq_posted
+                * is reasserted and our task rewoken (which causes us to
+                * do another __i915_request_irq_complete() immediately
+                * and reapply the barrier). Conversely, if the clear
+                * occurs after the barrier, then an interrupt that arrived
+                * whilst we waited on the barrier would not trigger a
+                * barrier on the next pass, and the read may not see the
+                * seqno update.
+                */
+               engine->irq_seqno_barrier(engine);
+
+               /* If we consume the irq, but we are no longer the bottom-half,
+                * the real bottom-half may not have serialised their own
+                * seqno check with the irq-barrier (i.e. may have inspected
+                * the seqno before we believe it coherent since they see
+                * irq_posted == false but we are still running).
+                */
+               rcu_read_lock();
+               tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+               if (tsk && tsk != current)
+                       /* Note that if the bottom-half is changed as we
+                        * are sending the wake-up, the new bottom-half will
+                        * be woken by whoever made the change. We only have
+                        * to worry about when we steal the irq-posted for
+                        * ourselves.
+                        */
+                       wake_up_process(tsk);
+               rcu_read_unlock();
+
+               if (i915_gem_request_completed(req))
+                       return true;
+       }
+
+       /* We need to check whether any gpu reset happened in between
+        * the request being submitted and now. If a reset has occurred,
+        * the seqno will have been advanced past ours and our request
+        * is complete. If we are in the process of handling a reset,
+        * the request is effectively complete as the rendering will
+        * be discarded, but we need to return in order to drop the
+        * struct_mutex.
+        */
+       if (i915_reset_in_progress(&req->i915->gpu_error))
+               return true;
+
+       return false;
 }
 
 #endif
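
The ordering contract spelled out in __i915_request_irq_complete() has a producer half in the interrupt handler: irq_posted must be published before the bottom-half task is woken. A simplified sketch of that half under the same field names (this is not the driver's actual handler):

static void fake_notify_ring(struct intel_engine_cs *engine)
{
        struct task_struct *tsk;

        /* Publish the interrupt with a full barrier; pairs with the
         * cmpxchg_relaxed() consumption in __i915_request_irq_complete().
         */
        smp_store_mb(engine->breadcrumbs.irq_posted, true);

        /* Wake whichever waiter is currently acting as bottom-half.
         * RCU guards against the task vanishing under us, mirroring
         * the reader side above.
         */
        rcu_read_lock();
        tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
        if (tsk)
                wake_up_process(tsk);
        rcu_read_unlock();
}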
index 21d0dea573127722f24dfb470ef51a889c38a171..8f50919ba9b40e59b2db9c550abc29dd0dbb0356 100644 (file)
@@ -128,7 +128,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -377,13 +377,13 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        kmem_cache_free(dev_priv->objects, obj);
 }
 
@@ -508,7 +508,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 
        *needs_clflush = 0;
 
-       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+       if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
                return -EINVAL;
 
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -636,7 +636,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
                   struct drm_i915_gem_object *obj, uint64_t size,
                   uint64_t data_offset, uint64_t data_ptr)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_mm_node node;
        char __user *user_data;
@@ -760,7 +760,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;
 
-       if (!obj->base.filp)
+       if (!i915_gem_object_has_struct_page(obj))
                return -ENODEV;
 
        user_data = u64_to_user_ptr(args->data_ptr);
@@ -1250,7 +1250,7 @@ int
 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -1298,7 +1298,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
-       if (!obj->base.filp || cpu_write_needs_clflush(obj)) {
+       if (!i915_gem_object_has_struct_page(obj) ||
+           cpu_write_needs_clflush(obj)) {
                ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
@@ -1308,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (ret == -EFAULT) {
                if (obj->phys_handle)
                        ret = i915_gem_phys_pwrite(obj, args, file);
-               else if (obj->base.filp)
+               else if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(dev, obj, args, file);
                else
                        ret = -ENODEV;
@@ -1342,17 +1343,6 @@ i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
        return 0;
 }
 
-static void fake_irq(unsigned long data)
-{
-       wake_up_process((struct task_struct *)data);
-}
-
-static bool missed_irq(struct drm_i915_private *dev_priv,
-                      struct intel_engine_cs *engine)
-{
-       return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
-}
-
 static unsigned long local_clock_us(unsigned *cpu)
 {
        unsigned long t;
@@ -1385,9 +1375,9 @@ static bool busywait_stop(unsigned long timeout, unsigned cpu)
        return this_cpu != cpu;
 }
 
-static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
+bool __i915_spin_request(const struct drm_i915_gem_request *req,
+                        int state, unsigned long timeout_us)
 {
-       unsigned long timeout;
        unsigned cpu;
 
        /* When waiting for high frequency requests, e.g. during synchronous
@@ -1400,31 +1390,21 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
         * takes to sleep on a request, on the order of a microsecond.
         */
 
-       if (req->engine->irq_refcount)
-               return -EBUSY;
-
-       /* Only spin if we know the GPU is processing this request */
-       if (!i915_gem_request_started(req, true))
-               return -EAGAIN;
-
-       timeout = local_clock_us(&cpu) + 5;
-       while (!need_resched()) {
-               if (i915_gem_request_completed(req, true))
-                       return 0;
+       timeout_us += local_clock_us(&cpu);
+       do {
+               if (i915_gem_request_completed(req))
+                       return true;
 
                if (signal_pending_state(state, current))
                        break;
 
-               if (busywait_stop(timeout, cpu))
+               if (busywait_stop(timeout_us, cpu))
                        break;
 
                cpu_relax_lowlatency();
-       }
+       } while (!need_resched());
 
-       if (i915_gem_request_completed(req, false))
-               return 0;
-
-       return -EAGAIN;
+       return false;
 }
 
 /**
@@ -1449,25 +1429,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                        s64 *timeout,
                        struct intel_rps_client *rps)
 {
-       struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-       struct drm_i915_private *dev_priv = req->i915;
-       const bool irq_test_in_progress =
-               ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-       DEFINE_WAIT(wait);
-       unsigned long timeout_expire;
+       DEFINE_WAIT(reset);
+       struct intel_wait wait;
+       unsigned long timeout_remain;
        s64 before = 0; /* Only to silence a compiler warning. */
-       int ret;
+       int ret = 0;
 
-       WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
+       might_sleep();
 
        if (list_empty(&req->list))
                return 0;
 
-       if (i915_gem_request_completed(req, true))
+       if (i915_gem_request_completed(req))
                return 0;
 
-       timeout_expire = 0;
+       timeout_remain = MAX_SCHEDULE_TIMEOUT;
        if (timeout) {
                if (WARN_ON(*timeout < 0))
                        return -EINVAL;
@@ -1475,7 +1452,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                if (*timeout == 0)
                        return -ETIME;
 
-               timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+               timeout_remain = nsecs_to_jiffies_timeout(*timeout);
 
                /*
                 * Record current time in case interrupted by signal, or wedged.
@@ -1483,75 +1460,85 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                before = ktime_get_raw_ns();
        }
 
-       if (INTEL_INFO(dev_priv)->gen >= 6)
-               gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
-
        trace_i915_gem_request_wait_begin(req);
 
-       /* Optimistic spin for the next jiffie before touching IRQs */
-       ret = __i915_spin_request(req, state);
-       if (ret == 0)
-               goto out;
-
-       if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
-               ret = -ENODEV;
-               goto out;
-       }
+       /* This client is about to stall waiting for the GPU. In many cases
+        * this is undesirable and limits the throughput of the system, as
+        * many clients cannot continue processing user input/output whilst
+        * blocked. RPS autotuning may take tens of milliseconds to respond
+        * to the GPU load and thus incurs additional latency for the client.
+        * We can circumvent that by promoting the GPU frequency to maximum
+        * before we wait. This makes the GPU throttle up much more quickly
+        * (good for benchmarks and user experience, e.g. window animations),
+        * but at a cost of spending more power processing the workload
+        * (bad for battery). Not all clients even want their results
+        * immediately and for them we should just let the GPU select its own
+        * frequency to maximise efficiency. To prevent a single client from
+        * forcing the clocks too high for the whole system, we only allow
+        * each client to waitboost once in a busy period.
+        */
+       if (INTEL_INFO(req->i915)->gen >= 6)
+               gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
 
-       for (;;) {
-               struct timer_list timer;
+       /* Optimistic spin for the next ~jiffie before touching IRQs */
+       if (i915_spin_request(req, state, 5))
+               goto complete;
 
-               prepare_to_wait(&engine->irq_queue, &wait, state);
+       set_current_state(state);
+       add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-               /* We need to check whether any gpu reset happened in between
-                * the request being submitted and now. If a reset has occurred,
-                * the request is effectively complete (we either are in the
-                * process of or have discarded the rendering and completely
-                * reset the GPU. The results of the request are lost and we
-                * are free to continue on with the original operation.
+       intel_wait_init(&wait, req->seqno);
+       if (intel_engine_add_wait(req->engine, &wait))
+               /* In order to check that we haven't missed the interrupt
+                * as we enabled it, we need to kick ourselves to do a
+                * coherent check on the seqno before we sleep.
                 */
-               if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
-                       ret = 0;
-                       break;
-               }
-
-               if (i915_gem_request_completed(req, false)) {
-                       ret = 0;
-                       break;
-               }
+               goto wakeup;
 
+       for (;;) {
                if (signal_pending_state(state, current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
 
-               if (timeout && time_after_eq(jiffies, timeout_expire)) {
+               /* Ensure that even if the GPU hangs, we get woken up.
+                *
+                * However, note that if no one is waiting, we never notice
+                * a gpu hang. Eventually, we will have to wait for a resource
+                * held by the GPU and so trigger a hangcheck. In the most
+                * pathological case, this will be upon memory starvation!
+                */
+               i915_queue_hangcheck(req->i915);
+
+               timeout_remain = io_schedule_timeout(timeout_remain);
+               if (timeout_remain == 0) {
                        ret = -ETIME;
                        break;
                }
 
-               timer.function = NULL;
-               if (timeout || missed_irq(dev_priv, engine)) {
-                       unsigned long expire;
+               if (intel_wait_complete(&wait))
+                       break;
 
-                       setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-                       expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
-                       mod_timer(&timer, expire);
-               }
+               set_current_state(state);
 
-               io_schedule();
+wakeup:
+               /* Carefully check if the request is complete, giving time
+                * for the seqno to be visible following the interrupt.
+                * We also have to check in case we are kicked by the GPU
+                * reset in order to drop the struct_mutex.
+                */
+               if (__i915_request_irq_complete(req))
+                       break;
 
-               if (timer.function) {
-                       del_singleshot_timer_sync(&timer);
-                       destroy_timer_on_stack(&timer);
-               }
+               /* Only spin if we know the GPU is processing this request */
+               if (i915_spin_request(req, state, 2))
+                       break;
        }
-       if (!irq_test_in_progress)
-               engine->irq_put(engine);
-
-       finish_wait(&engine->irq_queue, &wait);
+       remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-out:
+       intel_engine_remove_wait(req->engine, &wait);
+       __set_current_state(TASK_RUNNING);
+complete:
        trace_i915_gem_request_wait_end(req);
 
        if (timeout) {
@@ -1570,6 +1557,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                        *timeout = 0;
        }
 
+       if (rps && req->seqno == req->engine->last_submitted_seqno) {
+               /* The GPU is now idle and this client has stalled.
+                * Since no other client has submitted a request in the
+                * meantime, assume that this client is the only one
+                * supplying work to the GPU but is unable to keep that
+                * work supplied because it is waiting. Since the GPU is
+                * then never kept fully busy, RPS autoclocking will
+                * keep the clocks relatively low, causing further delays.
+                * Compensate by giving the synchronous client credit for
+                * a waitboost next time.
+                */
+               spin_lock(&req->i915->rps.client_lock);
+               list_del_init(&rps->link);
+               spin_unlock(&req->i915->rps.client_lock);
+       }
+
        return ret;
 }
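
Stripped of signals, timeouts, hangcheck kicks and the irq-coherency handling, the new breadcrumbs API drives a conventional sleep loop. A bare, uninterruptible skeleton for reference only; it ignores the intel_engine_add_wait() return value, which the real function above uses to force an immediate coherent seqno check:

static void sketch_wait_request(struct drm_i915_gem_request *req)
{
        struct intel_wait wait;

        intel_wait_init(&wait, req->seqno);
        intel_engine_add_wait(req->engine, &wait);

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (i915_gem_request_completed(req) ||
                    intel_wait_complete(&wait))
                        break;
                io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        intel_engine_remove_wait(req->engine, &wait);
}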
 
@@ -1648,7 +1651,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;
 
-       lockdep_assert_held(&engine->i915->dev->struct_mutex);
+       lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
        if (list_empty(&req->list))
                return;
@@ -1677,14 +1680,14 @@ i915_wait_request(struct drm_i915_gem_request *req)
 
        interruptible = dev_priv->mm.interruptible;
 
-       BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+       BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
 
        ret = __i915_wait_request(req, interruptible, NULL, NULL);
        if (ret)
                return ret;
 
        /* If the GPU hung, we want to keep the requests to find the guilty. */
-       if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+       if (!i915_reset_in_progress(&dev_priv->gpu_error))
                __i915_gem_request_retire__upto(req);
 
        return 0;
@@ -1745,7 +1748,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
        else if (obj->last_write_req == req)
                i915_gem_object_retire__write(obj);
 
-       if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+       if (!i915_reset_in_progress(&req->i915->gpu_error))
                __i915_gem_request_retire__upto(req);
 }
 
@@ -1758,7 +1761,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                            bool readonly)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
        int ret, i, n = 0;
 
@@ -1809,6 +1812,13 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
        return &fpriv->rps;
 }
 
+static enum fb_op_origin
+write_origin(struct drm_i915_gem_object *obj, unsigned domain)
+{
+       return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
+              ORIGIN_GTT : ORIGIN_CPU;
+}
+
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -1865,9 +1875,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 
        if (write_domain != 0)
-               intel_fb_obj_invalidate(obj,
-                                       write_domain == I915_GEM_DOMAIN_GTT ?
-                                       ORIGIN_GTT : ORIGIN_CPU);
+               intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
 
 unref:
        drm_gem_object_unreference(&obj->base);
@@ -1974,6 +1982,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                else
                        addr = -ENOMEM;
                up_write(&mm->mmap_sem);
+
+               /* This may race, but that's ok, it only gets set */
+               WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
        }
        drm_gem_object_unreference_unlocked(obj);
        if (IS_ERR((void *)addr))
@@ -2262,7 +2273,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int ret;
 
        dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -2478,7 +2489,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int page_count, i;
        struct address_space *mapping;
        struct sg_table *st;
@@ -2609,7 +2620,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        const struct drm_i915_gem_object_ops *ops = obj->ops;
        int ret;
 
@@ -2773,6 +2784,13 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
        }
        i915_gem_retire_requests(dev_priv);
 
+       /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+       if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+               while (intel_kick_waiters(dev_priv) ||
+                      intel_kick_signalers(dev_priv))
+                       yield();
+       }
+
        /* Finally reset hw state */
        for_each_engine(engine, dev_priv)
                intel_ring_init_seqno(engine, seqno);
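
The wrap handling here leans on i915_seqno_passed() from the i915_drv.h hunk earlier: subtraction modulo 2^32, reinterpreted as signed, makes "newer" well-defined across the wrap. A standalone userspace demonstration:

#include <stdint.h>
#include <assert.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        /* 0x00000002 was issued after 0xfffffffe across the u32 wrap:
         * 0x00000002 - 0xfffffffe == 4 (mod 2^32), which is >= 0 when
         * reinterpreted as int32_t, so the newer seqno "passes".
         */
        assert(seqno_passed(0x00000002u, 0xfffffffeu));
        assert(!seqno_passed(0xfffffffeu, 0x00000002u));
        return 0;
}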
@@ -2782,7 +2800,7 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        if (seqno == 0)
@@ -2822,6 +2840,26 @@ i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
        return 0;
 }
 
+static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       dev_priv->gt.active_engines |= intel_engine_flag(engine);
+       if (dev_priv->gt.awake)
+               return;
+
+       intel_runtime_pm_get_noresume(dev_priv);
+       dev_priv->gt.awake = true;
+
+       i915_update_gfx_val(dev_priv);
+       if (INTEL_GEN(dev_priv) >= 6)
+               gen6_rps_busy(dev_priv);
+
+       queue_delayed_work(dev_priv->wq,
+                          &dev_priv->gt.retire_work,
+                          round_jiffies_up_relative(HZ));
+}
+
 /*
  * NB: This function is not allowed to fail. Doing so would mean that the
  * request is not being tracked for completion but the work itself is
@@ -2832,7 +2870,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                        bool flush_caches)
 {
        struct intel_engine_cs *engine;
-       struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
        u32 reserved_tail;
@@ -2842,7 +2879,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                return;
 
        engine = request->engine;
-       dev_priv = request->i915;
        ringbuf = request->ringbuf;
 
        /*
@@ -2908,14 +2944,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        }
        /* Not allowed to fail! */
        WARN(ret, "emit|add_request failed: %d!\n", ret);
-
-       i915_queue_hangcheck(engine->i915);
-
-       queue_delayed_work(dev_priv->wq,
-                          &dev_priv->mm.retire_work,
-                          round_jiffies_up_relative(HZ));
-       intel_mark_busy(dev_priv);
-
        /* Sanity check that the reserved size was large enough. */
        ret = intel_ring_get_tail(ringbuf) - request_start;
        if (ret < 0)
@@ -2924,46 +2952,34 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                  "Not enough space reserved (%d bytes) "
                  "for adding the request (%d bytes)\n",
                  reserved_tail, ret);
+
+       i915_gem_mark_busy(engine);
 }
 
-static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
-                                  const struct i915_gem_context *ctx)
+static bool i915_context_is_banned(const struct i915_gem_context *ctx)
 {
        unsigned long elapsed;
 
-       elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
-
        if (ctx->hang_stats.banned)
                return true;
 
+       elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
        if (ctx->hang_stats.ban_period_seconds &&
            elapsed <= ctx->hang_stats.ban_period_seconds) {
-               if (!i915_gem_context_is_default(ctx)) {
-                       DRM_DEBUG("context hanging too fast, banning!\n");
-                       return true;
-               } else if (i915_stop_ring_allow_ban(dev_priv)) {
-                       if (i915_stop_ring_allow_warn(dev_priv))
-                               DRM_ERROR("gpu hanging too fast, banning!\n");
-                       return true;
-               }
+               DRM_DEBUG("context hanging too fast, banning!\n");
+               return true;
        }
 
        return false;
 }
 
-static void i915_set_reset_status(struct drm_i915_private *dev_priv,
-                                 struct i915_gem_context *ctx,
+static void i915_set_reset_status(struct i915_gem_context *ctx,
                                  const bool guilty)
 {
-       struct i915_ctx_hang_stats *hs;
-
-       if (WARN_ON(!ctx))
-               return;
-
-       hs = &ctx->hang_stats;
+       struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
 
        if (guilty) {
-               hs->banned = i915_context_is_banned(dev_priv, ctx);
+               hs->banned = i915_context_is_banned(ctx);
                hs->batch_active++;
                hs->guilty_ts = get_seconds();
        } else {
@@ -3012,7 +3028,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
        kref_init(&req->ref);
        req->i915 = dev_priv;
        req->engine = engine;
-       req->reset_counter = reset_counter;
        req->ctx  = ctx;
        i915_gem_context_reference(req->ctx);
 
@@ -3072,8 +3087,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
 
+       /* We are called by the error capture and reset at a random
+        * point in time. In particular, note that neither is crucially
+        * ordered with an interrupt. After a hang, the GPU is dead and we
+        * assume that no more writes can happen (we waited long enough for
+        * all writes that were in flight to be flushed) - adding an
+        * extra delay for a recent interrupt is pointless. Hence, we do
+        * not need an engine->irq_seqno_barrier() before the seqno reads.
+        */
        list_for_each_entry(request, &engine->request_list, list) {
-               if (i915_gem_request_completed(request, false))
+               if (i915_gem_request_completed(request))
                        continue;
 
                return request;
@@ -3082,27 +3105,23 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
        return NULL;
 }
 
-static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
-                                      struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
        bool ring_hung;
 
        request = i915_gem_find_active_request(engine);
-
        if (request == NULL)
                return;
 
        ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
-       i915_set_reset_status(dev_priv, request->ctx, ring_hung);
-
+       i915_set_reset_status(request->ctx, ring_hung);
        list_for_each_entry_continue(request, &engine->request_list, list)
-               i915_set_reset_status(dev_priv, request->ctx, false);
+               i915_set_reset_status(request->ctx, false);
 }
 
-static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
-                                       struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
        struct intel_ringbuffer *buffer;
 
@@ -3163,7 +3182,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
 
 void i915_gem_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        /*
@@ -3172,10 +3191,10 @@ void i915_gem_reset(struct drm_device *dev)
         * their reference to the objects, the inspection must be done first.
         */
        for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_status(dev_priv, engine);
+               i915_gem_reset_engine_status(engine);
 
        for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_cleanup(dev_priv, engine);
+               i915_gem_reset_engine_cleanup(engine);
 
        i915_gem_context_reset(dev);
 
@@ -3205,7 +3224,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
                                           struct drm_i915_gem_request,
                                           list);
 
-               if (!i915_gem_request_completed(request, true))
+               if (!i915_gem_request_completed(request))
                        break;
 
                i915_gem_request_retire(request);
@@ -3228,55 +3247,52 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
                i915_gem_object_retire__read(obj, engine->id);
        }
 
-       if (unlikely(engine->trace_irq_req &&
-                    i915_gem_request_completed(engine->trace_irq_req, true))) {
-               engine->irq_put(engine);
-               i915_gem_request_assign(&engine->trace_irq_req, NULL);
-       }
-
        WARN_ON(i915_verify_lists(engine->dev));
 }
 
-bool
-i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
-       bool idle = true;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       if (dev_priv->gt.active_engines == 0)
+               return;
+
+       GEM_BUG_ON(!dev_priv->gt.awake);
 
        for_each_engine(engine, dev_priv) {
                i915_gem_retire_requests_ring(engine);
-               idle &= list_empty(&engine->request_list);
-               if (i915.enable_execlists) {
-                       spin_lock_bh(&engine->execlist_lock);
-                       idle &= list_empty(&engine->execlist_queue);
-                       spin_unlock_bh(&engine->execlist_lock);
-               }
+               if (list_empty(&engine->request_list))
+                       dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
        }
 
-       if (idle)
-               mod_delayed_work(dev_priv->wq,
-                                  &dev_priv->mm.idle_work,
+       if (dev_priv->gt.active_engines == 0)
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->gt.idle_work,
                                   msecs_to_jiffies(100));
-
-       return idle;
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), mm.retire_work.work);
-       struct drm_device *dev = dev_priv->dev;
-       bool idle;
+               container_of(work, typeof(*dev_priv), gt.retire_work.work);
+       struct drm_device *dev = &dev_priv->drm;
 
        /* Come back later if the device is busy... */
-       idle = false;
        if (mutex_trylock(&dev->struct_mutex)) {
-               idle = i915_gem_retire_requests(dev_priv);
+               i915_gem_retire_requests(dev_priv);
                mutex_unlock(&dev->struct_mutex);
        }
-       if (!idle)
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+
+       /* Keep the retire handler running until we are finally idle.
+        * We do not need to do this test under locking as in the worst-case
+        * we queue the retire worker once too often.
+        */
+       if (READ_ONCE(dev_priv->gt.awake))
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->gt.retire_work,
                                   round_jiffies_up_relative(HZ));
 }
 
@@ -3284,25 +3300,55 @@ static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), mm.idle_work.work);
-       struct drm_device *dev = dev_priv->dev;
+               container_of(work, typeof(*dev_priv), gt.idle_work.work);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
+       unsigned int stuck_engines;
+       bool rearm_hangcheck;
+
+       if (!READ_ONCE(dev_priv->gt.awake))
+               return;
+
+       if (READ_ONCE(dev_priv->gt.active_engines))
+               return;
+
+       rearm_hangcheck =
+               cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+
+       if (!mutex_trylock(&dev->struct_mutex)) {
+               /* Currently busy, come back later */
+               mod_delayed_work(dev_priv->wq,
+                                &dev_priv->gt.idle_work,
+                                msecs_to_jiffies(50));
+               goto out_rearm;
+       }
+
+       if (dev_priv->gt.active_engines)
+               goto out_unlock;
 
        for_each_engine(engine, dev_priv)
-               if (!list_empty(&engine->request_list))
-                       return;
+               i915_gem_batch_pool_fini(&engine->batch_pool);
 
-       /* we probably should sync with hangcheck here, using cancel_work_sync.
-        * Also locking seems to be fubar here, engine->request_list is protected
-        * by dev->struct_mutex. */
+       GEM_BUG_ON(!dev_priv->gt.awake);
+       dev_priv->gt.awake = false;
+       rearm_hangcheck = false;
 
-       intel_mark_idle(dev_priv);
+       stuck_engines = intel_kick_waiters(dev_priv);
+       if (unlikely(stuck_engines)) {
+               DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
+               dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
+       }
 
-       if (mutex_trylock(&dev->struct_mutex)) {
-               for_each_engine(engine, dev_priv)
-                       i915_gem_batch_pool_fini(&engine->batch_pool);
+       if (INTEL_GEN(dev_priv) >= 6)
+               gen6_rps_idle(dev_priv);
+       intel_runtime_pm_put(dev_priv);
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
 
-               mutex_unlock(&dev->struct_mutex);
+out_rearm:
+       if (rearm_hangcheck) {
+               GEM_BUG_ON(!dev_priv->gt.awake);
+               i915_queue_hangcheck(dev_priv);
        }
 }
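
Taken together, the hunks above form a small lifecycle around gt.awake; summarising the transitions as implemented in this diff:

/*
 * i915_gem_mark_busy():         first request while idle: take a
 *                               runtime-pm wakeref, kick RPS, start
 *                               gt.retire_work.
 * i915_gem_retire_requests():   drop engines from gt.active_engines as
 *                               their queues drain; when the mask reaches
 *                               zero, schedule gt.idle_work after 100ms.
 * i915_gem_idle_work_handler(): if still quiet under the lock, park RPS,
 *                               release the wakeref and clear gt.awake,
 *                               re-arming hangcheck only if idling was
 *                               aborted.
 */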
 
@@ -3327,7 +3373,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
                if (req == NULL)
                        continue;
 
-               if (i915_gem_request_completed(req, true))
+               if (i915_gem_request_completed(req))
                        i915_gem_object_retire__read(obj, i);
        }
 
@@ -3435,7 +3481,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
        if (to == from)
                return 0;
 
-       if (i915_gem_request_completed(from_req, true))
+       if (i915_gem_request_completed(from_req))
                return 0;
 
        if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
@@ -3586,7 +3632,7 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int ret;
 
        if (list_empty(&vma->obj_link))
@@ -3662,26 +3708,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
        return __i915_vma_unbind(vma, false);
 }
 
-int i915_gpu_idle(struct drm_device *dev)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int ret;
 
-       /* Flush everything onto the inactive list. */
-       for_each_engine(engine, dev_priv) {
-               if (!i915.enable_execlists) {
-                       struct drm_i915_gem_request *req;
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-                       req = i915_gem_request_alloc(engine, NULL);
-                       if (IS_ERR(req))
-                               return PTR_ERR(req);
-
-                       ret = i915_switch_context(req);
-                       i915_add_request_no_flush(req);
-                       if (ret)
-                               return ret;
-               }
+       for_each_engine(engine, dev_priv) {
+               if (engine->last_context == NULL)
+                       continue;
 
                ret = intel_engine_idle(engine);
                if (ret)
@@ -4214,7 +4250,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
        enum i915_cache_level level;
@@ -4408,7 +4444,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 static int
 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
        struct drm_i915_gem_request *request, *target = NULL;
@@ -4444,9 +4480,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                return 0;
 
        ret = __i915_wait_request(target, true, NULL, NULL);
-       if (ret == 0)
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
-
        i915_gem_request_unreference(target);
 
        return ret;
@@ -4505,7 +4538,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
                       uint32_t alignment,
                       uint64_t flags)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_vma *vma;
        unsigned bound;
        int ret;
@@ -4669,7 +4702,7 @@ int
 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -4739,7 +4772,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        obj->fence_reg = I915_FENCE_REG_NONE;
        obj->madv = I915_MADV_WILLNEED;
 
-       i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+       i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4834,7 +4867,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_vma *vma, *next;
 
        intel_runtime_pm_get(dev_priv);
@@ -4938,7 +4971,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 static void
 i915_gem_stop_engines(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv)
@@ -4948,11 +4981,11 @@ i915_gem_stop_engines(struct drm_device *dev)
 int
 i915_gem_suspend(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gpu_idle(dev);
+       ret = i915_gem_wait_for_idle(dev_priv);
        if (ret)
                goto err;
 
@@ -4963,13 +4996,13 @@ i915_gem_suspend(struct drm_device *dev)
        mutex_unlock(&dev->struct_mutex);
 
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-       cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-       flush_delayed_work(&dev_priv->mm.idle_work);
+       cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+       flush_delayed_work(&dev_priv->gt.idle_work);
 
        /* Assert that we successfully flushed all the work and
         * reset the GPU back to its idle, low power state.
         */
-       WARN_ON(dev_priv->mm.busy);
+       WARN_ON(dev_priv->gt.awake);
 
        return 0;
 
@@ -4980,7 +5013,7 @@ i915_gem_suspend(struct drm_device *dev)
 
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen < 5 ||
            dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
@@ -5005,7 +5038,7 @@ void i915_gem_init_swizzling(struct drm_device *dev)
 
 static void init_unused_ring(struct drm_device *dev, u32 base)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(RING_CTL(base), 0);
        I915_WRITE(RING_HEAD(base), 0);
@@ -5032,7 +5065,7 @@ static void init_unused_rings(struct drm_device *dev)
 
 int i915_gem_init_engines(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        ret = intel_init_render_ring_buffer(dev);
@@ -5080,7 +5113,7 @@ int i915_gem_init_engines(struct drm_device *dev)
 int
 i915_gem_init_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        int ret;
 
@@ -5138,12 +5171,6 @@ i915_gem_init_hw(struct drm_device *dev)
        if (ret)
                goto out;
 
-       /*
-        * Increment the next seqno by 0x100 so we have a visible break
-        * on re-initialisation
-        */
-       ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
-
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        return ret;
@@ -5151,7 +5178,7 @@ i915_gem_init_hw(struct drm_device *dev)
 
 int i915_gem_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        mutex_lock(&dev->struct_mutex);
@@ -5208,7 +5235,7 @@ int i915_gem_init(struct drm_device *dev)
 void
 i915_gem_cleanup_engines(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv)
@@ -5225,7 +5252,7 @@ init_engine_lists(struct intel_engine_cs *engine)
 void
 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv))
@@ -5249,7 +5276,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 void
 i915_gem_load_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        dev_priv->objects =
@@ -5277,22 +5304,15 @@ i915_gem_load_init(struct drm_device *dev)
                init_engine_lists(&dev_priv->engine[i]);
        for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
-       INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+       INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
                          i915_gem_retire_work_handler);
-       INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+       INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
                          i915_gem_idle_work_handler);
+       init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-       /*
-        * Set initial sequence number for requests.
-        * Using this number allows the wraparound to happen early,
-        * catching any obvious problems.
-        */
-       dev_priv->next_seqno = ((u32)~0 - 0x1100);
-       dev_priv->last_seqno = ((u32)~0 - 0x1101);
-
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 
        init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -5378,7 +5398,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
                return -ENOMEM;
 
        file->driver_priv = file_priv;
-       file_priv->dev_priv = dev->dev_private;
+       file_priv->dev_priv = to_i915(dev);
        file_priv->file = file;
        INIT_LIST_HEAD(&file_priv->rps.link);
 
@@ -5424,7 +5444,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
                        struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
        struct i915_vma *vma;
 
        WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
@@ -5528,7 +5548,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
        struct page *page;
 
        /* Only default objects have per-page dirty tracking */
-       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+       if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
                return NULL;
 
        page = i915_gem_object_get_page(obj, n);
index 30d9b4fd30f32b636d304263ea4acb1e14d2eebd..3c97f0e7a003758e6d60f3a4f65b040c91d3a316 100644 (file)
@@ -154,7 +154,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
        struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
        int i;
 
-       lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        trace_i915_context_free(ctx);
 
        /*
@@ -250,7 +250,7 @@ static struct i915_gem_context *
 __create_hw_context(struct drm_device *dev,
                    struct drm_i915_file_private *file_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_gem_context *ctx;
        int ret;
 
@@ -268,6 +268,8 @@ __create_hw_context(struct drm_device *dev,
        list_add_tail(&ctx->link, &dev_priv->context_list);
        ctx->i915 = dev_priv;
 
+       ctx->ggtt_alignment = get_context_alignment(dev_priv);
+
        if (dev_priv->hw_context_size) {
                struct drm_i915_gem_object *obj =
                                i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
@@ -394,7 +396,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
 
 void i915_gem_context_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        lockdep_assert_held(&dev->struct_mutex);
 
@@ -410,7 +412,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 
 int i915_gem_context_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_gem_context *ctx;
 
        /* Init should only be called once per module load. Eventually the
@@ -451,26 +453,6 @@ int i915_gem_context_init(struct drm_device *dev)
                return PTR_ERR(ctx);
        }
 
-       if (!i915.enable_execlists && ctx->engine[RCS].state) {
-               int ret;
-
-               /* We may need to do things with the shrinker which
-                * require us to immediately switch back to the default
-                * context. This can cause a problem as pinning the
-                * default context also requires GTT space which may not
-                * be available. To avoid this we always pin the default
-                * context.
-                */
-               ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
-                                           get_context_alignment(dev_priv), 0);
-               if (ret) {
-                       DRM_ERROR("Failed to pin default global context (error %d)\n",
-                                 ret);
-                       i915_gem_context_unreference(ctx);
-                       return ret;
-               }
-       }
-
        dev_priv->kernel_context = ctx;
 
        DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -483,33 +465,45 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
 
-       lockdep_assert_held(&dev_priv->dev->struct_mutex);
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
        for_each_engine(engine, dev_priv) {
                if (engine->last_context) {
                        i915_gem_context_unpin(engine->last_context, engine);
                        engine->last_context = NULL;
                }
-
-               /* Force the GPU state to be reinitialised on enabling */
-               dev_priv->kernel_context->engine[engine->id].initialised =
-                       engine->init_context == NULL;
        }
 
-       /* Force the GPU state to be reinitialised on enabling */
-       dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
+       /* Force the GPU state to be restored on enabling */
+       if (!i915.enable_execlists) {
+               struct i915_gem_context *ctx;
+
+               list_for_each_entry(ctx, &dev_priv->context_list, link) {
+                       if (!i915_gem_context_is_default(ctx))
+                               continue;
+
+                       for_each_engine(engine, dev_priv)
+                               ctx->engine[engine->id].initialised = false;
+
+                       ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+               }
+
+               for_each_engine(engine, dev_priv) {
+                       struct intel_context *kce =
+                               &dev_priv->kernel_context->engine[engine->id];
+
+                       kce->initialised = true;
+               }
+       }
 }
 
 void i915_gem_context_fini(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_gem_context *dctx = dev_priv->kernel_context;
 
        lockdep_assert_held(&dev->struct_mutex);
 
-       if (!i915.enable_execlists && dctx->engine[RCS].state)
-               i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);
-
        i915_gem_context_unreference(dctx);
        dev_priv->kernel_context = NULL;
 
@@ -759,7 +753,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 
        /* Trying to pin first makes error handling easier. */
        ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
-                                   get_context_alignment(engine->i915),
+                                   to->ggtt_alignment,
                                    0);
        if (ret)
                return ret;
@@ -901,7 +895,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
        struct intel_engine_cs *engine = req->engine;
 
        WARN_ON(i915.enable_execlists);
-       lockdep_assert_held(&req->i915->dev->struct_mutex);
+       lockdep_assert_held(&req->i915->drm.struct_mutex);
 
        if (!req->ctx->engine[engine->id].state) {
                struct i915_gem_context *to = req->ctx;
@@ -1032,6 +1026,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                else
                        args->value = to_i915(dev)->ggtt.base.total;
                break;
+       case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+               args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -1077,6 +1074,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                        ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
                }
                break;
+       case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+               if (args->size) {
+                       ret = -EINVAL;
+               } else {
+                       if (args->value)
+                               ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
+                       else
+                               ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
+               }
+               break;
        default:
                ret = -EINVAL;
                break;
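
For context, a userspace sketch of toggling the new parameter (illustrative only: it assumes the drm_i915_gem_context_param uapi and libdrm's drmIoctl(), and fd/ctx_id are placeholders):

    struct drm_i915_gem_context_param p = {
            .ctx_id = ctx_id,
            .param  = I915_CONTEXT_PARAM_NO_ERROR_CAPTURE,
            .size   = 0,    /* must be zero, per the -EINVAL check above */
            .value  = 1,    /* opt this context out of GPU error capture */
    };
    drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
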
@@ -1089,7 +1096,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
                                       void *data, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_reset_stats *args = data;
        struct i915_ctx_hang_stats *hs;
        struct i915_gem_context *ctx;
index b144c3f5c6504e51ecd88d694ac44d2db6b1cf9b..3c1280ec7ff648979ea96a17325a5aa6924e688d 100644 (file)
 #include "intel_drv.h"
 #include "i915_trace.h"
 
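+/* Legacy ring submission keeps engine->last_context pinned in the GGTT;
+ * before idling for eviction, switch every engine onto the kernel
+ * context so that pins held on behalf of user contexts are released.
+ */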
+static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+
+       if (i915.enable_execlists)
+               return 0;
+
+       for_each_engine(engine, dev_priv) {
+               struct drm_i915_gem_request *req;
+               int ret;
+
+               if (engine->last_context == NULL)
+                       continue;
+
+               if (engine->last_context == dev_priv->kernel_context)
+                       continue;
+
+               req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+               if (IS_ERR(req))
+                       return PTR_ERR(req);
+
+               ret = i915_switch_context(req);
+               i915_add_request_no_flush(req);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
@@ -150,11 +181,19 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 
        /* Only idle the GPU and repeat the search once */
        if (pass++ == 0) {
-               ret = i915_gpu_idle(dev);
+               struct drm_i915_private *dev_priv = to_i915(dev);
+
+               if (i915_is_ggtt(vm)) {
+                       ret = switch_to_pinned_context(dev_priv);
+                       if (ret)
+                               return ret;
+               }
+
+               ret = i915_gem_wait_for_idle(dev_priv);
                if (ret)
                        return ret;
 
-               i915_gem_retire_requests(to_i915(dev));
+               i915_gem_retire_requests(dev_priv);
                goto search_again;
        }
 
@@ -261,11 +300,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
        trace_i915_gem_evict_vm(vm);
 
        if (do_idle) {
-               ret = i915_gpu_idle(vm->dev);
+               struct drm_i915_private *dev_priv = to_i915(vm->dev);
+
+               if (i915_is_ggtt(vm)) {
+                       ret = switch_to_pinned_context(dev_priv);
+                       if (ret)
+                               return ret;
+               }
+
+               ret = i915_gem_wait_for_idle(dev_priv);
                if (ret)
                        return ret;
 
-               i915_gem_retire_requests(to_i915(vm->dev));
+               i915_gem_retire_requests(dev_priv);
 
                WARN_ON(!list_empty(&vm->active_list));
        }
index 7941f1fe9cd24e449678ae1de4f4918246e06196..1978633e7549ad4e7c7fd0f0d4eacfe4fce229c9 100644 (file)
@@ -1142,7 +1142,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
                            struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret, i;
 
        if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
@@ -1225,7 +1225,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 {
        struct drm_device *dev = params->dev;
        struct intel_engine_cs *engine = params->engine;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u64 exec_start, exec_len;
        int instp_mode;
        u32 instp_mask;
@@ -1328,10 +1328,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
        /* Check whether the file_priv has already selected one ring. */
        if ((int)file_priv->bsd_ring < 0) {
                /* If not, use the ping-pong mechanism to select one. */
-               mutex_lock(&dev_priv->dev->struct_mutex);
+               mutex_lock(&dev_priv->drm.struct_mutex);
                file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
                dev_priv->mm.bsd_ring_dispatch_index ^= 1;
-               mutex_unlock(&dev_priv->dev->struct_mutex);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
        }
 
        return file_priv->bsd_ring;
@@ -1477,6 +1477,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                dispatch_flags |= I915_DISPATCH_RS;
        }
 
+       /* Take a local wakeref for preparing to dispatch the execbuf as
+        * we expect to access the hardware fairly frequently in the
+        * process. Upon first dispatch, we acquire another prolonged
+        * wakeref that we hold until the GPU has been idle for at least
+        * 100ms.
+        */
        intel_runtime_pm_get(dev_priv);
 
        ret = i915_mutex_lock_interruptible(dev);
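
The wakeref taken above must be balanced on every exit path of the function. A minimal sketch of the pairing, assuming the usual i915 runtime-pm helpers:

    intel_runtime_pm_get(dev_priv);         /* hardware kept awake from here */
    /* ... prepare and dispatch the execbuf, touching registers ... */
    intel_runtime_pm_put(dev_priv);         /* every get needs a matching put */
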
index 2b6bdc267fb5ed590cd4935a65a7484dda0eec10..251d7a95af891dc5f4071196bc5168769cd74398 100644 (file)
@@ -58,7 +58,7 @@
 static void i965_write_fence_reg(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t fence_reg_lo, fence_reg_hi;
        int fence_pitch_shift;
 
@@ -117,7 +117,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
 
        if (obj) {
@@ -156,7 +156,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 static void i830_write_fence_reg(struct drm_device *dev, int reg,
                                struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t val;
 
        if (obj) {
@@ -193,7 +193,7 @@ inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Ensure that all CPU reads are completed before installing a fence
         * and all writes before removing the fence.
@@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int reg = fence_number(dev_priv, fence);
 
        i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
@@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_i915_fence_reg *fence;
        int ret;
 
@@ -311,7 +311,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 static struct drm_i915_fence_reg *
 i915_find_fence_reg(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_fence_reg *reg, *avail;
        int i;
 
@@ -367,7 +367,7 @@ int
 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool enable = obj->tiling_mode != I915_TILING_NONE;
        struct drm_i915_fence_reg *reg;
        int ret;
@@ -433,7 +433,7 @@ bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+               struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
                struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
 
                WARN_ON(!ggtt_vma ||
@@ -457,7 +457,7 @@ void
 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 {
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+               struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
                WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
                dev_priv->fence_regs[obj->fence_reg].pin_count--;
        }
@@ -472,7 +472,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
  */
 void i915_gem_restore_fences(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -549,7 +549,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
 void
 i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
        uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
index 5890017b9832a82c5d61e7141e4f07c1e3c356dc..10f1e32767e60c7f636161485658bc3f9f5ec99f 100644 (file)
@@ -153,7 +153,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 #endif
 
        /* Early VLV doesn't have this */
-       if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
+       if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
                DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
                return 0;
        }
@@ -1570,13 +1570,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
        struct i915_page_table *unused;
        gen6_pte_t scratch_pte;
        uint32_t pd_entry;
-       uint32_t  pte, pde, temp;
+       uint32_t  pte, pde;
        uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
        scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
                                     I915_CACHE_LLC, true, 0);
 
-       gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
+       gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
                u32 expected;
                gen6_pte_t *pt_vaddr;
                const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
@@ -1640,9 +1640,9 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_page_table *pt;
-       uint32_t pde, temp;
+       uint32_t pde;
 
-       gen6_for_each_pde(pt, pd, start, length, temp, pde)
+       gen6_for_each_pde(pt, pd, start, length, pde)
                gen6_write_pde(pd, pde, pt);
 
        /* Make sure write is complete before other code can use this page
@@ -1683,17 +1683,6 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
        return 0;
 }
 
-static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
-
-       I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
-       I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
-       return 0;
-}
-
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
                          struct drm_i915_gem_request *req)
 {
@@ -1731,21 +1720,16 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
                          struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
+       struct drm_i915_private *dev_priv = req->i915;
 
        I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
        I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
-
-       POSTING_READ(RING_PP_DIR_DCLV(engine));
-
        return 0;
 }
 
 static void gen8_ppgtt_enable(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv) {
@@ -1757,7 +1741,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
 
 static void gen7_ppgtt_enable(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        uint32_t ecochk, ecobits;
 
@@ -1782,7 +1766,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
 
 static void gen6_ppgtt_enable(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t ecochk, gab_ctl, ecobits;
 
        ecobits = I915_READ(GAC_ECO_BITS);
@@ -1875,7 +1859,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_page_table *pt;
        uint32_t start, length, start_save, length_save;
-       uint32_t pde, temp;
+       uint32_t pde;
        int ret;
 
        if (WARN_ON(start_in + length_in > ppgtt->base.total))
@@ -1891,7 +1875,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
         * need allocation. The second stage marks use ptes within the page
         * tables.
         */
-       gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+       gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
                if (pt != vm->scratch_pt) {
                        WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
                        continue;
@@ -1916,7 +1900,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
        start = start_save;
        length = length_save;
 
-       gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+       gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
                DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
 
                bitmap_zero(tmp_bitmap, GEN6_PTES);
@@ -1985,15 +1969,16 @@ static void gen6_free_scratch(struct i915_address_space *vm)
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+       struct i915_page_directory *pd = &ppgtt->pd;
+       struct drm_device *dev = vm->dev;
        struct i915_page_table *pt;
        uint32_t pde;
 
        drm_mm_remove_node(&ppgtt->node);
 
-       gen6_for_all_pdes(pt, ppgtt, pde) {
+       gen6_for_all_pdes(pt, pd, pde)
                if (pt != vm->scratch_pt)
-                       free_pt(ppgtt->base.dev, pt);
-       }
+                       free_pt(dev, pt);
 
        gen6_free_scratch(vm);
 }
@@ -2059,9 +2044,9 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
                                  uint64_t start, uint64_t length)
 {
        struct i915_page_table *unused;
-       uint32_t pde, temp;
+       uint32_t pde;
 
-       gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
+       gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
                ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
 }
 
@@ -2073,18 +2058,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        int ret;
 
        ppgtt->base.pte_encode = ggtt->base.pte_encode;
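+       /* The dedicated vgpu_mm_switch() removed above was identical to the
+        * gen6 path (minus a posting read), so vGPU now shares
+        * gen6_mm_switch().
+        */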
-       if (IS_GEN6(dev)) {
+       if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
                ppgtt->switch_mm = gen6_mm_switch;
-       } else if (IS_HASWELL(dev)) {
+       else if (IS_HASWELL(dev))
                ppgtt->switch_mm = hsw_mm_switch;
-       } else if (IS_GEN7(dev)) {
+       else if (IS_GEN7(dev))
                ppgtt->switch_mm = gen7_mm_switch;
-       } else
+       else
                BUG();
 
-       if (intel_vgpu_active(dev_priv))
-               ppgtt->switch_mm = vgpu_mm_switch;
-
        ret = gen6_ppgtt_alloc(ppgtt);
        if (ret)
                return ret;
@@ -2133,7 +2115,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
                                    struct drm_i915_private *dev_priv)
 {
        drm_mm_init(&vm->mm, vm->start, vm->total);
-       vm->dev = dev_priv->dev;
+       vm->dev = &dev_priv->drm;
        INIT_LIST_HEAD(&vm->active_list);
        INIT_LIST_HEAD(&vm->inactive_list);
        list_add_tail(&vm->global_link, &dev_priv->vm_list);
@@ -2141,7 +2123,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 
 static void gtt_write_workarounds(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* This function is for gtt related workarounds. This function is
         * called on driver load and after a GPU reset, so you can place
@@ -2160,7 +2142,7 @@ static void gtt_write_workarounds(struct drm_device *dev)
 
 static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
        ret = __hw_ppgtt_init(dev, ppgtt);
@@ -2261,8 +2243,8 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
        if (unlikely(ggtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
-               if (i915_gpu_idle(dev_priv->dev)) {
-                       DRM_ERROR("Couldn't idle GPU\n");
+               if (i915_gem_wait_for_idle(dev_priv)) {
+                       DRM_ERROR("Failed to wait for idle; VT-d may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
@@ -2610,7 +2592,7 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
                                     uint64_t start,
                                     enum i915_cache_level cache_level, u32 unused)
 {
-       struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
        int rpm_atomic_seq;
@@ -2628,7 +2610,7 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
                                  uint64_t length,
                                  bool unused)
 {
-       struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned num_entries = length >> PAGE_SHIFT;
        int rpm_atomic_seq;
@@ -2709,7 +2691,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
        struct drm_device *dev = vma->vm->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = vma->obj;
        const uint64_t size = min_t(uint64_t,
                                    obj->base.size,
@@ -2735,7 +2717,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool interruptible;
 
        interruptible = do_idling(dev_priv);
@@ -3137,7 +3119,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->base.unbind_vma = ggtt_unbind_vma;
        ggtt->base.insert_page = gen8_ggtt_insert_page;
        ggtt->base.clear_range = nop_clear_range;
-       if (!USES_FULL_PPGTT(dev_priv))
+       if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
                ggtt->base.clear_range = gen8_ggtt_clear_range;
 
        ggtt->base.insert_entries = gen8_ggtt_insert_entries;
@@ -3197,7 +3179,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
-       ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+       ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
        if (!ret) {
                DRM_ERROR("failed to set up gmch\n");
                return -EIO;
@@ -3206,7 +3188,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
                      &ggtt->mappable_base, &ggtt->mappable_end);
 
-       ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
+       ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
        ggtt->base.insert_page = i915_ggtt_insert_page;
        ggtt->base.insert_entries = i915_ggtt_insert_entries;
        ggtt->base.clear_range = i915_ggtt_clear_range;
index 163b564fb87dcc644d8ddb624e33b761356861fe..aa5f31d1c2edaaa4d41ee220c7b4b33005016ae8 100644 (file)
@@ -390,27 +390,27 @@ struct i915_hw_ppgtt {
        void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
-/* For each pde iterates over every pde between from start until start + length.
- * If start, and start+length are not perfectly divisible, the macro will round
- * down, and up as needed. The macro modifies pde, start, and length. Dev is
- * only used to differentiate shift values. Temp is temp.  On gen6/7, start = 0,
- * and length = 2G effectively iterates over every PDE in the system.
- *
- * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not perfectly divisible, the macro will round
+ * down and up as needed. Start=0 and length=2G effectively iterates over
+ * every PDE in the system. The macro modifies ALL its parameters except 'pd',
+ * so each of the other parameters should preferably be a simple variable, or
+ * at most an lvalue with no side-effects!
  */
-#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
-       for (iter = gen6_pde_index(start); \
-            length > 0 && iter < I915_PDES ? \
-                       (pt = (pd)->page_table[iter]), 1 : 0; \
-            iter++, \
-            temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
-            temp = min_t(unsigned, temp, length), \
-            start += temp, length -= temp)
-
-#define gen6_for_all_pdes(pt, ppgtt, iter)  \
-       for (iter = 0;          \
-            pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
-            iter++)
+#define gen6_for_each_pde(pt, pd, start, length, iter)                 \
+       for (iter = gen6_pde_index(start);                              \
+            length > 0 && iter < I915_PDES &&                          \
+               (pt = (pd)->page_table[iter], true);                    \
+            ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);         \
+                   temp = min(temp - start, length);                   \
+                   start += temp, length -= temp; }), ++iter)
+
+#define gen6_for_all_pdes(pt, pd, iter)                                        \
+       for (iter = 0;                                                  \
+            iter < I915_PDES &&                                        \
+               (pt = (pd)->page_table[iter], true);                    \
+            ++iter)
 
 static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
 {
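
A typical caller after this change (illustrative, mirroring gen6_alloc_va_range above; start_in/length_in stand for the caller's range): the macro now consumes start and length through a GNU C statement expression, so callers needing the original values afterwards must save copies first.

    struct i915_page_table *pt;
    uint32_t pde;
    uint32_t start = start_in, length = length_in;  /* consumed by the macro */

    gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
            /* pt is ppgtt->pd.page_table[pde] for each pde in the range */
    }
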
index b7c1b5fb61eaf8680c067b4a042e869a00b45893..f75bbd67a13a2805768cb5ffb9735a3df49b8957 100644 (file)
@@ -58,7 +58,7 @@ static int render_state_init(struct render_state *so,
        if (so->rodata->batch_items * 4 > 4096)
                return -EINVAL;
 
-       so->obj = i915_gem_object_create(dev_priv->dev, 4096);
+       so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
        if (IS_ERR(so->obj))
                return PTR_ERR(so->obj);
 
index 538c3049984806a6b94389648e3d6669974116b0..067632ad2f293e9d8655507d3135c9557cfd6aa7 100644 (file)
@@ -257,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        unsigned long count;
        bool unlock;
@@ -265,6 +265,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
        if (!i915_gem_shrinker_lock(dev, &unlock))
                return 0;
 
+       i915_gem_retire_requests(dev_priv);
+
        count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (can_release_pages(obj))
@@ -286,7 +288,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        unsigned long freed;
        bool unlock;
 
@@ -321,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
 {
        unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
 
-       while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
+       while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
                schedule_timeout_killable(1);
                if (fatal_signal_pending(current))
                        return false;
@@ -342,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
 {
        dev_priv->mm.interruptible = slu->was_interruptible;
        if (slu->unlock)
-               mutex_unlock(&dev_priv->dev->struct_mutex);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static int
@@ -408,7 +410,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
                return NOTIFY_DONE;
 
        /* Force everything onto the inactive lists */
-       ret = i915_gpu_idle(dev_priv->dev);
+       ret = i915_gem_wait_for_idle(dev_priv);
        if (ret)
                goto out;
 
index e9cd822904085bfb952cb6ddc0c42e5a1cdc2b1e..66be299a1486f4f656ba4e3970249a0e5e909aeb 100644 (file)
@@ -270,7 +270,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 
 void i915_gem_cleanup_stolen(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;
@@ -550,7 +550,7 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
        if (obj->stolen) {
                i915_gem_stolen_remove_node(dev_priv, obj->stolen);
@@ -601,7 +601,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;
index a6eb5c47a49c79c3458713791a84ea6415c5f110..8030199731dbb5856bdce6f38b9cd386550e98cf 100644 (file)
@@ -162,7 +162,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                   struct drm_file *file)
 {
        struct drm_i915_gem_set_tiling *args = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        int ret = 0;
 
@@ -294,7 +294,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
                   struct drm_file *file)
 {
        struct drm_i915_gem_get_tiling *args = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
 
        obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
index 34ff2459ceea3aab4147b75b0d3a36723444d99c..9d73d2216adc6925f09b97f76a71690daf213374 100644 (file)
@@ -332,7 +332,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                            const struct i915_error_state_file_priv *error_priv)
 {
        struct drm_device *dev = error_priv->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_error_state *error = error_priv->error;
        struct drm_i915_error_object *obj;
        int i, j, offset, elt;
@@ -463,6 +463,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                        }
                }
 
+               if (error->ring[i].num_waiters) {
+                       err_printf(m, "%s --- %d waiters\n",
+                                  dev_priv->engine[i].name,
+                                  error->ring[i].num_waiters);
+                       for (j = 0; j < error->ring[i].num_waiters; j++) {
+                               err_printf(m, " seqno 0x%08x for %s [%d]\n",
+                                          error->ring[i].waiters[j].seqno,
+                                          error->ring[i].waiters[j].comm,
+                                          error->ring[i].waiters[j].pid);
+                       }
+               }
+
                if ((obj = error->ring[i].ringbuffer)) {
                        err_printf(m, "%s --- ringbuffer = 0x%08x\n",
                                   dev_priv->engine[i].name,
@@ -488,7 +500,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                                           hws_page[elt+1],
                                           hws_page[elt+2],
                                           hws_page[elt+3]);
-                                       offset += 16;
+                               offset += 16;
                        }
                }
 
@@ -605,8 +617,9 @@ static void i915_error_state_free(struct kref *error_ref)
                i915_error_object_free(error->ring[i].ringbuffer);
                i915_error_object_free(error->ring[i].hws_page);
                i915_error_object_free(error->ring[i].ctx);
-               kfree(error->ring[i].requests);
                i915_error_object_free(error->ring[i].wa_ctx);
+               kfree(error->ring[i].requests);
+               kfree(error->ring[i].waiters);
        }
 
        i915_error_object_free(error->semaphore_obj);
@@ -892,6 +905,48 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
        }
 }
 
+static void engine_record_waiters(struct intel_engine_cs *engine,
+                                 struct drm_i915_error_ring *ering)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct drm_i915_error_waiter *waiter;
+       struct rb_node *rb;
+       int count;
+
+       ering->num_waiters = 0;
+       ering->waiters = NULL;
+
+       spin_lock(&b->lock);
+       count = 0;
+       for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
+               count++;
+       spin_unlock(&b->lock);
+
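+       /* The lock is dropped around the allocation below, so the waiter
+        * tree may have grown in the meantime; the copy loop is therefore
+        * bounded by both the tree and the original count.
+        */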
+       waiter = NULL;
+       if (count)
+               waiter = kmalloc_array(count,
+                                      sizeof(struct drm_i915_error_waiter),
+                                      GFP_ATOMIC);
+       if (!waiter)
+               return;
+
+       ering->waiters = waiter;
+
+       spin_lock(&b->lock);
+       for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+               struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+               strcpy(waiter->comm, w->tsk->comm);
+               waiter->pid = w->tsk->pid;
+               waiter->seqno = w->seqno;
+               waiter++;
+
+               if (++ering->num_waiters == count)
+                       break;
+       }
+       spin_unlock(&b->lock);
+}
+
 static void i915_record_ring_state(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error,
                                   struct intel_engine_cs *engine,
@@ -926,10 +981,10 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
                ering->instdone = I915_READ(GEN2_INSTDONE);
        }
 
-       ering->waiting = waitqueue_active(&engine->irq_queue);
+       ering->waiting = intel_engine_has_waiter(engine);
        ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
        ering->acthd = intel_ring_get_active_head(engine);
-       ering->seqno = engine->get_seqno(engine);
+       ering->seqno = intel_engine_get_seqno(engine);
        ering->last_seqno = engine->last_submitted_seqno;
        ering->start = I915_READ_START(engine);
        ering->head = I915_READ_HEAD(engine);
@@ -1022,7 +1077,6 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct intel_engine_cs *engine = &dev_priv->engine[i];
-               struct intel_ringbuffer *rbuf;
 
                error->ring[i].pid = -1;
 
@@ -1032,14 +1086,15 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                error->ring[i].valid = true;
 
                i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
+               engine_record_waiters(engine, &error->ring[i]);
 
                request = i915_gem_find_active_request(engine);
                if (request) {
                        struct i915_address_space *vm;
+                       struct intel_ringbuffer *rb;
 
-                       vm = request->ctx && request->ctx->ppgtt ?
-                               &request->ctx->ppgtt->base :
-                               &ggtt->base;
+                       vm = request->ctx->ppgtt ?
+                               &request->ctx->ppgtt->base : &ggtt->base;
 
                        /* We need to copy these to an anonymous buffer
                         * as the simplest method to avoid being overwritten
@@ -1066,26 +1121,17 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                                }
                                rcu_read_unlock();
                        }
-               }
-
-               if (i915.enable_execlists) {
-                       /* TODO: This is only a small fix to keep basic error
-                        * capture working, but we need to add more information
-                        * for it to be useful (e.g. dump the context being
-                        * executed).
-                        */
-                       if (request)
-                               rbuf = request->ctx->engine[engine->id].ringbuf;
-                       else
-                               rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
-               } else
-                       rbuf = engine->buffer;
 
-               error->ring[i].cpu_ring_head = rbuf->head;
-               error->ring[i].cpu_ring_tail = rbuf->tail;
+                       error->simulated |=
+                               request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
 
-               error->ring[i].ringbuffer =
-                       i915_error_ggtt_object_create(dev_priv, rbuf->obj);
+                       rb = request->ringbuf;
+                       error->ring[i].cpu_ring_head = rb->head;
+                       error->ring[i].cpu_ring_tail = rb->tail;
+                       error->ring[i].ringbuffer =
+                               i915_error_ggtt_object_create(dev_priv,
+                                                             rb->obj);
+               }
 
                error->ring[i].hws_page =
                        i915_error_ggtt_object_create(dev_priv,
@@ -1230,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
 static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        int i;
 
        /* General organization
@@ -1355,6 +1401,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
        struct drm_i915_error_state *error;
        unsigned long flags;
 
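+       /* Only the first error state is kept, until userspace clears it, so
+        * skip the expensive capture entirely if one is already pending.
+        */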
+       if (READ_ONCE(dev_priv->gpu_error.first_error))
+               return;
+
        /* Account for pipe specific data like PIPE*STAT */
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (!error) {
@@ -1378,12 +1427,14 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
        i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
        DRM_INFO("%s\n", error->error_msg);
 
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       if (dev_priv->gpu_error.first_error == NULL) {
-               dev_priv->gpu_error.first_error = error;
-               error = NULL;
+       if (!error->simulated) {
+               spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+               if (!dev_priv->gpu_error.first_error) {
+                       dev_priv->gpu_error.first_error = error;
+                       error = NULL;
+               }
+               spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
        }
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
        if (error) {
                i915_error_state_free(&error->ref);
@@ -1395,7 +1446,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
                DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
                DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
                DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
-               DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
+               DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+                        dev_priv->drm.primary->index);
                warned = true;
        }
 }
@@ -1403,7 +1455,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
 void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        spin_lock_irq(&dev_priv->gpu_error.lock);
        error_priv->error = dev_priv->gpu_error.first_error;
@@ -1421,7 +1473,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
 
 void i915_destroy_error_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_error_state *error;
 
        spin_lock_irq(&dev_priv->gpu_error.lock);
index 22a55ac4e51c32d27e9c17a07b4303c85e656bc6..2112e029db6a929ea7f55c4266ffbe24af860d83 100644 (file)
@@ -97,8 +97,14 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
 
        I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
 
-       /* No HOST2GUC command should take longer than 10ms */
-       ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
+       /*
+        * Fast commands should complete in less than 10us, so sample quickly
+        * up to that length of time, then switch to a slower sleep-wait loop.
+        * No HOST2GUC command should ever take longer than 10ms.
+        */
+       ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
+       if (ret)
+               ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
        if (status != GUC2HOST_STATUS_SUCCESS) {
                /*
                 * Either the GuC explicitly returned an error (which
@@ -153,12 +159,11 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
                                     struct i915_guc_client *client)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct drm_device *dev = dev_priv->dev;
        u32 data[2];
 
        data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,bxt */
-       if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev))
+       if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                data[1] = 0;
        else
                /* bit 0 and 1 are for Render and Media domain separately */
@@ -582,7 +587,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  */
 int i915_guc_submit(struct drm_i915_gem_request *rq)
 {
-       unsigned int engine_id = rq->engine->guc_id;
+       unsigned int engine_id = rq->engine->id;
        struct intel_guc *guc = &rq->i915->guc;
        struct i915_guc_client *client = guc->execbuf_client;
        int b_ret;
@@ -623,7 +628,7 @@ gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
 {
        struct drm_i915_gem_object *obj;
 
-       obj = i915_gem_object_create(dev_priv->dev, size);
+       obj = i915_gem_object_create(&dev_priv->drm, size);
        if (IS_ERR(obj))
                return NULL;
 
@@ -1034,7 +1039,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
  */
 int intel_guc_suspend(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc *guc = &dev_priv->guc;
        struct i915_gem_context *ctx;
        u32 data[3];
@@ -1060,7 +1065,7 @@ int intel_guc_suspend(struct drm_device *dev)
  */
 int intel_guc_resume(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc *guc = &dev_priv->guc;
        struct i915_gem_context *ctx;
        u32 data[3];
index 4378a659d962d96d31b0f5a503dbb6318c764855..1c2aec39241257fe63183a770053a135b68e2f61 100644 (file)
@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-       POSTING_READ(GTIMR);
 }
 
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
        ilk_update_gt_irq(dev_priv, mask, mask);
+       POSTING_READ_FW(GTIMR);
 }
 
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
@@ -351,9 +351,8 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
        spin_lock_irq(&dev_priv->irq_lock);
-
-       WARN_ON(dev_priv->rps.pm_iir);
-       WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+       WARN_ON_ONCE(dev_priv->rps.pm_iir);
+       WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
                                dev_priv->pm_rps_events);
@@ -371,11 +370,6 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.interrupts_enabled = false;
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       cancel_work_sync(&dev_priv->rps.work);
-
-       spin_lock_irq(&dev_priv->irq_lock);
 
        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 
@@ -384,8 +378,15 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
                                ~dev_priv->pm_rps_events);
 
        spin_unlock_irq(&dev_priv->irq_lock);
+       synchronize_irq(dev_priv->drm.irq);
 
-       synchronize_irq(dev_priv->dev->irq);
+       /* Now that we will not be generating any more work, flush any
+        * outstanding tasks. As we are called on the RPS idle path,
+        * we will reset the GPU to minimum frequencies, so the current
+        * state of the worker can be discarded.
+        */
+       cancel_work_sync(&dev_priv->rps.work);
+       gen6_reset_rps_interrupts(dev_priv);
 }
 
 /**
@@ -565,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
        u32 enable_mask;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+               enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
@@ -579,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
        u32 enable_mask;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+               enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
@@ -666,7 +667,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  */
 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        struct intel_crtc *intel_crtc =
@@ -713,7 +714,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 
 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
 }
@@ -722,7 +723,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode = &crtc->base.hwmode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;
@@ -774,7 +775,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                    ktime_t *stime, ktime_t *etime,
                                    const struct drm_display_mode *mode)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int position;
@@ -895,7 +896,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 
 int intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        unsigned long irqflags;
        int position;
 
@@ -976,13 +977,11 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 
 static void notify_ring(struct intel_engine_cs *engine)
 {
-       if (!intel_engine_initialized(engine))
-               return;
-
-       trace_i915_gem_request_notify(engine);
-       engine->user_interrupts++;
-
-       wake_up_all(&engine->irq_queue);
+       smp_store_mb(engine->breadcrumbs.irq_posted, true);
+       if (intel_engine_wakeup(engine)) {
+               trace_i915_gem_request_notify(engine);
+               engine->breadcrumbs.irq_wakeups++;
+       }
 }
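
The rewritten notify_ring() publishes the interrupt before attempting the
wakeup: smp_store_mb() is a store followed by a full memory barrier, so a
waiter that re-checks the flag after going to sleep cannot miss an
interrupt that fired in between. A sketch of that handshake, assuming one
flag and one wait queue (the breadcrumbs internals are not spelled out
here):

    #include <linux/sched.h>
    #include <linux/wait.h>

    static bool irq_posted;                  /* assumed per-engine flag */
    static DECLARE_WAIT_QUEUE_HEAD(demo_wq); /* assumed waiter queue */

    static void producer(void)               /* irq context */
    {
            smp_store_mb(irq_posted, true);  /* store, then full barrier */
            wake_up_all(&demo_wq);
    }

    static void consumer(void)
    {
            wait_event(demo_wq, READ_ONCE(irq_posted));
            WRITE_ONCE(irq_posted, false);   /* consume the event */
    }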
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1063,7 +1062,7 @@ static bool any_waiters(struct drm_i915_private *dev_priv)
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv)
-               if (engine->irq_refcount)
+               if (intel_engine_has_waiter(engine))
                        return true;
 
        return false;
@@ -1084,13 +1083,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
                return;
        }
 
-       /*
-        * The RPS work is synced during runtime suspend, we don't require a
-        * wakeref. TODO: instead of disabling the asserts make sure that we
-        * always hold an RPM reference while the work is running.
-        */
-       DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
-
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@@ -1103,7 +1095,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
 
        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
-               goto out;
+               return;
 
        mutex_lock(&dev_priv->rps.hw_lock);
 
@@ -1158,8 +1150,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
        intel_set_rps(dev_priv, new_delay);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
-out:
-       ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 }
 
 
@@ -1185,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work)
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
-       mutex_lock(&dev_priv->dev->struct_mutex);
+       mutex_lock(&dev_priv->drm.struct_mutex);
 
        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
@@ -1221,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work)
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;
 
-               kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+               kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);
 
                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
@@ -1241,7 +1231,7 @@ static void ivybridge_parity_work(struct work_struct *work)
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -1267,8 +1257,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv
 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
 {
-       if (gt_iir &
-           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+       if (gt_iir & GT_RENDER_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[VCS]);
@@ -1277,9 +1266,7 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
 {
-
-       if (gt_iir &
-           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+       if (gt_iir & GT_RENDER_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[VCS]);
@@ -1526,7 +1513,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 
        entry = &pipe_crc->entries[head];
 
-       entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
+       entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
                                                                 pipe);
        entry->crc[0] = crc0;
        entry->crc[1] = crc1;
@@ -1602,7 +1589,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
                gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
                if (dev_priv->rps.interrupts_enabled) {
                        dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-                       queue_work(dev_priv->wq, &dev_priv->rps.work);
+                       schedule_work(&dev_priv->rps.work);
                }
                spin_unlock(&dev_priv->irq_lock);
        }
@@ -1624,7 +1611,7 @@ static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
 {
        bool ret;
 
-       ret = drm_handle_vblank(dev_priv->dev, pipe);
+       ret = drm_handle_vblank(&dev_priv->drm, pipe);
        if (ret)
                intel_finish_page_flip_mmio(dev_priv, pipe);
 
@@ -1757,7 +1744,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;
 
        if (!intel_irqs_enabled(dev_priv))
@@ -1840,7 +1827,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;
 
        if (!intel_irqs_enabled(dev_priv))
@@ -2225,7 +2212,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
 
@@ -2438,7 +2425,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                        I915_WRITE(SDEIIR, iir);
                        ret = IRQ_HANDLED;
 
-                       if (HAS_PCH_SPT(dev_priv))
+                       if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
                                spt_irq_handler(dev_priv, iir);
                        else
                                cpt_irq_handler(dev_priv, iir);
@@ -2457,7 +2444,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 master_ctl;
        u32 gt_iir[4] = {};
        irqreturn_t ret;
@@ -2488,11 +2475,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        return ret;
 }
 
-static void i915_error_wake_up(struct drm_i915_private *dev_priv,
-                              bool reset_completed)
+static void i915_error_wake_up(struct drm_i915_private *dev_priv)
 {
-       struct intel_engine_cs *engine;
-
        /*
         * Notify all waiters for GPU completion events that reset state has
         * been changed, and that they need to restart their wait after
@@ -2501,18 +2485,10 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
         */
 
        /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-       for_each_engine(engine, dev_priv)
-               wake_up_all(&engine->irq_queue);
+       wake_up_all(&dev_priv->gpu_error.wait_queue);
 
        /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
        wake_up_all(&dev_priv->pending_flip_queue);
-
-       /*
-        * Signal tasks blocked in i915_gem_wait_for_error that the pending
-        * reset state is cleared.
-        */
-       if (reset_completed)
-               wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
 
 /**
@@ -2524,7 +2500,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
  */
 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 {
-       struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
+       struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2577,7 +2553,7 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
                 * Note: The wake_up also serves as a memory barrier so that
                 * waiters see the update value of the reset counter atomic_t.
                 */
-               i915_error_wake_up(dev_priv, true);
+               wake_up_all(&dev_priv->gpu_error.reset_queue);
        }
 }
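
The reset path above reports progress to userspace purely through uevents
on the card's kobject: one I915_ERROR_UEVENT, one I915_RESET_UEVENT, and a
final "error cleared" event once waiters have been released. The API takes
a NULL-terminated list of KEY=value strings; a hedged sketch with made-up
keys:

    #include <linux/kobject.h>

    /* Announce a driver-defined state change to udev. The kobject is
     * whichever sysfs object the driver owns; the keys are illustrative.
     */
    static void announce_reset(struct kobject *kobj, bool done)
    {
            char *envp[] = {
                    done ? "RESET=0" : "RESET=1",
                    NULL,                /* env list is NULL-terminated */
            };

            kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
    }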
 
@@ -2714,7 +2690,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
                 * ensure that the waiters see the updated value of the reset
                 * counter atomic_t.
                 */
-               i915_error_wake_up(dev_priv, false);
+               i915_error_wake_up(dev_priv);
        }
 
        i915_reset_and_wakeup(dev_priv);
@@ -2725,7 +2701,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
  */
 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2742,7 +2718,7 @@ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
        uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
                                                     DE_PIPE_VBLANK(pipe);
@@ -2756,7 +2732,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2769,7 +2745,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2784,7 +2760,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
  */
 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2796,7 +2772,7 @@ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
        uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
                                                     DE_PIPE_VBLANK(pipe);
@@ -2808,7 +2784,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2819,7 +2795,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2835,9 +2811,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
 }
 
 static bool
-ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
 {
-       if (INTEL_GEN(dev_priv) >= 8) {
+       if (INTEL_GEN(engine->i915) >= 8) {
                return (ipehr >> 23) == 0x1c;
        } else {
                ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2908,7 +2884,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
                return NULL;
 
        ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-       if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
+       if (!ipehr_is_semaphore_wait(engine, ipehr))
                return NULL;
 
        /*
@@ -2966,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
                return -1;
 
-       if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
+       if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
                return 1;
 
        /* cursory check for an unkickable deadlock */
@@ -3078,23 +3054,21 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
        return HANGCHECK_HUNG;
 }
 
-static unsigned kick_waiters(struct intel_engine_cs *engine)
+static unsigned long kick_waiters(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *i915 = engine->i915;
-       unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
+       unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
 
-       if (engine->hangcheck.user_interrupts == user_interrupts &&
+       if (engine->hangcheck.user_interrupts == irq_count &&
            !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
-               if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
+               if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
                        DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
                                  engine->name);
-               else
-                       DRM_INFO("Fake missed irq on %s\n",
-                                engine->name);
-               wake_up_all(&engine->irq_queue);
+
+               intel_engine_enable_fake_irq(engine);
        }
 
-       return user_interrupts;
+       return irq_count;
 }
 /*
  * This is called when the chip hasn't reported back with completed
@@ -3110,9 +3084,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                container_of(work, typeof(*dev_priv),
                             gpu_error.hangcheck_work.work);
        struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int busy_count = 0, rings_hung = 0;
-       bool stuck[I915_NUM_ENGINES] = { 0 };
+       unsigned int hung = 0, stuck = 0;
+       int busy_count = 0;
 #define BUSY 1
 #define KICK 5
 #define HUNG 20
@@ -3121,12 +3094,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
        if (!i915.enable_hangcheck)
                return;
 
-       /*
-        * The hangcheck work is synced during runtime suspend, we don't
-        * require a wakeref. TODO: instead of disabling the asserts make
-        * sure that we hold a reference when this work is running.
-        */
-       DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
+       if (!READ_ONCE(dev_priv->gt.awake))
+               return;
 
        /* As enabling the GPU requires fairly extensive mmio access,
         * periodically arm the mmio checker to see if we are triggering
@@ -3134,11 +3103,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
         */
        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
-       for_each_engine_id(engine, dev_priv, id) {
+       for_each_engine(engine, dev_priv) {
+               bool busy = intel_engine_has_waiter(engine);
                u64 acthd;
                u32 seqno;
                unsigned user_interrupts;
-               bool busy = true;
 
                semaphore_clear_deadlocks(dev_priv);
 
@@ -3153,7 +3122,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                        engine->irq_seqno_barrier(engine);
 
                acthd = intel_ring_get_active_head(engine);
-               seqno = engine->get_seqno(engine);
+               seqno = intel_engine_get_seqno(engine);
 
                /* Reset stuck interrupts between batch advances */
                user_interrupts = 0;
@@ -3161,12 +3130,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                if (engine->hangcheck.seqno == seqno) {
                        if (ring_idle(engine, seqno)) {
                                engine->hangcheck.action = HANGCHECK_IDLE;
-                               if (waitqueue_active(&engine->irq_queue)) {
+                               if (busy) {
                                        /* Safeguard against driver failure */
                                        user_interrupts = kick_waiters(engine);
                                        engine->hangcheck.score += BUSY;
-                               } else
-                                       busy = false;
+                               }
                        } else {
                                /* We always increment the hangcheck score
                                 * if the ring is busy and still processing
@@ -3198,10 +3166,15 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                                        break;
                                case HANGCHECK_HUNG:
                                        engine->hangcheck.score += HUNG;
-                                       stuck[id] = true;
                                        break;
                                }
                        }
+
+                       if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+                               hung |= intel_engine_flag(engine);
+                               if (engine->hangcheck.action != HANGCHECK_HUNG)
+                                       stuck |= intel_engine_flag(engine);
+                       }
                } else {
                        engine->hangcheck.action = HANGCHECK_ACTIVE;
 
@@ -3226,48 +3199,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                busy_count += busy;
        }
 
-       for_each_engine_id(engine, dev_priv, id) {
-               if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
-                       DRM_INFO("%s on %s\n",
-                                stuck[id] ? "stuck" : "no progress",
-                                engine->name);
-                       rings_hung |= intel_engine_flag(engine);
-               }
-       }
+       if (hung) {
+               char msg[80];
+               int len;
 
-       if (rings_hung) {
-               i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
-               goto out;
+               /* If some rings hung but others were still busy, only
+                * blame the hanging rings in the synopsis.
+                */
+               if (stuck != hung)
+                       hung &= ~stuck;
+               len = scnprintf(msg, sizeof(msg),
+                               "%s on ", stuck == hung ? "No progress" : "Hang");
+               for_each_engine_masked(engine, dev_priv, hung)
+                       len += scnprintf(msg + len, sizeof(msg) - len,
+                                        "%s, ", engine->name);
+               msg[len-2] = '\0';
+
+               return i915_handle_error(dev_priv, hung, msg);
        }
 
+       /* Reset timer in case GPU hangs without another request being added */
        if (busy_count)
-               /* Reset timer case chip hangs without another request
-                * being added */
                i915_queue_hangcheck(dev_priv);
-
-out:
-       ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
-}
-
-void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
-{
-       struct i915_gpu_error *e = &dev_priv->gpu_error;
-
-       if (!i915.enable_hangcheck)
-               return;
-
-       /* Don't continually defer the hangcheck so that it is always run at
-        * least once after work has been scheduled on any ring. Otherwise,
-        * we will ignore a hung ring if a second ring is kept busy.
-        */
-
-       queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
-                          round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
 }
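
The new error message is built by walking the mask of hung engines and
appending each name, then truncating the final ", ". The same logic can be
exercised in plain userspace C (engine names and the mask are made up;
kernel scnprintf() additionally clamps its return value to the space
actually used, which snprintf() does not, but the buffer here is ample):

    #include <stdio.h>

    static const char *names[] = { "rcs", "vcs", "bcs", "vecs" };

    int main(void)
    {
            unsigned int hung = (1 << 0) | (1 << 2);  /* "rcs" and "bcs" hung */
            char msg[80];
            int len = snprintf(msg, sizeof(msg), "Hang on ");
            unsigned int i;

            for (i = 0; i < 4; i++)
                    if (hung & (1 << i))
                            len += snprintf(msg + len, sizeof(msg) - len,
                                            "%s, ", names[i]);
            msg[len - 2] = '\0';                      /* drop trailing ", " */
            printf("%s\n", msg);                      /* Hang on rcs, bcs */
            return 0;
    }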
 
 static void ibx_irq_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (HAS_PCH_NOP(dev))
                return;
@@ -3288,7 +3246,7 @@ static void ibx_irq_reset(struct drm_device *dev)
  */
 static void ibx_irq_pre_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (HAS_PCH_NOP(dev))
                return;
@@ -3300,7 +3258,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
 
 static void gen5_gt_irq_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        GEN5_IRQ_RESET(GT);
        if (INTEL_INFO(dev)->gen >= 6)
@@ -3360,7 +3318,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 */
 static void ironlake_irq_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(HWSTAM, 0xffffffff);
 
@@ -3375,7 +3333,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
 
 static void valleyview_irq_preinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(VLV_MASTER_IER, 0);
        POSTING_READ(VLV_MASTER_IER);
@@ -3398,7 +3356,7 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
 
 static void gen8_irq_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        I915_WRITE(GEN8_MASTER_IRQ, 0);
@@ -3444,12 +3402,12 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
        spin_unlock_irq(&dev_priv->irq_lock);
 
        /* make sure we're done processing display irqs */
-       synchronize_irq(dev_priv->dev->irq);
+       synchronize_irq(dev_priv->drm.irq);
 }
 
 static void cherryview_irq_preinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);
@@ -3470,7 +3428,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
        struct intel_encoder *encoder;
        u32 enabled_irqs = 0;
 
-       for_each_intel_encoder(dev_priv->dev, encoder)
+       for_each_intel_encoder(&dev_priv->drm, encoder)
                if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
                        enabled_irqs |= hpd[encoder->hpd_pin];
 
@@ -3601,7 +3559,7 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 
 static void ibx_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 mask;
 
        if (HAS_PCH_NOP(dev))
@@ -3618,7 +3576,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 
 static void gen5_gt_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pm_irqs, gt_irqs;
 
        pm_irqs = gt_irqs = 0;
@@ -3632,8 +3590,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 
        gt_irqs |= GT_RENDER_USER_INTERRUPT;
        if (IS_GEN5(dev)) {
-               gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
-                          ILK_BSD_USER_INTERRUPT;
+               gt_irqs |= ILK_BSD_USER_INTERRUPT;
        } else {
                gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
        }
@@ -3655,7 +3612,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 display_mask, extra_mask;
 
        if (INTEL_INFO(dev)->gen >= 7) {
@@ -3734,7 +3691,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
 
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        gen5_gt_irq_postinstall(dev);
 
@@ -3827,7 +3784,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 
 static int gen8_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (HAS_PCH_SPLIT(dev))
                ibx_irq_pre_postinstall(dev);
@@ -3846,7 +3803,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
 
 static int cherryview_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        gen8_gt_irq_postinstall(dev_priv);
 
@@ -3863,7 +3820,7 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
 
 static void gen8_irq_uninstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!dev_priv)
                return;
@@ -3873,7 +3830,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
 
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!dev_priv)
                return;
@@ -3893,7 +3850,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
 
 static void cherryview_irq_uninstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!dev_priv)
                return;
@@ -3913,7 +3870,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
 
 static void ironlake_irq_uninstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!dev_priv)
                return;
@@ -3923,7 +3880,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
 
 static void i8xx_irq_preinstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        for_each_pipe(dev_priv, pipe)
@@ -3935,7 +3892,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
 
 static int i8xx_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -3998,7 +3955,7 @@ static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u16 iir, new_iir;
        u32 pipe_stats[2];
        int pipe;
@@ -4075,7 +4032,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 
 static void i8xx_irq_uninstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        for_each_pipe(dev_priv, pipe) {
@@ -4090,7 +4047,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
 
 static void i915_irq_preinstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        if (I915_HAS_HOTPLUG(dev)) {
@@ -4108,7 +4065,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
 
 static int i915_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;
 
        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4187,7 +4144,7 @@ static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
 static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4292,7 +4249,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
 static void i915_irq_uninstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        if (I915_HAS_HOTPLUG(dev)) {
@@ -4314,7 +4271,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
 
 static void i965_irq_preinstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4330,7 +4287,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
 
 static int i965_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;
        u32 error_mask;
 
@@ -4414,7 +4371,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
 static irqreturn_t i965_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
        int ret = IRQ_NONE, pipe;
@@ -4523,7 +4480,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 
 static void i965_irq_uninstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        if (!dev_priv)
@@ -4553,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
  */
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        intel_hpd_init_work(dev_priv);
 
@@ -4631,7 +4588,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev->driver->disable_vblank = gen8_disable_vblank;
                if (IS_BROXTON(dev))
                        dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
-               else if (HAS_PCH_SPT(dev))
+               else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
                        dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
                else
                        dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
@@ -4687,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
         */
        dev_priv->pm.irqs_enabled = true;
 
-       return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
+       return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
 }
 
 /**
@@ -4699,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
  */
 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
 {
-       drm_irq_uninstall(dev_priv->dev);
+       drm_irq_uninstall(&dev_priv->drm);
        intel_hpd_cancel_work(dev_priv);
        dev_priv->pm.irqs_enabled = false;
 }
@@ -4713,9 +4670,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
-       dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+       dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
        dev_priv->pm.irqs_enabled = false;
-       synchronize_irq(dev_priv->dev->irq);
+       synchronize_irq(dev_priv->drm.irq);
 }
 
 /**
@@ -4728,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
 {
        dev_priv->pm.irqs_enabled = true;
-       dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
-       dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
+       dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
+       dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
 }
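
Most of this file's churn is the same mechanical conversion: dev->dev_private
becomes to_i915(dev) and dev_priv->dev becomes &dev_priv->drm, which only
works if struct drm_device is now embedded in drm_i915_private rather than
pointed to. Presumably to_i915() is a container_of(); a runnable userspace
sketch of the idiom, with stand-in struct layouts:

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal container_of(); the kernel's version also type-checks. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct drm_device { int irq; };

    struct drm_i915_private {
            struct drm_device drm;  /* embedded, replacing dev_private */
            int gen;
    };

    static struct drm_i915_private *to_i915(struct drm_device *dev)
    {
            return container_of(dev, struct drm_i915_private, drm);
    }

    int main(void)
    {
            struct drm_i915_private i915 = { .drm = { .irq = 42 }, .gen = 9 };

            printf("gen=%d irq=%d\n", to_i915(&i915.drm)->gen, i915.drm.irq);
            return 0;
    }
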
index 7effe68d552c9798837e6c55f35a6faf013cc651..8b13bfa47fbaaf2be040bfcd6a420ef8aaca0e1b 100644 (file)
@@ -224,6 +224,6 @@ module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600
 MODULE_PARM_DESC(enable_dpcd_backlight,
        "Enable support for DPCD backlight control (default:false)");
 
-module_param_named(enable_gvt, i915.enable_gvt, bool, 0600);
+module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
 MODULE_PARM_DESC(enable_gvt,
        "Enable support for Intel GVT-g graphics virtualization host support(default:false)");
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
new file mode 100644 (file)
index 0000000..949c016
--- /dev/null
@@ -0,0 +1,503 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+
+#include "i915_drv.h"
+
+#define GEN_DEFAULT_PIPEOFFSETS \
+       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+                         PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+                          TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+#define GEN_CHV_PIPEOFFSETS \
+       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+                         CHV_PIPE_C_OFFSET }, \
+       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+                          CHV_TRANSCODER_C_OFFSET, }, \
+       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
+                            CHV_PALETTE_C_OFFSET }
+
+#define CURSOR_OFFSETS \
+       .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+
+#define IVB_CURSOR_OFFSETS \
+       .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+
+#define BDW_COLORS \
+       .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define CHV_COLORS \
+       .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+
+static const struct intel_device_info intel_i830_info = {
+       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_845g_info = {
+       .gen = 2, .num_pipes = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+       .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+       .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+       .gen = 2, .num_pipes = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i915gm_info = {
+       .gen = 3, .is_mobile = 1, .num_pipes = 2,
+       .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .supports_tv = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945g_info = {
+       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945gm_info = {
+       .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+       .has_hotplug = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .supports_tv = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+       .gen = 4, .is_broadwater = 1, .num_pipes = 2,
+       .has_hotplug = 1,
+       .has_overlay = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+       .gen = 4, .is_crestline = 1, .num_pipes = 2,
+       .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+       .supports_tv = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g33_info = {
+       .gen = 3, .is_g33 = 1, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g45_info = {
+       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
+       .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+       .gen = 4, .is_g4x = 1, .num_pipes = 2,
+       .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+       .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .supports_tv = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+       .gen = 5, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+       .gen = 5, .is_mobile = 1, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+       .gen = 6, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+       .has_llc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+       .gen = 6, .is_mobile = 1, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+       .has_llc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+#define GEN7_FEATURES  \
+       .gen = 7, .num_pipes = 3, \
+       .need_gfx_hws = 1, .has_hotplug = 1, \
+       .has_fbc = 1, \
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+       .has_llc = 1, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       IVB_CURSOR_OFFSETS
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+       GEN7_FEATURES,
+       .is_ivybridge = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+       GEN7_FEATURES,
+       .is_ivybridge = 1,
+       .is_mobile = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info = {
+       GEN7_FEATURES,
+       .is_ivybridge = 1,
+       .num_pipes = 0, /* legal, last one wins */
+};
+
+#define VLV_FEATURES  \
+       .gen = 7, .num_pipes = 2, \
+       .need_gfx_hws = 1, .has_hotplug = 1, \
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+       .display_mmio_offset = VLV_DISPLAY_BASE, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
+static const struct intel_device_info intel_valleyview_m_info = {
+       VLV_FEATURES,
+       .is_valleyview = 1,
+       .is_mobile = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+       VLV_FEATURES,
+       .is_valleyview = 1,
+};
+
+#define HSW_FEATURES  \
+       GEN7_FEATURES, \
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+       .has_ddi = 1, \
+       .has_fpga_dbg = 1
+
+static const struct intel_device_info intel_haswell_d_info = {
+       HSW_FEATURES,
+       .is_haswell = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+       HSW_FEATURES,
+       .is_haswell = 1,
+       .is_mobile = 1,
+};
+
+#define BDW_FEATURES \
+       HSW_FEATURES, \
+       BDW_COLORS
+
+static const struct intel_device_info intel_broadwell_d_info = {
+       BDW_FEATURES,
+       .gen = 8,
+       .is_broadwell = 1,
+};
+
+static const struct intel_device_info intel_broadwell_m_info = {
+       BDW_FEATURES,
+       .gen = 8, .is_mobile = 1,
+       .is_broadwell = 1,
+};
+
+static const struct intel_device_info intel_broadwell_gt3d_info = {
+       BDW_FEATURES,
+       .gen = 8,
+       .is_broadwell = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_broadwell_gt3m_info = {
+       BDW_FEATURES,
+       .gen = 8, .is_mobile = 1,
+       .is_broadwell = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_cherryview_info = {
+       .gen = 8, .num_pipes = 3,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       .is_cherryview = 1,
+       .display_mmio_offset = VLV_DISPLAY_BASE,
+       GEN_CHV_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+       CHV_COLORS,
+};
+
+static const struct intel_device_info intel_skylake_info = {
+       BDW_FEATURES,
+       .is_skylake = 1,
+       .gen = 9,
+};
+
+static const struct intel_device_info intel_skylake_gt3_info = {
+       BDW_FEATURES,
+       .is_skylake = 1,
+       .gen = 9,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_broxton_info = {
+       .is_broxton = 1,
+       .gen = 9,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       .num_pipes = 3,
+       .has_ddi = 1,
+       .has_fpga_dbg = 1,
+       .has_fbc = 1,
+       .has_pooled_eu = 0,
+       GEN_DEFAULT_PIPEOFFSETS,
+       IVB_CURSOR_OFFSETS,
+       BDW_COLORS,
+};
+
+static const struct intel_device_info intel_kabylake_info = {
+       BDW_FEATURES,
+       .is_kabylake = 1,
+       .gen = 9,
+};
+
+static const struct intel_device_info intel_kabylake_gt3_info = {
+       BDW_FEATURES,
+       .is_kabylake = 1,
+       .gen = 9,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+/*
+ * Make sure the device matches here are ordered from most specific to most
+ * general.  For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+static const struct pci_device_id pciidlist[] = {
+       INTEL_I830_IDS(&intel_i830_info),
+       INTEL_I845G_IDS(&intel_845g_info),
+       INTEL_I85X_IDS(&intel_i85x_info),
+       INTEL_I865G_IDS(&intel_i865g_info),
+       INTEL_I915G_IDS(&intel_i915g_info),
+       INTEL_I915GM_IDS(&intel_i915gm_info),
+       INTEL_I945G_IDS(&intel_i945g_info),
+       INTEL_I945GM_IDS(&intel_i945gm_info),
+       INTEL_I965G_IDS(&intel_i965g_info),
+       INTEL_G33_IDS(&intel_g33_info),
+       INTEL_I965GM_IDS(&intel_i965gm_info),
+       INTEL_GM45_IDS(&intel_gm45_info),
+       INTEL_G45_IDS(&intel_g45_info),
+       INTEL_PINEVIEW_IDS(&intel_pineview_info),
+       INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
+       INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
+       INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
+       INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
+       INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
+       INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
+       INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
+       INTEL_HSW_D_IDS(&intel_haswell_d_info),
+       INTEL_HSW_M_IDS(&intel_haswell_m_info),
+       INTEL_VLV_M_IDS(&intel_valleyview_m_info),
+       INTEL_VLV_D_IDS(&intel_valleyview_d_info),
+       INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
+       INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
+       INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
+       INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+       INTEL_CHV_IDS(&intel_cherryview_info),
+       INTEL_SKL_GT1_IDS(&intel_skylake_info),
+       INTEL_SKL_GT2_IDS(&intel_skylake_info),
+       INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
+       INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
+       INTEL_BXT_IDS(&intel_broxton_info),
+       INTEL_KBL_GT1_IDS(&intel_kabylake_info),
+       INTEL_KBL_GT2_IDS(&intel_kabylake_info),
+       INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
+       INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+       {0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+extern int i915_driver_load(struct pci_dev *pdev,
+                           const struct pci_device_id *ent);
+
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct intel_device_info *intel_info =
+               (struct intel_device_info *) ent->driver_data;
+
+       if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
+               DRM_INFO("This hardware requires preliminary hardware support.\n"
+                        "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+               return -ENODEV;
+       }
+
+       /* Only bind to function 0 of the device. Early generations
+        * used function 1 as a placeholder for multi-head. That only
+        * causes confusion, especially on systems where both
+        * functions have the same PCI ID!
+        */
+       if (PCI_FUNC(pdev->devfn))
+               return -ENODEV;
+
+       /*
+        * apple-gmux is needed on dual GPU MacBook Pro
+        * to probe the panel if we're the inactive GPU.
+        */
+       if (vga_switcheroo_client_probe_defer(pdev))
+               return -EPROBE_DEFER;
+
+       return i915_driver_load(pdev, ent);
+}
+
+extern void i915_driver_unload(struct drm_device *dev);
+
+static void i915_pci_remove(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       i915_driver_unload(dev);
+       drm_dev_unref(dev);
+}
+
+extern const struct dev_pm_ops i915_pm_ops;
+
+static struct pci_driver i915_pci_driver = {
+       .name = DRIVER_NAME,
+       .id_table = pciidlist,
+       .probe = i915_pci_probe,
+       .remove = i915_pci_remove,
+       .driver.pm = &i915_pm_ops,
+};
+
+static int __init i915_init(void)
+{
+       bool use_kms = true;
+
+       /*
+        * Enable KMS by default, unless explicitly overridden by
+        * either the i915.modeset parameter or the
+        * vga_text_mode_force boot option.
+        */
+
+       if (i915.modeset == 0)
+               use_kms = false;
+
+       if (vgacon_text_force() && i915.modeset == -1)
+               use_kms = false;
+
+       if (!use_kms) {
+               /* Silently fail loading to not upset userspace. */
+               DRM_DEBUG_DRIVER("KMS disabled.\n");
+               return 0;
+       }
+
+       return pci_register_driver(&i915_pci_driver);
+}
+
+static void __exit i915_exit(void)
+{
+       if (!i915_pci_driver.driver.owner)
+               return;
+
+       pci_unregister_driver(&i915_pci_driver);
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR("Tungsten Graphics, Inc.");
+MODULE_AUTHOR("Intel Corporation");
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
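
The new file is a textbook table-driven PCI binding: each pci_device_id
carries a driver_data pointer (here the intel_device_info for that chip),
MODULE_DEVICE_TABLE() exports the list so udev can autoload the module, and
probe() reads the matched entry back. A skeleton of that shape, with
placeholder IDs and data:

    #include <linux/module.h>
    #include <linux/pci.h>

    static const struct pci_device_id demo_ids[] = {
            { PCI_DEVICE(0x8086, 0x1234), .driver_data = 1 },
            { }                             /* terminator */
    };
    MODULE_DEVICE_TABLE(pci, demo_ids);

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            /* ent->driver_data is whatever the matched table entry carried */
            return ent->driver_data ? 0 : -ENODEV;
    }

    static void demo_remove(struct pci_dev *pdev)
    {
    }

    static struct pci_driver demo_driver = {
            .name     = "demo",
            .id_table = demo_ids,
            .probe    = demo_probe,
            .remove   = demo_remove,
    };
    module_pci_driver(demo_driver);

    MODULE_LICENSE("GPL");
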
index c6bfbf8d7cca9e60636fcf6ae491f7f58687cc49..8bfde75789f60dada455a59ee69a3727d2c91e45 100644 (file)
@@ -7070,7 +7070,8 @@ enum {
 #define GEN6_RPDEUC                            _MMIO(0xA084)
 #define GEN6_RPDEUCSW                          _MMIO(0xA088)
 #define GEN6_RC_STATE                          _MMIO(0xA094)
-#define   RC6_STATE                            (1 << 18)
+#define   RC_SW_TARGET_STATE_SHIFT             16
+#define   RC_SW_TARGET_STATE_MASK              (7 << RC_SW_TARGET_STATE_SHIFT)
 #define GEN6_RC1_WAKE_RATE_LIMIT               _MMIO(0xA098)
 #define GEN6_RC6_WAKE_RATE_LIMIT               _MMIO(0xA09C)
 #define GEN6_RC6pp_WAKE_RATE_LIMIT             _MMIO(0xA0A0)
@@ -7085,12 +7086,16 @@ enum {
 #define GEN6_RC6pp_THRESHOLD                   _MMIO(0xA0C0)
 #define GEN6_PMINTRMSK                         _MMIO(0xA168)
 #define   GEN8_PMINTR_REDIRECT_TO_NON_DISP     (1<<31)
+#define GEN8_MISC_CTRL0                                _MMIO(0xA180)
 #define VLV_PWRDWNUPCTL                                _MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS          _MMIO(0xA0C4)
 #define GEN9_RENDER_PG_IDLE_HYSTERESIS         _MMIO(0xA0C8)
 #define GEN9_PG_ENABLE                         _MMIO(0xA210)
 #define GEN9_RENDER_PG_ENABLE                  (1<<0)
 #define GEN9_MEDIA_PG_ENABLE                   (1<<1)
+#define GEN8_PUSHBUS_CONTROL                   _MMIO(0xA248)
+#define GEN8_PUSHBUS_ENABLE                    _MMIO(0xA250)
+#define GEN8_PUSHBUS_SHIFT                     _MMIO(0xA25C)
 
 #define VLV_CHICKEN_3                          _MMIO(VLV_DISPLAY_BASE + 0x7040C)
 #define  PIXEL_OVERLAP_CNT_MASK                        (3 << 30)
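
RC6_STATE as a single bit gives way to a three-bit software target state at
bits 18:16, decoded with the usual mask-then-shift pair. A small userspace
check of the arithmetic (the register value is invented):

    #include <stdio.h>

    #define RC_SW_TARGET_STATE_SHIFT 16
    #define RC_SW_TARGET_STATE_MASK  (7 << RC_SW_TARGET_STATE_SHIFT)

    int main(void)
    {
            unsigned int rc_state = 0x00050000;  /* hypothetical GEN6_RC_STATE read */
            unsigned int target = (rc_state & RC_SW_TARGET_STATE_MASK)
                                  >> RC_SW_TARGET_STATE_SHIFT;

            printf("sw target state = %u\n", target);  /* prints 5 */
            return 0;
    }
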
index 34e061a9ef0673fde527c9ecbd14aabf532bb926..5cfe4c7716b4ec8a73a94ab66bdd27e9ae0abe76 100644 (file)
@@ -31,7 +31,7 @@
 
 static void i915_save_display(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Display arbitration control */
        if (INTEL_INFO(dev)->gen <= 4)
@@ -63,7 +63,7 @@ static void i915_save_display(struct drm_device *dev)
 
 static void i915_restore_display(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 mask = 0xffffffff;
 
        /* Display arbitration */
@@ -103,7 +103,7 @@ static void i915_restore_display(struct drm_device *dev)
 
 int i915_save_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        mutex_lock(&dev->struct_mutex);
@@ -148,7 +148,7 @@ int i915_save_state(struct drm_device *dev)
 
 int i915_restore_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        mutex_lock(&dev->struct_mutex);
index 02507bfc8def3159608ac59a4213d0157fbae005..d61829e54f93d74b4b1b7aab81c9c704c472d38f 100644 (file)
@@ -38,7 +38,7 @@
 static u32 calc_residency(struct drm_device *dev,
                          i915_reg_t reg)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u64 raw_time; /* 32b value may overflow during fixed point math */
        u64 units = 128ULL, div = 100000ULL;
        u32 ret;
@@ -166,7 +166,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
        struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
-       struct drm_i915_private *dev_priv = drm_dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(drm_dev);
        int slice = (int)(uintptr_t)attr->private;
        int ret;
 
@@ -202,7 +202,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
        struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
-       struct drm_i915_private *dev_priv = drm_dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(drm_dev);
        struct i915_gem_context *ctx;
        u32 *temp = NULL; /* Just here to make handling failures easy */
        int slice = (int)(uintptr_t)attr->private;
@@ -227,13 +227,6 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
                }
        }
 
-       ret = i915_gpu_idle(drm_dev);
-       if (ret) {
-               kfree(temp);
-               mutex_unlock(&drm_dev->struct_mutex);
-               return ret;
-       }
-
        /* TODO: Ideally we really want a GPU reset here to make sure errors
         * aren't propagated. Since I cannot find a stable way to reset the GPU
         * at this point it is left as a TODO.
@@ -275,7 +268,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -309,7 +302,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -330,7 +323,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        return snprintf(buf, PAGE_SIZE,
                        "%d\n",
@@ -341,7 +334,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -359,7 +352,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
        ssize_t ret;
 
@@ -409,7 +402,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -427,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
        ssize_t ret;
 
@@ -487,7 +480,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
 
        if (attr == &dev_attr_gt_RP0_freq_mhz)
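Each sysfs handler above first has to walk from the raw struct device handed in by the kernel back to the driver private; condensed, this is the chain these hunks keep intact while swapping only the last step:

	struct drm_minor *minor = dev_to_drm_minor(kdev);  /* sysfs device -> DRM minor */
	struct drm_device *dev = minor->dev;               /* DRM minor -> DRM device */
	struct drm_i915_private *dev_priv = to_i915(dev);  /* DRM device -> i915 private */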
index 6768db032f848cc7446c6c42d2484dc5f7d7ad2c..534154e05fbe47b70870e4149e8265caf4a89572 100644 (file)
@@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = i915->dev->primary->index;
+                          __entry->dev = i915->drm.primary->index;
                           __entry->target = target;
                           __entry->flags = flags;
                           ),
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = from->i915->dev->primary->index;
+                          __entry->dev = from->i915->drm.primary->index;
                           __entry->sync_from = from->id;
                           __entry->sync_to = to_req->engine->id;
                           __entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,11 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = req->i915->dev->primary->index;
+                          __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
                           __entry->seqno = req->seqno;
                           __entry->flags = flags;
-                          i915_trace_irq_get(req->engine, req);
+                          intel_engine_enable_signaling(req);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -509,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = req->i915->dev->primary->index;
+                          __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
                           __entry->invalidate = invalidate;
                           __entry->flush = flush;
@@ -531,7 +531,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = req->i915->dev->primary->index;
+                          __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
                           __entry->seqno = req->seqno;
                           ),
@@ -556,9 +556,9 @@ TRACE_EVENT(i915_gem_request_notify,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = engine->i915->dev->primary->index;
+                          __entry->dev = engine->i915->drm.primary->index;
                           __entry->ring = engine->id;
-                          __entry->seqno = engine->get_seqno(engine);
+                          __entry->seqno = intel_engine_get_seqno(engine);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -593,11 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
             * less desirable.
             */
            TP_fast_assign(
-                          __entry->dev = req->i915->dev->primary->index;
+                          __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
                           __entry->seqno = req->seqno;
                           __entry->blocking =
-                                    mutex_is_locked(&req->i915->dev->struct_mutex);
+                                    mutex_is_locked(&req->i915->drm.struct_mutex);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -746,7 +746,7 @@ DECLARE_EVENT_CLASS(i915_context,
        TP_fast_assign(
                        __entry->ctx = ctx;
                        __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
-                       __entry->dev = ctx->i915->dev->primary->index;
+                       __entry->dev = ctx->i915->drm.primary->index;
        ),
 
        TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
@@ -786,7 +786,7 @@ TRACE_EVENT(switch_mm,
                        __entry->ring = engine->id;
                        __entry->to = to;
                        __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-                       __entry->dev = engine->i915->dev->primary->index;
+                       __entry->dev = engine->i915->drm.primary->index;
        ),
 
        TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
index b9329c2a670a0fa69e522726fa398e577ddfb18e..6700a7be7f78755675d6a8c1c17c69377d0117b4 100644 (file)
@@ -154,7 +154,7 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
 {
        if (((mode->clock == TMDS_297M) ||
                 (mode->clock == TMDS_296M)) &&
-               intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+               intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
                return true;
        else
                return false;
@@ -165,7 +165,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
                               i915_reg_t reg_elda, uint32_t bits_elda,
                               i915_reg_t reg_edid)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        uint8_t *eld = connector->eld;
        uint32_t tmp;
        int i;
@@ -189,7 +189,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
 
 static void g4x_audio_codec_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        uint32_t eldv, tmp;
 
        DRM_DEBUG_KMS("Disable audio codec\n");
@@ -210,7 +210,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        uint8_t *eld = connector->eld;
        uint32_t eldv;
        uint32_t tmp;
@@ -247,7 +247,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
 
 static void hsw_audio_codec_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum pipe pipe = intel_crtc->pipe;
        uint32_t tmp;
@@ -262,7 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
        tmp |= AUD_CONFIG_N_PROG_ENABLE;
        tmp &= ~AUD_CONFIG_UPPER_N_MASK;
        tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        I915_WRITE(HSW_AUD_CFG(pipe), tmp);
 
@@ -279,7 +279,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum pipe pipe = intel_crtc->pipe;
        struct i915_audio_component *acomp = dev_priv->audio_component;
@@ -328,7 +328,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
        tmp = I915_READ(HSW_AUD_CFG(pipe));
        tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
        tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        else
                tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -357,7 +357,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
 
 static void ilk_audio_codec_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_digital_port *intel_dig_port =
                enc_to_dig_port(&encoder->base);
@@ -389,7 +389,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
        tmp |= AUD_CONFIG_N_PROG_ENABLE;
        tmp &= ~AUD_CONFIG_UPPER_N_MASK;
        tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        I915_WRITE(aud_config, tmp);
 
@@ -405,7 +405,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_digital_port *intel_dig_port =
                enc_to_dig_port(&encoder->base);
@@ -475,7 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
        tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
        tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
        tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        else
                tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -496,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        struct drm_connector *connector;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_audio_component *acomp = dev_priv->audio_component;
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        enum port port = intel_dig_port->port;
@@ -513,7 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
 
        /* ELD Conn_Type */
        connector->eld[5] &= ~(3 << 2);
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_crtc_has_dp_encoder(crtc->config))
                connector->eld[5] |= (1 << 2);
 
        connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -543,7 +543,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_audio_component *acomp = dev_priv->audio_component;
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        enum port port = intel_dig_port->port;
@@ -749,14 +749,14 @@ static int i915_audio_component_bind(struct device *i915_dev,
        if (WARN_ON(acomp->ops || acomp->dev))
                return -EEXIST;
 
-       drm_modeset_lock_all(dev_priv->dev);
+       drm_modeset_lock_all(&dev_priv->drm);
        acomp->ops = &i915_audio_component_ops;
        acomp->dev = i915_dev;
        BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
        for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
                acomp->aud_sample_rate[i] = 0;
        dev_priv->audio_component = acomp;
-       drm_modeset_unlock_all(dev_priv->dev);
+       drm_modeset_unlock_all(&dev_priv->drm);
 
        return 0;
 }
@@ -767,11 +767,11 @@ static void i915_audio_component_unbind(struct device *i915_dev,
        struct i915_audio_component *acomp = data;
        struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
 
-       drm_modeset_lock_all(dev_priv->dev);
+       drm_modeset_lock_all(&dev_priv->drm);
        acomp->ops = NULL;
        acomp->dev = NULL;
        dev_priv->audio_component = NULL;
-       drm_modeset_unlock_all(dev_priv->dev);
+       drm_modeset_unlock_all(&dev_priv->drm);
 }
 
 static const struct component_ops i915_audio_component_bind_ops = {
@@ -799,7 +799,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
 {
        int ret;
 
-       ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+       ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
        if (ret < 0) {
                DRM_ERROR("failed to add audio component (%d)\n", ret);
                /* continue with reduced functionality */
@@ -821,6 +821,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
        if (!dev_priv->audio_component_registered)
                return;
 
-       component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+       component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
        dev_priv->audio_component_registered = false;
 }
index da5ed4a850b9bf7c17bb412f3181df5d4913c061..c6e69e4cfa8314a051277797bed0ec1fbcceb3ea 100644 (file)
@@ -1426,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
 int
 intel_bios_init(struct drm_i915_private *dev_priv)
 {
-       struct pci_dev *pdev = dev_priv->dev->pdev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        const struct vbt_header *vbt = dev_priv->opregion.vbt;
        const struct bdb_header *bdb;
        u8 __iomem *bios = NULL;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
new file mode 100644 (file)
index 0000000..d89b2c9
--- /dev/null
@@ -0,0 +1,586 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+
+#include "i915_drv.h"
+
+static void intel_breadcrumbs_fake_irq(unsigned long data)
+{
+       struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+
+       /*
+        * The timer persists in case we cannot enable interrupts,
+        * or if we have previously seen seqno/interrupt incoherency
+        * ("missed interrupt" syndrome). Here the worker will wake up
+        * every jiffy in order to kick the oldest waiter to do the
+        * coherent seqno check.
+        */
+       rcu_read_lock();
+       if (intel_engine_wakeup(engine))
+               mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+       rcu_read_unlock();
+}
+
+static void irq_enable(struct intel_engine_cs *engine)
+{
+       /* Enabling the IRQ may miss the generation of the interrupt, but
+        * we still need to force the barrier before reading the seqno,
+        * just in case.
+        */
+       engine->breadcrumbs.irq_posted = true;
+
+       spin_lock_irq(&engine->i915->irq_lock);
+       engine->irq_enable(engine);
+       spin_unlock_irq(&engine->i915->irq_lock);
+}
+
+static void irq_disable(struct intel_engine_cs *engine)
+{
+       spin_lock_irq(&engine->i915->irq_lock);
+       engine->irq_disable(engine);
+       spin_unlock_irq(&engine->i915->irq_lock);
+
+       engine->breadcrumbs.irq_posted = false;
+}
+
+static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
+{
+       struct intel_engine_cs *engine =
+               container_of(b, struct intel_engine_cs, breadcrumbs);
+       struct drm_i915_private *i915 = engine->i915;
+
+       assert_spin_locked(&b->lock);
+       if (b->rpm_wakelock)
+               return;
+
+       /* Since we are waiting on a request, the GPU should be busy
+        * and should have its own rpm reference. For completeness,
+        * record an rpm reference for ourselves to cover the
+        * interrupt we unmask.
+        */
+       intel_runtime_pm_get_noresume(i915);
+       b->rpm_wakelock = true;
+
+       /* No interrupts? Kick the waiter every jiffy! */
+       if (intel_irqs_enabled(i915)) {
+               if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
+                       irq_enable(engine);
+               b->irq_enabled = true;
+       }
+
+       if (!b->irq_enabled ||
+           test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
+               mod_timer(&b->fake_irq, jiffies + 1);
+}
+
+static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
+{
+       struct intel_engine_cs *engine =
+               container_of(b, struct intel_engine_cs, breadcrumbs);
+
+       assert_spin_locked(&b->lock);
+       if (!b->rpm_wakelock)
+               return;
+
+       if (b->irq_enabled) {
+               irq_disable(engine);
+               b->irq_enabled = false;
+       }
+
+       intel_runtime_pm_put(engine->i915);
+       b->rpm_wakelock = false;
+}
+
+static inline struct intel_wait *to_wait(struct rb_node *node)
+{
+       return container_of(node, struct intel_wait, node);
+}
+
+static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
+                                             struct intel_wait *wait)
+{
+       assert_spin_locked(&b->lock);
+
+       /* This request is completed, so remove it from the tree, mark it as
+        * complete, and *then* wake up the associated task.
+        */
+       rb_erase(&wait->node, &b->waiters);
+       RB_CLEAR_NODE(&wait->node);
+
+       wake_up_process(wait->tsk); /* implicit smp_wmb() */
+}
+
+static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
+                                   struct intel_wait *wait)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct rb_node **p, *parent, *completed;
+       bool first;
+       u32 seqno;
+
+       /* Insert the request into the retirement ordered list
+        * of waiters by walking the rbtree. If we are the oldest
+        * seqno in the tree (the first to be retired), then
+        * set ourselves as the bottom-half.
+        *
+        * As we descend the tree, prune completed branches: since we hold
+        * the spinlock, we know that the first_waiter must be delayed, so we
+        * can reduce some of the sequential wake-up latency if we take action
+        * ourselves and wake up the completed tasks in parallel. Also, by
+        * removing stale elements in the tree, we may be able to reduce the
+        * ping-pong between the old bottom-half and ourselves as first-waiter.
+        */
+       first = true;
+       parent = NULL;
+       completed = NULL;
+       seqno = intel_engine_get_seqno(engine);
+
+       /* If the request completed before we managed to grab the spinlock,
+        * return now before adding ourselves to the rbtree. We let the
+        * current bottom-half handle any pending wakeups and instead
+        * try and get out of the way quickly.
+        */
+       if (i915_seqno_passed(seqno, wait->seqno)) {
+               RB_CLEAR_NODE(&wait->node);
+               return first;
+       }
+
+       p = &b->waiters.rb_node;
+       while (*p) {
+               parent = *p;
+               if (wait->seqno == to_wait(parent)->seqno) {
+                       /* We have multiple waiters on the same seqno, select
+                        * the highest priority task (that with the smallest
+                        * task->prio) to serve as the bottom-half for this
+                        * group.
+                        */
+                       if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
+                               p = &parent->rb_right;
+                               first = false;
+                       } else {
+                               p = &parent->rb_left;
+                       }
+               } else if (i915_seqno_passed(wait->seqno,
+                                            to_wait(parent)->seqno)) {
+                       p = &parent->rb_right;
+                       if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
+                               completed = parent;
+                       else
+                               first = false;
+               } else {
+                       p = &parent->rb_left;
+               }
+       }
+       rb_link_node(&wait->node, parent, p);
+       rb_insert_color(&wait->node, &b->waiters);
+       GEM_BUG_ON(!first && !b->irq_seqno_bh);
+
+       if (completed) {
+               struct rb_node *next = rb_next(completed);
+
+               GEM_BUG_ON(!next && !first);
+               if (next && next != &wait->node) {
+                       GEM_BUG_ON(first);
+                       b->first_wait = to_wait(next);
+                       smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+                       /* As there is a delay between reading the current
+                        * seqno, processing the completed tasks and selecting
+                        * the next waiter, we may have missed the interrupt
+                        * and so we need the next bottom-half to wake up.
+                        *
+                        * Also as we enable the IRQ, we may miss the
+                        * interrupt for that seqno, so we have to wake up
+                        * the next bottom-half in order to do a coherent check
+                        * in case the seqno passed.
+                        */
+                       __intel_breadcrumbs_enable_irq(b);
+                       if (READ_ONCE(b->irq_posted))
+                               wake_up_process(to_wait(next)->tsk);
+               }
+
+               do {
+                       struct intel_wait *crumb = to_wait(completed);
+                       completed = rb_prev(completed);
+                       __intel_breadcrumbs_finish(b, crumb);
+               } while (completed);
+       }
+
+       if (first) {
+               GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
+               b->first_wait = wait;
+               smp_store_mb(b->irq_seqno_bh, wait->tsk);
+               /* After assigning ourselves as the new bottom-half, we must
+                * perform a cursory check to prevent a missed interrupt.
+                * Either we miss the interrupt whilst programming the hardware,
+                * or if there was a previous waiter (for a later seqno) they
+                * may be woken instead of us (due to the inherent race
+                * in the unlocked read of b->irq_seqno_bh in the irq handler)
+                * and so we miss the wake up.
+                */
+               __intel_breadcrumbs_enable_irq(b);
+       }
+       GEM_BUG_ON(!b->irq_seqno_bh);
+       GEM_BUG_ON(!b->first_wait);
+       GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
+
+       return first;
+}
+
+bool intel_engine_add_wait(struct intel_engine_cs *engine,
+                          struct intel_wait *wait)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       bool first;
+
+       spin_lock(&b->lock);
+       first = __intel_engine_add_wait(engine, wait);
+       spin_unlock(&b->lock);
+
+       return first;
+}
+
+void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
+{
+       mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+}
+
+static inline bool chain_wakeup(struct rb_node *rb, int priority)
+{
+       return rb && to_wait(rb)->tsk->prio <= priority;
+}
+
+static inline int wakeup_priority(struct intel_breadcrumbs *b,
+                                 struct task_struct *tsk)
+{
+       if (tsk == b->signaler)
+               return INT_MIN;
+       else
+               return tsk->prio;
+}
+
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+                             struct intel_wait *wait)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+       /* Quick check to see if this waiter was already decoupled from
+        * the tree by the bottom-half to avoid contention on the spinlock
+        * by the herd.
+        */
+       if (RB_EMPTY_NODE(&wait->node))
+               return;
+
+       spin_lock(&b->lock);
+
+       if (RB_EMPTY_NODE(&wait->node))
+               goto out_unlock;
+
+       if (b->first_wait == wait) {
+               const int priority = wakeup_priority(b, wait->tsk);
+               struct rb_node *next;
+
+               GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
+
+               /* We are the current bottom-half. Find the next candidate,
+                * the first waiter in the queue on the remaining oldest
+                * request. As multiple seqnos may complete in the time it
+                * takes us to wake up and find the next waiter, we have to
+                * wake up that waiter for it to perform its own coherent
+                * completion check.
+                */
+               next = rb_next(&wait->node);
+               if (chain_wakeup(next, priority)) {
+                       /* If the next waiter is already complete,
+                        * wake it up and continue on to the next waiter. So
+                        * if we have a small herd, they will wake up in parallel
+                        * rather than sequentially, which should reduce
+                        * the overall latency in waking all the completed
+                        * clients.
+                        *
+                        * However, waking up a chain adds extra latency to
+                        * the first_waiter. This is undesirable if that
+                        * waiter is a high priority task.
+                        */
+                       u32 seqno = intel_engine_get_seqno(engine);
+
+                       while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
+                               struct rb_node *n = rb_next(next);
+
+                               __intel_breadcrumbs_finish(b, to_wait(next));
+                               next = n;
+                               if (!chain_wakeup(next, priority))
+                                       break;
+                       }
+               }
+
+               if (next) {
+                       /* In our haste, we may have completed the first waiter
+                        * before we enabled the interrupt. Do so now as we
+                        * have a second waiter for a future seqno. Afterwards,
+                        * we have to wake up that waiter in case we missed
+                        * the interrupt, or if we have to handle an
+                        * exception rather than a seqno completion.
+                        */
+                       b->first_wait = to_wait(next);
+                       smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+                       if (b->first_wait->seqno != wait->seqno)
+                               __intel_breadcrumbs_enable_irq(b);
+                       wake_up_process(b->irq_seqno_bh);
+               } else {
+                       b->first_wait = NULL;
+                       WRITE_ONCE(b->irq_seqno_bh, NULL);
+                       __intel_breadcrumbs_disable_irq(b);
+               }
+       } else {
+               GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
+       }
+
+       GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
+       rb_erase(&wait->node, &b->waiters);
+
+out_unlock:
+       GEM_BUG_ON(b->first_wait == wait);
+       GEM_BUG_ON(rb_first(&b->waiters) !=
+                  (b->first_wait ? &b->first_wait->node : NULL));
+       GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
+       spin_unlock(&b->lock);
+}
+
+static bool signal_complete(struct drm_i915_gem_request *request)
+{
+       if (!request)
+               return false;
+
+       /* If another process served as the bottom-half it may have already
+        * signalled that this wait is already completed.
+        */
+       if (intel_wait_complete(&request->signaling.wait))
+               return true;
+
+       /* Carefully check if the request is complete, giving time for the
+        * seqno to be visible or if the GPU hung.
+        */
+       if (__i915_request_irq_complete(request))
+               return true;
+
+       return false;
+}
+
+static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
+{
+       return container_of(rb, struct drm_i915_gem_request, signaling.node);
+}
+
+static void signaler_set_rtpriority(void)
+{
+       struct sched_param param = { .sched_priority = 1 };
+
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+}
+
+static int intel_breadcrumbs_signaler(void *arg)
+{
+       struct intel_engine_cs *engine = arg;
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct drm_i915_gem_request *request;
+
+       /* Install ourselves with high priority to reduce signalling latency */
+       signaler_set_rtpriority();
+
+       do {
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               /* We are either woken up by the interrupt bottom-half,
+                * or by a client adding a new signaller. In both cases,
+                * the GPU seqno may have advanced beyond our oldest signal.
+                * If it has, propagate the signal, remove the waiter and
+                * check again with the next oldest signal. Otherwise we
+                * need to wait for a new interrupt from the GPU or for
+                * a new client.
+                */
+               request = READ_ONCE(b->first_signal);
+               if (signal_complete(request)) {
+                       /* Wake up all other completed waiters and select the
+                        * next bottom-half for the next user interrupt.
+                        */
+                       intel_engine_remove_wait(engine,
+                                                &request->signaling.wait);
+
+                       /* Find the next oldest signal. Note that as we have
+                        * not been holding the lock, another client may
+                        * have installed an even older signal than the one
+                        * we just completed - so double check we are still
+                        * the oldest before picking the next one.
+                        */
+                       spin_lock(&b->lock);
+                       if (request == b->first_signal) {
+                               struct rb_node *rb =
+                                       rb_next(&request->signaling.node);
+                               b->first_signal = rb ? to_signaler(rb) : NULL;
+                       }
+                       rb_erase(&request->signaling.node, &b->signals);
+                       spin_unlock(&b->lock);
+
+                       i915_gem_request_unreference(request);
+               } else {
+                       if (kthread_should_stop())
+                               break;
+
+                       schedule();
+               }
+       } while (1);
+       __set_current_state(TASK_RUNNING);
+
+       return 0;
+}
+
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
+{
+       struct intel_engine_cs *engine = request->engine;
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct rb_node *parent, **p;
+       bool first, wakeup;
+
+       if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
+               return;
+
+       spin_lock(&b->lock);
+       if (unlikely(request->signaling.wait.tsk)) {
+               wakeup = false;
+               goto unlock;
+       }
+
+       request->signaling.wait.tsk = b->signaler;
+       request->signaling.wait.seqno = request->seqno;
+       i915_gem_request_reference(request);
+
+       /* First add ourselves into the list of waiters, but register our
+        * bottom-half as the signaller thread. As per usual, only the oldest
+        * waiter (not just signaller) is tasked as the bottom-half waking
+        * up all completed waiters after the user interrupt.
+        *
+        * If we are the oldest waiter, enable the irq (after which we
+        * must double check that the seqno did not complete).
+        */
+       wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);
+
+       /* Now insert ourselves into the retirement ordered list of signals
+        * on this engine. We track the oldest seqno as that will be the
+        * first signal to complete.
+        */
+       parent = NULL;
+       first = true;
+       p = &b->signals.rb_node;
+       while (*p) {
+               parent = *p;
+               if (i915_seqno_passed(request->seqno,
+                                     to_signaler(parent)->seqno)) {
+                       p = &parent->rb_right;
+                       first = false;
+               } else {
+                       p = &parent->rb_left;
+               }
+       }
+       rb_link_node(&request->signaling.node, parent, p);
+       rb_insert_color(&request->signaling.node, &b->signals);
+       if (first)
+               smp_store_mb(b->first_signal, request);
+
+unlock:
+       spin_unlock(&b->lock);
+
+       if (wakeup)
+               wake_up_process(b->signaler);
+}
+
+int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct task_struct *tsk;
+
+       spin_lock_init(&b->lock);
+       setup_timer(&b->fake_irq,
+                   intel_breadcrumbs_fake_irq,
+                   (unsigned long)engine);
+
+       /* Spawn a thread to provide a common bottom-half for all signals.
+        * As this is an asynchronous interface, we cannot steal the current
+        * task to handle the bottom-half of the user interrupt; therefore
+        * we create a thread to do the coherent seqno dance after the
+        * interrupt and then signal the waitqueue (via the dma-buf/fence).
+        */
+       tsk = kthread_run(intel_breadcrumbs_signaler, engine,
+                         "i915/signal:%d", engine->id);
+       if (IS_ERR(tsk))
+               return PTR_ERR(tsk);
+
+       b->signaler = tsk;
+
+       return 0;
+}
+
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+       if (!IS_ERR_OR_NULL(b->signaler))
+               kthread_stop(b->signaler);
+
+       del_timer_sync(&b->fake_irq);
+}
+
+unsigned int intel_kick_waiters(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       unsigned int mask = 0;
+
+       /* To avoid the task_struct disappearing beneath us as we wake up
+        * the process, we must first inspect the task_struct->state under the
+        * RCU lock, i.e. as we call wake_up_process() we must be holding the
+        * rcu_read_lock().
+        */
+       rcu_read_lock();
+       for_each_engine(engine, i915)
+               if (unlikely(intel_engine_wakeup(engine)))
+                       mask |= intel_engine_flag(engine);
+       rcu_read_unlock();
+
+       return mask;
+}
+
+unsigned int intel_kick_signalers(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       unsigned int mask = 0;
+
+       for_each_engine(engine, i915) {
+               if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
+                       wake_up_process(engine->breadcrumbs.signaler);
+                       mask |= intel_engine_flag(engine);
+               }
+       }
+
+       return mask;
+}
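Taken together, intel_breadcrumbs.c centralises all seqno waiting behind a single interrupt-driven bottom-half. A hedged sketch of how a request wait is expected to drive this API, condensed from the waiter flow described in the comments above (error handling and timeouts omitted; intel_wait_init() binds the current task):

	struct intel_wait wait;

	intel_wait_init(&wait, req->seqno);		/* wait.tsk = current */
	intel_engine_add_wait(req->engine, &wait);	/* may become the bottom-half */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
				      wait.seqno))
			break;				/* coherent seqno check */
		schedule();				/* woken by irq or fake_irq timer */
	}
	__set_current_state(TASK_RUNNING);
	intel_engine_remove_wait(req->engine, &wait);	/* hand off bottom-half duty */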
index 522f5a2de01597c73fe458bede96d028615abac7..bc0fef3d33356a1fb39f543ec04c31577694ecdb 100644 (file)
@@ -96,7 +96,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
 {
        struct drm_crtc *crtc = crtc_state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int i, pipe = intel_crtc->pipe;
        uint16_t coeffs[9] = { 0, };
@@ -207,7 +207,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
 {
        struct drm_crtc *crtc = state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = to_intel_crtc(crtc)->pipe;
        uint32_t mode;
 
@@ -255,7 +255,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
 void intel_color_set_csc(struct drm_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc_state->crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (dev_priv->display.load_csc_matrix)
                dev_priv->display.load_csc_matrix(crtc_state);
@@ -266,13 +266,13 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
                                    struct drm_property_blob *blob)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        int i;
 
        if (HAS_GMCH_DISPLAY(dev)) {
-               if (intel_crtc->config->has_dsi_encoder)
+               if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
@@ -313,7 +313,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
 {
        struct drm_crtc *crtc = crtc_state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *intel_crtc_state =
                to_intel_crtc_state(crtc_state);
@@ -343,7 +343,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
 {
        struct drm_crtc *crtc = state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
@@ -426,7 +426,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
 {
        struct drm_crtc *crtc = state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        struct drm_color_lut *lut;
        uint32_t i, lut_size;
@@ -485,7 +485,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
 void intel_color_load_luts(struct drm_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc_state->crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->display.load_luts(crtc_state);
 }
@@ -526,7 +526,7 @@ int intel_color_check(struct drm_crtc *crtc,
 void intel_color_init(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        drm_mode_crtc_set_gamma_size(crtc, 256);
 
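intel_color_set_csc() and intel_color_load_luts() above dispatch through per-platform hooks stored in dev_priv->display; those hooks are chosen once at init time. A minimal sketch of the pattern (the platform checks are illustrative, not the exact upstream selection):

	/* Illustrative only: the real assignments live in intel_color_init(). */
	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
		dev_priv->display.load_luts = cherryview_load_luts;
	} else if (IS_HASWELL(dev_priv)) {
		dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
		dev_priv->display.load_luts = haswell_load_luts;
	}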
index e115bcc6766fed43752d5de420cc8c14cf18eb9c..5819d524d9179dc225e698a8d1d59da558c2aea5 100644 (file)
@@ -67,7 +67,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
        enum intel_display_power_domain power_domain;
        u32 tmp;
@@ -98,7 +98,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
 
 static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
        u32 tmp, flags = 0;
 
@@ -146,7 +146,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -281,7 +281,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct intel_crt *crt = intel_attached_crt(connector);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 adpa;
        bool ret;
 
@@ -301,8 +301,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 
                I915_WRITE(crt->adpa_reg, adpa);
 
-               if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
-                            1000))
+               if (intel_wait_for_register(dev_priv,
+                                           crt->adpa_reg,
+                                           ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+                                           1000))
                        DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
 
                if (turn_off_dac) {
@@ -326,7 +328,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct intel_crt *crt = intel_attached_crt(connector);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 adpa;
        bool ret;
        u32 save_adpa;
@@ -338,8 +340,10 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 
        I915_WRITE(crt->adpa_reg, adpa);
 
-       if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
-                    1000)) {
+       if (intel_wait_for_register(dev_priv,
+                                   crt->adpa_reg,
+                                   ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+                                   1000)) {
                DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
                I915_WRITE(crt->adpa_reg, save_adpa);
        }
@@ -367,7 +371,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 stat;
        bool ret = false;
        int i, tries = 0;
@@ -394,9 +398,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
                                              CRT_HOTPLUG_FORCE_DETECT,
                                              CRT_HOTPLUG_FORCE_DETECT);
                /* wait for FORCE_DETECT to go off */
-               if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
-                             CRT_HOTPLUG_FORCE_DETECT) == 0,
-                            1000))
+               if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
+                                           CRT_HOTPLUG_FORCE_DETECT, 0,
+                                           1000))
                        DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
        }
 
@@ -449,7 +453,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
        struct intel_crt *crt = intel_attached_crt(connector);
-       struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
        struct edid *edid;
        struct i2c_adapter *i2c;
 
@@ -485,7 +489,7 @@ static enum drm_connector_status
 intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
 {
        struct drm_device *dev = crt->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t save_bclrpat;
        uint32_t save_vtotal;
        uint32_t vtotal, vactive;
@@ -600,7 +604,7 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_attached_crt(connector);
        struct intel_encoder *intel_encoder = &crt->base;
        enum intel_display_power_domain power_domain;
@@ -681,7 +685,7 @@ static void intel_crt_destroy(struct drm_connector *connector)
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_attached_crt(connector);
        struct intel_encoder *intel_encoder = &crt->base;
        enum intel_display_power_domain power_domain;
@@ -716,7 +720,7 @@ static int intel_crt_set_property(struct drm_connector *connector,
 static void intel_crt_reset(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_attached_crt(connector);
 
        if (INTEL_INFO(dev)->gen >= 5) {
@@ -743,6 +747,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_crt_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
+       .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_crt_destroy,
        .set_property = intel_crt_set_property,
@@ -791,7 +796,7 @@ void intel_crt_init(struct drm_device *dev)
        struct drm_connector *connector;
        struct intel_crt *crt;
        struct intel_connector *intel_connector;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t adpa_reg;
        u32 adpa;
 
@@ -879,8 +884,6 @@ void intel_crt_init(struct drm_device *dev)
 
        drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
-       drm_connector_register(connector);
-
        if (!I915_HAS_HOTPLUG(dev))
                intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
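The converted hotplug waits above replace open-coded wait_for() polling with intel_wait_for_register(). A sketch of the contract as inferred from these call sites (the helper itself is added elsewhere in this series):

	/*
	 * Poll until (I915_READ(reg) & mask) == value, giving up after
	 * timeout_ms. Returns 0 on success and a negative error code
	 * (-ETIMEDOUT) on timeout, hence the 'if (...)' timeout handling
	 * at the call sites above.
	 */
	int intel_wait_for_register(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 mask, u32 value,
				    unsigned int timeout_ms);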
index 2b3b428d9cd2b01e4d5d3627e89e712b18937547..c3b33a10c15cd8a6f74bcec4b7d442b786cce969 100644 (file)
  * be moved to FW_FAILED.
  */
 
-#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 1)
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
 MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 26)
 
-#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
 
@@ -286,7 +286,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
        uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
        uint32_t i;
        uint32_t *dmc_payload;
-       uint32_t required_min_version;
+       uint32_t required_version;
 
        if (!fw)
                return NULL;
@@ -303,24 +303,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
        csr->version = css_header->version;
 
        if (IS_KABYLAKE(dev_priv)) {
-               required_min_version = KBL_CSR_VERSION_REQUIRED;
+               required_version = KBL_CSR_VERSION_REQUIRED;
        } else if (IS_SKYLAKE(dev_priv)) {
-               required_min_version = SKL_CSR_VERSION_REQUIRED;
+               required_version = SKL_CSR_VERSION_REQUIRED;
        } else if (IS_BROXTON(dev_priv)) {
-               required_min_version = BXT_CSR_VERSION_REQUIRED;
+               required_version = BXT_CSR_VERSION_REQUIRED;
        } else {
                MISSING_CASE(INTEL_REVID(dev_priv));
-               required_min_version = 0;
+               required_version = 0;
        }
 
-       if (csr->version < required_min_version) {
-               DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
-                        " please upgrade to v%u.%u or later"
-                          " [" FIRMWARE_URL "].\n",
+       if (csr->version != required_version) {
+               DRM_INFO("Refusing to load DMC firmware v%u.%u,"
+                        " please use v%u.%u [" FIRMWARE_URL "].\n",
                         CSR_VERSION_MAJOR(csr->version),
                         CSR_VERSION_MINOR(csr->version),
-                        CSR_VERSION_MAJOR(required_min_version),
-                        CSR_VERSION_MINOR(required_min_version));
+                        CSR_VERSION_MAJOR(required_version),
+                        CSR_VERSION_MINOR(required_version));
                return NULL;
        }
 
@@ -413,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work)
        csr = &dev_priv->csr;
 
        ret = request_firmware(&fw, dev_priv->csr.fw_path,
-                              &dev_priv->dev->pdev->dev);
+                              &dev_priv->drm.pdev->dev);
        if (fw)
                dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
 
@@ -427,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work)
                         CSR_VERSION_MAJOR(csr->version),
                         CSR_VERSION_MINOR(csr->version));
        } else {
-               dev_notice(dev_priv->dev->dev,
+               dev_notice(dev_priv->drm.dev,
                           "Failed to load DMC firmware"
                           " [" FIRMWARE_URL "],"
                           " disabling runtime power management.\n");
index ad3b0ee5e55b4073aadf946ab04ff0319c6e2ce9..dd1d6fe122976edb5f6dd78428f34bb2b4313adb 100644 (file)
@@ -318,7 +318,7 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
        default:
                WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
                /* fallthrough and treat as unknown */
-       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_EDP:
        case INTEL_OUTPUT_HDMI:
        case INTEL_OUTPUT_UNKNOWN:
@@ -482,7 +482,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
                ddi_translations = ddi_translations_edp;
                size = n_edp_entries;
                break;
-       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_HDMI:
                ddi_translations = ddi_translations_dp;
                size = n_dp_entries;
@@ -543,7 +543,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 void hsw_fdi_link_train(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        u32 temp, i, rx_ctl_val;
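
The dev->dev_private to to_i915(dev) and dev_priv->dev to &dev_priv->drm substitutions that repeat through the rest of this diff all follow from one change: struct drm_i915_private now embeds its struct drm_device as the drm member instead of hanging off an opaque back-pointer. A compilable sketch of the idiom; the container_of round-trip below is the standard pattern, written out here as an assumption rather than copied from the driver.

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct drm_device { int dummy; };

    struct drm_i915_private {
            struct drm_device drm;  /* embedded, so &dev_priv->drm is free */
            /* ... driver-private state ... */
    };

    /* Walk back from the embedded member to the containing structure. */
    static inline struct drm_i915_private *to_i915(struct drm_device *dev)
    {
            return container_of(dev, struct drm_i915_private, drm);
    }

Both directions become constant pointer arithmetic, which is why the conversion can be applied mechanically to every function that follows.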
@@ -834,7 +834,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
        if (pipe_config->has_pch_encoder)
                dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                                    &pipe_config->fdi_m_n);
-       else if (pipe_config->has_dp_encoder)
+       else if (intel_crtc_has_dp_encoder(pipe_config))
                dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                                    &pipe_config->dp_m_n);
        else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
@@ -851,7 +851,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 static void skl_ddi_clock_get(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int link_clock = 0;
        uint32_t dpll_ctl1, dpll;
 
@@ -899,7 +899,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
 static void hsw_ddi_clock_get(struct intel_encoder *encoder,
                              struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int link_clock = 0;
        u32 val, pll;
 
@@ -971,7 +971,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
 static void bxt_ddi_clock_get(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = intel_ddi_get_encoder_port(encoder);
        uint32_t dpll = port;
 
@@ -1061,14 +1061,14 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
 
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        int type = intel_encoder->type;
        uint32_t temp;
 
-       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
+       if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
                WARN_ON(transcoder_is_dsi(cpu_transcoder));
 
                temp = TRANS_MSA_SYNC_CLK;
@@ -1096,7 +1096,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        uint32_t temp;
        temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -1113,7 +1113,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_crtc->pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1182,7 +1182,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
                temp |= TRANS_DDI_MODE_SELECT_FDI;
                temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
 
-       } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+       } else if (type == INTEL_OUTPUT_DP ||
                   type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
@@ -1223,7 +1223,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 {
        struct drm_device *dev = intel_connector->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *intel_encoder = intel_connector->encoder;
        int type = intel_connector->base.connector_type;
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1285,7 +1285,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
                            enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_ddi_get_encoder_port(encoder);
        enum intel_display_power_domain power_domain;
        u32 tmp;
@@ -1359,7 +1359,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
 {
        struct drm_crtc *crtc = &intel_crtc->base;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -1371,7 +1371,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
 
 void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
 {
-       struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
        if (cpu_transcoder != TRANSCODER_EDP)
@@ -1392,7 +1392,7 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
        dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
        hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
 
-       if (type == INTEL_OUTPUT_DISPLAYPORT) {
+       if (type == INTEL_OUTPUT_DP) {
                if (dp_iboost) {
                        iboost = dp_iboost;
                } else {
@@ -1450,7 +1450,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
        if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
                n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
                ddi_translations = bxt_ddi_translations_edp;
-       } else if (type == INTEL_OUTPUT_DISPLAYPORT
+       } else if (type == INTEL_OUTPUT_DP
                        || type == INTEL_OUTPUT_EDP) {
                n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
                ddi_translations = bxt_ddi_translations_dp;
@@ -1624,7 +1624,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 
        intel_ddi_clk_select(intel_encoder, crtc->config);
 
-       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+       if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
                intel_dp_set_link_params(intel_dp, crtc->config);
@@ -1648,7 +1648,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
        int type = intel_encoder->type;
        uint32_t val;
@@ -1669,7 +1669,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
        if (wait)
                intel_wait_ddi_buf_idle(dev_priv, port);
 
-       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+       if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
                intel_edp_panel_vdd_on(intel_dp);
@@ -1695,7 +1695,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
        struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
        int type = intel_encoder->type;
 
@@ -1734,7 +1734,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int type = intel_encoder->type;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (intel_crtc->config->has_audio) {
                intel_audio_codec_disable(intel_encoder);
@@ -1808,7 +1808,10 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
                                  enum dpio_phy phy)
 {
-       if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
+       if (intel_wait_for_register(dev_priv,
+                                   BXT_PORT_REF_DW3(phy),
+                                   GRC_DONE, GRC_DONE,
+                                   10))
                DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
 }
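
This is the first of many hunks that replace open-coded wait_for(I915_READ(reg) & BIT, timeout) polling with intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms), which succeeds once (read(reg) & mask) == value. A user-space sketch of that contract; read_reg(), fake_reg and the fixed 1 ms poll interval are stand-ins, not the driver's implementation.

    #include <stdint.h>
    #include <time.h>

    static uint32_t fake_reg;       /* stands in for the MMIO register */

    static uint32_t read_reg(uint32_t reg)
    {
            (void)reg;
            return fake_reg;        /* I915_READ() equivalent */
    }

    /* Return 0 once (read_reg(reg) & mask) == value, -1 on timeout. */
    static int wait_for_register(uint32_t reg, uint32_t mask, uint32_t value,
                                 unsigned int timeout_ms)
    {
            struct timespec one_ms = { .tv_nsec = 1000 * 1000 };
            unsigned int elapsed;

            for (elapsed = 0; elapsed <= timeout_ms; elapsed++) {
                    if ((read_reg(reg) & mask) == value)
                            return 0;
                    nanosleep(&one_ms, NULL);
            }
            return -1;
    }

    int main(void)
    {
            fake_reg = 0x1;
            return wait_for_register(0, 0x1, 0x1, 10); /* bit already set */
    }

Read this way, the GRC hunk above waits for the GRC_DONE bit to become set (mask and value both GRC_DONE), while the "off" waits further down, such as the PCH transcoder disables, pass value 0 to wait for a bit to clear.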
 
@@ -2121,7 +2124,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 
 void intel_ddi_fdi_disable(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        uint32_t val;
 
@@ -2154,7 +2157,7 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct intel_hdmi *intel_hdmi;
@@ -2208,7 +2211,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                break;
        case TRANS_DDI_MODE_SELECT_DP_SST:
        case TRANS_DDI_MODE_SELECT_DP_MST:
-               pipe_config->has_dp_encoder = true;
                pipe_config->lane_count =
                        ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
                intel_dp_get_m_n(intel_crtc, pipe_config);
@@ -2253,7 +2255,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int type = encoder->type;
        int port = intel_ddi_get_encoder_port(encoder);
        int ret;
@@ -2319,7 +2321,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
 
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
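
The next file in the diff, intel_device_info.c, collects the runtime device-info probing in one place. Its three *_sseu_info_init() routines share a single pattern: read a fuse register, extract the enable/disable field, and count bits with hweight32()/hweight8() (population count) to derive slice, subslice and EU totals. A compilable sketch of the pattern with a made-up fuse layout; __builtin_popcount stands in for hweight32().

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: bits 27:25 slice enable, 23:20 subslice disable. */
    #define F2_S_ENA_MASK   (0x7u << 25)
    #define F2_S_ENA_SHIFT  25
    #define F2_SS_DIS_MASK  (0xfu << 20)
    #define F2_SS_DIS_SHIFT 20

    int main(void)
    {
            /* Two slices up, one subslice fused off. */
            uint32_t fuse2 = (0x3u << 25) | (0x1u << 20);
            uint32_t s_enable   = (fuse2 & F2_S_ENA_MASK) >> F2_S_ENA_SHIFT;
            uint32_t ss_disable = (fuse2 & F2_SS_DIS_MASK) >> F2_SS_DIS_SHIFT;
            int ss_max = 4;

            int slice_total = __builtin_popcount(s_enable); /* hweight32() */
            int subslice_per_slice = ss_max - __builtin_popcount(ss_disable);

            printf("slices=%d subslices/slice=%d subslices=%d\n",
                   slice_total, subslice_per_slice,
                   slice_total * subslice_per_slice);
            return 0;
    }

The disable fields are global, so a fused-off subslice is subtracted from every enabled slice, which is exactly what the comments in the gen8 and gen9 init paths below spell out.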
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
new file mode 100644 (file)
index 0000000..cba137f
--- /dev/null
@@ -0,0 +1,388 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+void intel_device_info_dump(struct drm_i915_private *dev_priv)
+{
+       const struct intel_device_info *info = &dev_priv->info;
+
+#define PRINT_S(name) "%s"
+#define SEP_EMPTY
+#define PRINT_FLAG(name) info->name ? #name "," : ""
+#define SEP_COMMA ,
+       DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
+                        DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+                        info->gen,
+                        dev_priv->drm.pdev->device,
+                        dev_priv->drm.pdev->revision,
+                        DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
+#undef PRINT_S
+#undef SEP_EMPTY
+#undef PRINT_FLAG
+#undef SEP_COMMA
+}
+
+static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       u32 fuse, eu_dis;
+
+       fuse = I915_READ(CHV_FUSE_GT);
+
+       info->slice_total = 1;
+
+       if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+               info->subslice_per_slice++;
+               eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+                                CHV_FGT_EU_DIS_SS0_R1_MASK);
+               info->eu_total += 8 - hweight32(eu_dis);
+       }
+
+       if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+               info->subslice_per_slice++;
+               eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+                                CHV_FGT_EU_DIS_SS1_R1_MASK);
+               info->eu_total += 8 - hweight32(eu_dis);
+       }
+
+       info->subslice_total = info->subslice_per_slice;
+       /*
+        * CHV is expected to always have a uniform distribution of EU
+        * across subslices.
+        */
+       info->eu_per_subslice = info->subslice_total ?
+                               info->eu_total / info->subslice_total :
+                               0;
+       /*
+        * CHV supports subslice power gating on devices with more than
+        * one subslice, and supports EU power gating on devices with
+        * more than one EU pair per subslice.
+        */
+       info->has_slice_pg = 0;
+       info->has_subslice_pg = (info->subslice_total > 1);
+       info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       int s_max = 3, ss_max = 4, eu_max = 8;
+       int s, ss;
+       u32 fuse2, s_enable, ss_disable, eu_disable;
+       u8 eu_mask = 0xff;
+
+       fuse2 = I915_READ(GEN8_FUSE2);
+       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+       ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
+
+       info->slice_total = hweight32(s_enable);
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       info->subslice_per_slice = ss_max - hweight32(ss_disable);
+       info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+       /*
+        * Iterate through enabled slices and subslices to
+        * count the total enabled EU.
+        */
+       for (s = 0; s < s_max; s++) {
+               if (!(s_enable & BIT(s)))
+                       /* skip disabled slice */
+                       continue;
+
+               eu_disable = I915_READ(GEN9_EU_DISABLE(s));
+               for (ss = 0; ss < ss_max; ss++) {
+                       int eu_per_ss;
+
+                       if (ss_disable & BIT(ss))
+                               /* skip disabled subslice */
+                               continue;
+
+                       eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+                                                     eu_mask);
+
+                       /*
+                        * Record which subslice(s) have 7 EUs. We
+                        * can tune the hash used to spread work among
+                        * subslices if they are unbalanced.
+                        */
+                       if (eu_per_ss == 7)
+                               info->subslice_7eu[s] |= BIT(ss);
+
+                       info->eu_total += eu_per_ss;
+               }
+       }
+
+       /*
+        * SKL is expected to always have a uniform distribution
+        * of EU across subslices with the exception that any one
+        * EU in any one subslice may be fused off for die
+        * recovery. BXT is expected to be perfectly uniform in EU
+        * distribution.
+        */
+       info->eu_per_subslice = info->subslice_total ?
+                               DIV_ROUND_UP(info->eu_total,
+                                            info->subslice_total) : 0;
+       /*
+        * SKL supports slice power gating on devices with more than
+        * one slice, and supports EU power gating on devices with
+        * more than one EU pair per subslice. BXT supports subslice
+        * power gating on devices with more than one subslice, and
+        * supports EU power gating on devices with more than one EU
+        * pair per subslice.
+        */
+       info->has_slice_pg =
+               (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+               info->slice_total > 1;
+       info->has_subslice_pg =
+               IS_BROXTON(dev_priv) && info->subslice_total > 1;
+       info->has_eu_pg = info->eu_per_subslice > 2;
+
+       if (IS_BROXTON(dev_priv)) {
+#define IS_SS_DISABLED(_ss_disable, ss)    (_ss_disable & BIT(ss))
+               /*
+                * There is a HW issue in 2x6 fused down parts that requires
+                * Pooled EU to be enabled as a WA. The pool configuration
+                * changes depending upon which subslice is fused down. This
+                * doesn't apply if the device has all 3 subslices enabled.
+                */
+               /* WaEnablePooledEuFor2x6:bxt */
+               info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
+                                      (info->subslice_per_slice == 2 &&
+                                       INTEL_REVID(dev_priv) < BXT_REVID_C0));
+
+               info->min_eu_in_pool = 0;
+               if (info->has_pooled_eu) {
+                       if (IS_SS_DISABLED(ss_disable, 0) ||
+                           IS_SS_DISABLED(ss_disable, 2))
+                               info->min_eu_in_pool = 3;
+                       else if (IS_SS_DISABLED(ss_disable, 1))
+                               info->min_eu_in_pool = 6;
+                       else
+                               info->min_eu_in_pool = 9;
+               }
+#undef IS_SS_DISABLED
+       }
+}
+
+static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       const int s_max = 3, ss_max = 3, eu_max = 8;
+       int s, ss;
+       u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+       fuse2 = I915_READ(GEN8_FUSE2);
+       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+       ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+
+       eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
+       eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
+                       ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
+                        (32 - GEN8_EU_DIS0_S1_SHIFT));
+       eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
+                       ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
+                        (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+       info->slice_total = hweight32(s_enable);
+
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       info->subslice_per_slice = ss_max - hweight32(ss_disable);
+       info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+       /*
+        * Iterate through enabled slices and subslices to
+        * count the total enabled EU.
+        */
+       for (s = 0; s < s_max; s++) {
+               if (!(s_enable & (0x1 << s)))
+                       /* skip disabled slice */
+                       continue;
+
+               for (ss = 0; ss < ss_max; ss++) {
+                       u32 n_disabled;
+
+                       if (ss_disable & (0x1 << ss))
+                               /* skip disabled subslice */
+                               continue;
+
+                       n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+
+                       /*
+                        * Record which subslices have 7 EUs.
+                        */
+                       if (eu_max - n_disabled == 7)
+                               info->subslice_7eu[s] |= 1 << ss;
+
+                       info->eu_total += eu_max - n_disabled;
+               }
+       }
+
+       /*
+        * BDW is expected to always have a uniform distribution of EU across
+        * subslices with the exception that any one EU in any one subslice may
+        * be fused off for die recovery.
+        */
+       info->eu_per_subslice = info->subslice_total ?
+               DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+
+       /*
+        * BDW supports slice power gating on devices with more than
+        * one slice.
+        */
+       info->has_slice_pg = (info->slice_total > 1);
+       info->has_subslice_pg = 0;
+       info->has_eu_pg = 0;
+}
+
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ *   - it's judged too laborious to fill n static structures with the limit
+ *     when a simple if statement does the job,
+ *   - run-time checks (e.g. reading fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ *   - after the MMIO has been set up, as we are reading registers,
+ *   - after the PCH has been detected,
+ *   - before the first usage of the fields it can tweak.
+ */
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       enum pipe pipe;
+
+       /*
+        * Skylake and Broxton currently don't expose the topmost plane as its
+        * use is exclusive with the legacy cursor and we only want to expose
+        * one of those, not both. Until we can safely expose the topmost plane
+        * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+        * we don't expose the topmost plane at all to prevent ABI breakage
+        * down the line.
+        */
+       if (IS_BROXTON(dev_priv)) {
+               info->num_sprites[PIPE_A] = 2;
+               info->num_sprites[PIPE_B] = 2;
+               info->num_sprites[PIPE_C] = 1;
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               for_each_pipe(dev_priv, pipe)
+                       info->num_sprites[pipe] = 2;
+       else
+               for_each_pipe(dev_priv, pipe)
+                       info->num_sprites[pipe] = 1;
+
+       if (i915.disable_display) {
+               DRM_INFO("Display disabled (module parameter)\n");
+               info->num_pipes = 0;
+       } else if (info->num_pipes > 0 &&
+                  (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
+                  HAS_PCH_SPLIT(dev_priv)) {
+               u32 fuse_strap = I915_READ(FUSE_STRAP);
+               u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+
+               /*
+                * SFUSE_STRAP is supposed to have a bit signalling the display
+                * is fused off. Unfortunately it seems that, at least in
+                * certain cases, fused off display means that PCH display
+                * reads don't land anywhere. In that case, we read 0s.
+                *
+                * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+                * should be set when taking over after the firmware.
+                */
+               if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+                   sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+                   (dev_priv->pch_type == PCH_CPT &&
+                    !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+                       DRM_INFO("Display fused off, disabling\n");
+                       info->num_pipes = 0;
+               } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+                       DRM_INFO("PipeC fused off\n");
+                       info->num_pipes -= 1;
+               }
+       } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+               u32 dfsm = I915_READ(SKL_DFSM);
+               u8 disabled_mask = 0;
+               bool invalid;
+               int num_bits;
+
+               if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+                       disabled_mask |= BIT(PIPE_A);
+               if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+                       disabled_mask |= BIT(PIPE_B);
+               if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+                       disabled_mask |= BIT(PIPE_C);
+
+               num_bits = hweight8(disabled_mask);
+
+               switch (disabled_mask) {
+               case BIT(PIPE_A):
+               case BIT(PIPE_B):
+               case BIT(PIPE_A) | BIT(PIPE_B):
+               case BIT(PIPE_A) | BIT(PIPE_C):
+                       invalid = true;
+                       break;
+               default:
+                       invalid = false;
+               }
+
+               if (num_bits > info->num_pipes || invalid)
+                       DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+                                 disabled_mask);
+               else
+                       info->num_pipes -= num_bits;
+       }
+
+       /* Initialize slice/subslice/EU info */
+       if (IS_CHERRYVIEW(dev_priv))
+               cherryview_sseu_info_init(dev_priv);
+       else if (IS_BROADWELL(dev_priv))
+               broadwell_sseu_info_init(dev_priv);
+       else if (INTEL_INFO(dev_priv)->gen >= 9)
+               gen9_sseu_info_init(dev_priv);
+
+       info->has_snoop = !info->has_llc;
+
+       /* Snooping is broken on BXT A stepping. */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+               info->has_snoop = false;
+
+       DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+       DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+       DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+       DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+       DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+       DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+                        info->has_slice_pg ? "y" : "n");
+       DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+                        info->has_subslice_pg ? "y" : "n");
+       DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+                        info->has_eu_pg ? "y" : "n");
+}
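
intel_device_info_dump() near the top of this file builds a single printf from one flag list by expanding DEV_INFO_FOR_EACH_FLAG twice: once with PRINT_S/SEP_EMPTY to concatenate a "%s%s..." format string, and once with PRINT_FLAG/SEP_COMMA to produce the matching argument list. A self-contained sketch of the trick with a made-up three-flag list; the real list lives elsewhere in the driver and is much longer.

    #include <stdio.h>

    /* Hypothetical stand-in for the driver's DEV_INFO_FOR_EACH_FLAG. */
    #define FOR_EACH_FLAG(func, sep) \
            func(has_llc) sep \
            func(has_snoop) sep \
            func(has_pooled_eu)

    struct info { int has_llc, has_snoop, has_pooled_eu; };

    int main(void)
    {
            struct info i = { .has_llc = 1, .has_pooled_eu = 1 };

    /* First expansion builds the format string, second builds the args. */
    #define PRINT_S(name) "%s"
    #define SEP_EMPTY
    #define PRINT_FLAG(name) i.name ? #name "," : ""
    #define SEP_COMMA ,
            printf("flags=" FOR_EACH_FLAG(PRINT_S, SEP_EMPTY) "\n",
                   FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
    #undef PRINT_S
    #undef SEP_EMPTY
    #undef PRINT_FLAG
    #undef SEP_COMMA
            return 0;
    }

This prints flags=has_llc,has_pooled_eu, with unset flags contributing an empty string, so the flag list is declared exactly once and the debug dump can never fall out of sync with it.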
index 0b2cd669ac05e6a4e1b64a8f7a57f9fbfff4c471..be3b2cab2640e5c83232cf841b3b9ae70459b742 100644 (file)
@@ -530,52 +530,6 @@ needs_modeset(struct drm_crtc_state *state)
        return drm_atomic_crtc_needs_modeset(state);
 }
 
-/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct intel_encoder *encoder;
-
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
-               if (encoder->type == type)
-                       return true;
-
-       return false;
-}
-
-/**
- * Returns whether any output on the specified pipe will have the specified
- * type after a staged modeset is complete, i.e., the same as
- * intel_pipe_has_type() but looking at encoder->new_crtc instead of
- * encoder->crtc.
- */
-static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
-                                     int type)
-{
-       struct drm_atomic_state *state = crtc_state->base.state;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       struct intel_encoder *encoder;
-       int i, num_connectors = 0;
-
-       for_each_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != crtc_state->base.crtc)
-                       continue;
-
-               num_connectors++;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-               if (encoder->type == type)
-                       return true;
-       }
-
-       WARN_ON(num_connectors == 0);
-
-       return false;
-}
-
 /*
  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
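
The hunk above removes intel_pipe_has_type(), which walked the encoders currently attached to the pipe, and intel_pipe_will_have_type(), which walked connector states in a staged atomic update. The callers converted below use intel_crtc_has_type(crtc_state, type) and intel_crtc_has_dp_encoder(crtc_state) instead, answering the same question from the crtc state in both situations. A plausible shape for those helpers, assuming the crtc state caches its attached outputs as a bitmask; the output_types field name and the enum ordering are assumptions, not taken from this diff.

    #include <stdbool.h>

    enum intel_output_type {        /* illustrative subset, arbitrary order */
            INTEL_OUTPUT_LVDS,
            INTEL_OUTPUT_DP,
            INTEL_OUTPUT_EDP,
            INTEL_OUTPUT_DP_MST,
            INTEL_OUTPUT_DSI,
    };

    struct intel_crtc_state {
            unsigned int output_types;      /* bitmask of 1 << output type */
    };

    static inline bool intel_crtc_has_type(const struct intel_crtc_state *s,
                                           enum intel_output_type type)
    {
            return s->output_types & (1u << type);
    }

    static inline bool intel_crtc_has_dp_encoder(const struct intel_crtc_state *s)
    {
            return s->output_types & ((1u << INTEL_OUTPUT_DP) |
                                      (1u << INTEL_OUTPUT_EDP) |
                                      (1u << INTEL_OUTPUT_DP_MST));
    }

A bitmask of output types also subsumes the single-purpose flags, which is why the has_dp_encoder and has_dsi_encoder checks disappear in other hunks of this diff.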
@@ -688,7 +642,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
 {
        struct drm_device *dev = crtc_state->base.crtc->dev;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                /*
                 * For LVDS just rely on its current settings for dual-channel.
                 * We haven't figured out how to reliably set up different
@@ -1080,7 +1034,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
 
 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t reg = PIPEDSL(pipe);
        u32 line1, line2;
        u32 line_mask;
@@ -1116,7 +1070,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
 
@@ -1124,8 +1078,9 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
                i915_reg_t reg = PIPECONF(cpu_transcoder);
 
                /* Wait for the Pipe State to go off */
-               if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
-                            100))
+               if (intel_wait_for_register(dev_priv,
+                                           reg, I965_PIPECONF_ACTIVE, 0,
+                                           100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                /* Wait for the display line to settle */
@@ -1234,7 +1189,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
                           enum pipe pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = PIPE_A;
@@ -1276,7 +1231,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
 static void assert_cursor(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        bool cur_state;
 
        if (IS_845G(dev) || IS_I865G(dev))
@@ -1338,7 +1293,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        int i;
 
        /* Primary planes are fixed to pipes on gen4+ */
@@ -1364,7 +1319,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        int sprite;
 
        if (INTEL_INFO(dev)->gen >= 9) {
@@ -1544,7 +1499,11 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
        POSTING_READ(DPLL(pipe));
        udelay(150);
 
-       if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+       if (intel_wait_for_register(dev_priv,
+                                   DPLL(pipe),
+                                   DPLL_LOCK_VLV,
+                                   DPLL_LOCK_VLV,
+                                   1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
 }
 
@@ -1593,7 +1552,9 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
 
        /* Check PLL is locked */
-       if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+       if (intel_wait_for_register(dev_priv,
+                                   DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
+                                   1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
 }
 
@@ -1639,9 +1600,10 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
        struct intel_crtc *crtc;
        int count = 0;
 
-       for_each_intel_crtc(dev, crtc)
+       for_each_intel_crtc(dev, crtc) {
                count += crtc->base.state->active &&
-                       intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
+                       intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
+       }
 
        return count;
 }
@@ -1649,7 +1611,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
 static void i9xx_enable_pll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc->config->dpll_hw_state.dpll;
 
@@ -1721,12 +1683,12 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
 static void i9xx_disable_pll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
 
        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev) &&
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
+           intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
            !intel_num_dvo_pipes(dev)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1813,7 +1775,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                BUG();
        }
 
-       if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
+       if (intel_wait_for_register(dev_priv,
+                                   dpll_reg, port_mask, expected_mask,
+                                   1000))
                WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
                     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
 }
@@ -1821,7 +1785,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                           enum pipe pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        i915_reg_t reg;
@@ -1854,7 +1818,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                 * here for both 8bpc and 12bpc.
                 */
                val &= ~PIPECONF_BPC_MASK;
-               if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+               if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
                        val |= PIPECONF_8BPC;
                else
                        val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1863,7 +1827,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
        val &= ~TRANS_INTERLACE_MASK;
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
                if (HAS_PCH_IBX(dev_priv) &&
-                   intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+                   intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
                        val |= TRANS_LEGACY_INTERLACED_ILK;
                else
                        val |= TRANS_INTERLACED;
@@ -1871,7 +1835,9 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                val |= TRANS_PROGRESSIVE;
 
        I915_WRITE(reg, val | TRANS_ENABLE);
-       if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+       if (intel_wait_for_register(dev_priv,
+                                   reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
+                                   100))
                DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
 }
 
@@ -1899,14 +1865,18 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                val |= TRANS_PROGRESSIVE;
 
        I915_WRITE(LPT_TRANSCONF, val);
-       if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
+       if (intel_wait_for_register(dev_priv,
+                                   LPT_TRANSCONF,
+                                   TRANS_STATE_ENABLE,
+                                   TRANS_STATE_ENABLE,
+                                   100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
 }
 
 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        i915_reg_t reg;
        uint32_t val;
 
@@ -1922,7 +1892,9 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
-       if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+       if (intel_wait_for_register(dev_priv,
+                                   reg, TRANS_STATE_ENABLE, 0,
+                                   50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
 
        if (HAS_PCH_CPT(dev)) {
@@ -1942,7 +1914,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
        val &= ~TRANS_ENABLE;
        I915_WRITE(LPT_TRANSCONF, val);
        /* wait for PCH transcoder off, transcoder state */
-       if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
+       if (intel_wait_for_register(dev_priv,
+                                   LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
+                                   50))
                DRM_ERROR("Failed to disable PCH transcoder\n");
 
        /* Workaround: clear timing override bit. */
@@ -1961,7 +1935,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 static void intel_enable_pipe(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        enum pipe pch_transcoder;
@@ -1985,7 +1959,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
         * need the check.
         */
        if (HAS_GMCH_DISPLAY(dev_priv))
-               if (crtc->config->has_dsi_encoder)
+               if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
@@ -2034,7 +2008,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
  */
 static void intel_disable_pipe(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
@@ -2072,15 +2046,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
                intel_wait_for_pipe_off(crtc);
 }
 
-static bool need_vtd_wa(struct drm_device *dev)
-{
-#ifdef CONFIG_INTEL_IOMMU
-       if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
-               return true;
-#endif
-       return false;
-}
-
 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
 {
        return IS_GEN2(dev_priv) ? 2048 : 4096;
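
need_vtd_wa() is deleted above; the pin-and-fence hunk below switches its only caller here to intel_scanout_needs_vtd_wa(dev_priv) when padding scanout alignment to 256 KiB. Presumably the helper moved to a shared header with the logic intact, now keyed off dev_priv; a sketch under that assumption, with the body mirroring the deleted function and INTEL_GEN(dev_priv) as the dev_priv-based spelling of INTEL_INFO(dev)->gen.

    /* Assumed shape of the moved helper; not compilable outside the kernel. */
    static inline bool
    intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
    {
    #ifdef CONFIG_INTEL_IOMMU
            /* The VT-d scanout workaround only matters on gen6+ parts
             * with the IOMMU actively mapping GFX.
             */
            if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
                    return true;
    #endif
            return false;
    }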
@@ -2245,7 +2210,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                           unsigned int rotation)
 {
        struct drm_device *dev = fb->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_ggtt_view view;
        u32 alignment;
@@ -2262,7 +2227,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
         * we should always have valid PTE following the scanout preventing
         * the VT-d warning.
         */
-       if (need_vtd_wa(dev) && alignment < 256 * 1024)
+       if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
                alignment = 256 * 1024;
 
        /*
@@ -2547,7 +2512,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct intel_crtc *i;
        struct drm_i915_gem_object *obj;
@@ -2643,7 +2608,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
                                      const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = primary->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2756,7 +2721,7 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
                                       struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane;
 
@@ -2773,7 +2738,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
                                          const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = primary->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2901,7 +2866,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
        I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
@@ -3011,7 +2976,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
                                         const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -3095,7 +3060,7 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
                                          struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = to_intel_crtc(crtc)->pipe;
 
        I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -3118,7 +3083,7 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
 {
        struct intel_crtc *crtc;
 
-       for_each_intel_crtc(dev_priv->dev, crtc)
+       for_each_intel_crtc(&dev_priv->drm, crtc)
                intel_finish_page_flip_cs(dev_priv, crtc->pipe);
 }
 
@@ -3152,12 +3117,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                return;
 
-       drm_modeset_lock_all(dev_priv->dev);
+       drm_modeset_lock_all(&dev_priv->drm);
        /*
         * Disabling the crtcs gracefully seems nicer. Also the
         * g33 docs say we should at least disable all the planes.
         */
-       intel_display_suspend(dev_priv->dev);
+       intel_display_suspend(&dev_priv->drm);
 }
 
 void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -3184,7 +3149,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
                 * FIXME: Atomic will make this obsolete since we won't schedule
                 * CS-based flips (which might get lost in gpu resets) any more.
                 */
-               intel_update_primary_planes(dev_priv->dev);
+               intel_update_primary_planes(&dev_priv->drm);
                return;
        }
 
@@ -3195,18 +3160,18 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
        intel_runtime_pm_disable_interrupts(dev_priv);
        intel_runtime_pm_enable_interrupts(dev_priv);
 
-       intel_modeset_init_hw(dev_priv->dev);
+       intel_modeset_init_hw(&dev_priv->drm);
 
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       intel_display_resume(dev_priv->dev);
+       intel_display_resume(&dev_priv->drm);
 
        intel_hpd_init(dev_priv);
 
-       drm_modeset_unlock_all(dev_priv->dev);
+       drm_modeset_unlock_all(&dev_priv->drm);
 }
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3231,7 +3196,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *old_crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->base.state);
 
@@ -3272,7 +3237,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
 static void intel_fdi_normal_train(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
@@ -3315,7 +3280,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
@@ -3416,7 +3381,7 @@ static const int snb_b_fdi_train_param[] = {
 static void gen6_fdi_link_train(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
@@ -3549,7 +3514,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
@@ -3668,7 +3633,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;
@@ -3705,7 +3670,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;
@@ -3735,7 +3700,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
 static void ironlake_fdi_disable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
@@ -3831,7 +3796,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        long ret;
 
        WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
@@ -3994,7 +3959,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
                                                enum pipe pch_transcoder)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
 
        I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
@@ -4016,7 +3981,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
 
 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t temp;
 
        temp = I915_READ(SOUTH_CHICKEN1);
@@ -4066,7 +4031,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
 
        for_each_encoder_on_crtc(dev, crtc, encoder) {
-               if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+               if (encoder->type == INTEL_OUTPUT_DP ||
                    encoder->type == INTEL_OUTPUT_EDP)
                        return enc_to_dig_port(&encoder->base)->port;
        }
@@ -4085,7 +4050,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
 static void ironlake_pch_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 temp;
@@ -4135,7 +4100,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
        intel_fdi_normal_train(crtc);
 
        /* For PCH DP, enable TRANS_DP_CTL */
-       if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
+       if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
                const struct drm_display_mode *adjusted_mode =
                        &intel_crtc->config->base.adjusted_mode;
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4175,7 +4140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 static void lpt_pch_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
@@ -4191,7 +4156,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
 
 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t dslreg = PIPEDSL(pipe);
        u32 temp;
 
@@ -4369,7 +4334,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
 static void skylake_pfit_enable(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        struct intel_crtc_scaler_state *scaler_state =
                &crtc->config->scaler_state;
@@ -4397,7 +4362,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
 static void ironlake_pfit_enable(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
 
        if (crtc->config->pch_pfit.enabled) {
@@ -4418,7 +4383,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
 void hsw_enable_ips(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!crtc->config->ips_enabled)
                return;
@@ -4446,7 +4411,9 @@ void hsw_enable_ips(struct intel_crtc *crtc)
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
-               if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+               if (intel_wait_for_register(dev_priv,
+                                           IPS_CTL, IPS_ENABLE, IPS_ENABLE,
+                                           50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
 }
@@ -4454,7 +4421,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
 void hsw_disable_ips(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!crtc->config->ips_enabled)
                return;
@@ -4465,7 +4432,9 @@ void hsw_disable_ips(struct intel_crtc *crtc)
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->rps.hw_lock);
                /* wait for pcode to finish disabling IPS, which may take up to 42ms */
-               if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
+               if (intel_wait_for_register(dev_priv,
+                                           IPS_CTL, IPS_ENABLE, 0,
+                                           42))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
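Every wait_for(I915_READ(reg) & mask, timeout) conversion in this patch, here for IPS and below for the BXT DE PLL and LCPLL, follows the same shape: poll a register until (read & mask) == value or a millisecond timeout expires. A hedged sketch of a helper with that contract; the name, the stand-in I/O functions, and the 0 / -ETIMEDOUT return convention are assumptions read off the call sites, not the driver's exact implementation:

    #include <errno.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the driver's MMIO read and clock source. */
    uint32_t read_reg(uint32_t reg);
    uint64_t now_ms(void);

    /* Poll until (read_reg(reg) & mask) == value or timeout_ms elapses.
     * Returns 0 on success and -ETIMEDOUT on timeout, matching how the
     * call sites treat any nonzero return as a DRM_ERROR-worthy failure. */
    int wait_for_register(uint32_t reg, uint32_t mask, uint32_t value,
                          unsigned int timeout_ms)
    {
            uint64_t deadline = now_ms() + timeout_ms;

            do {
                    if ((read_reg(reg) & mask) == value)
                            return 0;
            } while (now_ms() < deadline);

            return -ETIMEDOUT;
    }

Read this way, intel_wait_for_register(dev_priv, IPS_CTL, IPS_ENABLE, IPS_ENABLE, 50) means "wait up to 50 ms for IPS_ENABLE to latch", and passing 0 as the value waits for the same bit to clear.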
@@ -4480,7 +4449,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
 {
        if (intel_crtc->overlay) {
                struct drm_device *dev = intel_crtc->base.dev;
-               struct drm_i915_private *dev_priv = dev->dev_private;
+               struct drm_i915_private *dev_priv = to_i915(dev);
 
                mutex_lock(&dev->struct_mutex);
                dev_priv->mm.interruptible = false;
@@ -4508,7 +4477,7 @@ static void
 intel_post_enable_primary(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 
@@ -4540,7 +4509,7 @@ static void
 intel_pre_disable_primary(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 
@@ -4567,7 +4536,7 @@ static void
 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 
@@ -4626,7 +4595,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->base.state);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
@@ -4729,7 +4698,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
@@ -4757,7 +4726,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        if (intel_crtc->config->has_pch_encoder)
                intel_prepare_shared_dpll(intel_crtc);
 
-       if (intel_crtc->config->has_dp_encoder)
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
        intel_set_pipe_timings(intel_crtc);
@@ -4826,7 +4795,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
 static void haswell_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe, hsw_workaround_pipe;
@@ -4848,10 +4817,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        if (intel_crtc->config->shared_dpll)
                intel_enable_shared_dpll(intel_crtc);
 
-       if (intel_crtc->config->has_dp_encoder)
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(intel_crtc);
 
        intel_set_pipe_src_size(intel_crtc);
@@ -4867,7 +4836,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                                     &intel_crtc->config->fdi_m_n, NULL);
        }
 
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                haswell_set_pipeconf(crtc);
 
        haswell_set_pipemisc(crtc);
@@ -4889,7 +4858,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        if (intel_crtc->config->has_pch_encoder)
                dev_priv->display.fdi_link_train(crtc);
 
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_pipe_clock(intel_crtc);
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4904,7 +4873,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        intel_color_load_luts(&pipe_config->base);
 
        intel_ddi_set_pipe_settings(crtc);
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(crtc);
 
        if (dev_priv->display.initial_watermarks != NULL)
@@ -4913,7 +4882,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                intel_update_watermarks(crtc);
 
        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(intel_crtc);
 
        if (intel_crtc->config->has_pch_encoder)
@@ -4950,7 +4919,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
 
        /* To avoid upsetting the power well on haswell only disable the pfit if
@@ -4965,7 +4934,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
 static void ironlake_crtc_disable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
@@ -5028,7 +4997,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 static void haswell_crtc_disable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5046,13 +5015,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        assert_vblank_disabled(crtc);
 
        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(intel_crtc);
 
        if (intel_crtc->config->dp_encoder_is_mst)
                intel_ddi_set_vc_payload_alloc(crtc, false);
 
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -5060,7 +5029,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        else
                ironlake_pfit_disable(intel_crtc, false);
 
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_pipe_clock(intel_crtc);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -5080,7 +5049,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *pipe_config = crtc->config;
 
        if (!pipe_config->gmch_pfit.control)
@@ -5150,7 +5119,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
        case INTEL_OUTPUT_UNKNOWN:
                /* Only DDI platforms should ever use this output type */
                WARN_ON_ONCE(!HAS_DDI(dev));
-       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_HDMI:
        case INTEL_OUTPUT_EDP:
                intel_dig_port = enc_to_dig_port(&intel_encoder->base);
@@ -5184,7 +5153,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
                 * run the DP detection too.
                 */
                WARN_ON_ONCE(!HAS_DDI(dev));
-       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_EDP:
                intel_dig_port = enc_to_dig_port(&intel_encoder->base);
                return port_to_aux_power_domain(intel_dig_port->port);
@@ -5232,7 +5201,7 @@ static unsigned long
 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
                               struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum intel_display_power_domain domain;
        unsigned long domains, new_domains, old_domains;
@@ -5277,7 +5246,7 @@ static int skl_calc_cdclk(int max_pixclk, int vco);
 
 static void intel_update_max_cdclk(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
                u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
@@ -5338,7 +5307,7 @@ static void intel_update_max_cdclk(struct drm_device *dev)
 
 static void intel_update_cdclk(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
 
@@ -5395,7 +5364,9 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
        I915_WRITE(BXT_DE_PLL_ENABLE, 0);
 
        /* Timeout 200us */
-       if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
+       if (intel_wait_for_register(dev_priv,
+                                   BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
+                                   1))
                DRM_ERROR("timeout waiting for DE PLL unlock\n");
 
        dev_priv->cdclk_pll.vco = 0;
@@ -5414,7 +5385,11 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
        I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
 
        /* Timeout 200us */
-       if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
+       if (intel_wait_for_register(dev_priv,
+                                   BXT_DE_PLL_ENABLE,
+                                   BXT_DE_PLL_LOCK,
+                                   BXT_DE_PLL_LOCK,
+                                   1))
                DRM_ERROR("timeout waiting for DE PLL lock\n");
 
        dev_priv->cdclk_pll.vco = vco;
@@ -5495,14 +5470,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
                return;
        }
 
-       intel_update_cdclk(dev_priv->dev);
+       intel_update_cdclk(&dev_priv->drm);
 }
 
 static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
        u32 cdctl, expected;
 
-       intel_update_cdclk(dev_priv->dev);
+       intel_update_cdclk(&dev_priv->drm);
 
        if (dev_priv->cdclk_pll.vco == 0 ||
            dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -5635,7 +5610,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
        dev_priv->skl_preferred_vco_freq = vco;
 
        if (changed)
-               intel_update_max_cdclk(dev_priv->dev);
+               intel_update_max_cdclk(&dev_priv->drm);
 }
 
 static void
@@ -5677,7 +5652,9 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 
        I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
 
-       if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
+       if (intel_wait_for_register(dev_priv,
+                                   LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+                                   5))
                DRM_ERROR("DPLL0 not locked\n");
 
        dev_priv->cdclk_pll.vco = vco;
@@ -5690,7 +5667,9 @@ static void
 skl_dpll0_disable(struct drm_i915_private *dev_priv)
 {
        I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
-       if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
+       if (intel_wait_for_register(dev_priv,
+                                   LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+                                   1))
                DRM_ERROR("Couldn't disable DPLL0\n");
 
        dev_priv->cdclk_pll.vco = 0;
@@ -5725,7 +5704,7 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        u32 freq_select, pcu_ack;
 
        WARN_ON((cdclk == 24000) != (vco == 0));
@@ -5823,7 +5802,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
        if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
                goto sanitize;
 
-       intel_update_cdclk(dev_priv->dev);
+       intel_update_cdclk(&dev_priv->drm);
        /* Is PLL enabled and locked ? */
        if (dev_priv->cdclk_pll.vco == 0 ||
            dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -5854,7 +5833,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 /* Adjust CDclk dividers to allow high res or save power if possible */
 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val, cmd;
 
        WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -5919,7 +5898,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 
 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val, cmd;
 
        WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -6007,7 +5986,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
                                 struct drm_atomic_state *state)
 {
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        unsigned max_pixclk = 0, i;
@@ -6034,7 +6013,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int max_pixclk = intel_mode_max_pixclk(dev, state);
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(state);
@@ -6102,7 +6081,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = old_state->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        unsigned req_cdclk = old_intel_state->dev_cdclk;
@@ -6141,14 +6120,14 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        if (WARN_ON(intel_crtc->active))
                return;
 
-       if (intel_crtc->config->has_dp_encoder)
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
        intel_set_pipe_timings(intel_crtc);
        intel_set_pipe_src_size(intel_crtc);
 
        if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
-               struct drm_i915_private *dev_priv = dev->dev_private;
+               struct drm_i915_private *dev_priv = to_i915(dev);
 
                I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                I915_WRITE(CHV_CANVAS(pipe), 0);
@@ -6193,7 +6172,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
        I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
@@ -6214,7 +6193,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
        i9xx_set_pll_dividers(intel_crtc);
 
-       if (intel_crtc->config->has_dp_encoder)
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
        intel_set_pipe_timings(intel_crtc);
@@ -6250,7 +6229,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!crtc->config->gmch_pfit.control)
                return;
@@ -6265,7 +6244,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
 static void i9xx_crtc_disable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
@@ -6291,7 +6270,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
                if (encoder->post_disable)
                        encoder->post_disable(encoder);
 
-       if (!intel_crtc->config->has_dsi_encoder) {
+       if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev))
@@ -6609,7 +6588,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        pipe_config->ips_enabled = i915.enable_ips &&
                hsw_crtc_supports_ips(crtc) &&
@@ -6629,7 +6608,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int clock_limit = dev_priv->max_dotclk_freq;
 
@@ -6660,7 +6639,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
         * - LVDS dual channel mode
         * - Double wide pipe
         */
-       if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
+       if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
             intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
                pipe_config->pipe_src_w &= ~1;
 
@@ -6779,7 +6758,7 @@ static int broxton_get_display_clock_speed(struct drm_device *dev)
 
 static int broadwell_get_display_clock_speed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t lcpll = I915_READ(LCPLL_CTL);
        uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
@@ -6799,7 +6778,7 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
 
 static int haswell_get_display_clock_speed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t lcpll = I915_READ(LCPLL_CTL);
        uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
@@ -6933,7 +6912,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
 
 static unsigned int intel_hpll_vco(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        static const unsigned int blb_vco[8] = {
                [0] = 3200000,
                [1] = 4000000,
@@ -7171,7 +7150,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
        crtc_state->dpll_hw_state.fp0 = fp;
 
        crtc->lowfreq_avail = false;
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
            reduced_clock) {
                crtc_state->dpll_hw_state.fp1 = fp2;
                crtc->lowfreq_avail = true;
@@ -7213,7 +7192,7 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
 
        I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
@@ -7227,7 +7206,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m2_n2)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        enum transcoder transcoder = crtc->config->cpu_transcoder;
 
@@ -7290,7 +7269,7 @@ static void vlv_compute_dpll(struct intel_crtc *crtc,
                pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
        /* DPLL not used with DSI, but still need the rest set up */
-       if (!pipe_config->has_dsi_encoder)
+       if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
                pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
                        DPLL_EXT_BUFFER_ENABLE_VLV;
 
@@ -7307,7 +7286,7 @@ static void chv_compute_dpll(struct intel_crtc *crtc,
                pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
        /* DPLL not used with DSI, but still need the rest set up */
-       if (!pipe_config->has_dsi_encoder)
+       if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
                pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
 
        pipe_config->dpll_hw_state.dpll_md =
@@ -7318,7 +7297,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 mdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -7377,15 +7356,15 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 
        /* Set HBR and RBR LPF coefficients */
        if (pipe_config->port_clock == 162000 ||
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+           intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
+           intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);
 
-       if (pipe_config->has_dp_encoder) {
+       if (intel_crtc_has_dp_encoder(pipe_config)) {
                /* Use SSC source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -7405,8 +7384,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 
        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+       if (intel_crtc_has_dp_encoder(crtc->config))
                coreclk |= 0x01000000;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
@@ -7418,7 +7396,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 loopfilter, tribuf_calcntr;
@@ -7580,19 +7558,15 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
                              struct dpll *reduced_clock)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll;
-       bool is_sdvo;
        struct dpll *clock = &crtc_state->dpll;
 
        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
 
-       is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
-               intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
-
        dpll = DPLL_VGA_MODE_DIS;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -7602,10 +7576,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
                        << SDVO_MULTIPLIER_SHIFT_HIRES;
        }
 
-       if (is_sdvo)
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                dpll |= DPLL_SDVO_HIGH_SPEED;
 
-       if (crtc_state->has_dp_encoder)
+       if (intel_crtc_has_dp_encoder(crtc_state))
                dpll |= DPLL_SDVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
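The hunks above, in vlv_prepare_pll() and i9xx_compute_dpll(), replace the open-coded encoder-type tests (intel_pipe_has_type(), intel_pipe_will_have_type(), the DISPLAYPORT-or-EDP pair, and the has_dp_encoder flag) with intel_crtc_has_type() and intel_crtc_has_dp_encoder() over the crtc state. That only works if the state itself records which output types drive the pipe; a sketch under that assumption, with an illustrative output_types bitmask and made-up enum values:

    #include <stdbool.h>

    /* Values are made up for the sketch; only the predicate shape matters. */
    enum intel_output_type_sketch {
            OUTPUT_LVDS = 4,
            OUTPUT_DP   = 7,
            OUTPUT_EDP  = 8,
    };

    /* Assumption: the crtc state caches the output types driving the pipe
     * as a bitmask, so checks no longer re-walk the connector/encoder list. */
    struct crtc_state_sketch { unsigned int output_types; };

    static inline bool crtc_has_type(const struct crtc_state_sketch *state,
                                     enum intel_output_type_sketch type)
    {
            return state->output_types & (1u << type);
    }

    static inline bool crtc_has_dp_encoder(const struct crtc_state_sketch *state)
    {
            /* Mirrors the DISPLAYPORT-or-EDP pair the hunks above collapse. */
            return crtc_has_type(state, OUTPUT_DP) ||
                   crtc_has_type(state, OUTPUT_EDP);
    }

A side benefit visible in the hunks: checks that previously consulted the live crtc can now be asked of a not-yet-committed crtc_state, which is why the separate "will have type" variant disappears.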
@@ -7635,7 +7610,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
 
        if (crtc_state->sdvo_tv_clock)
                dpll |= PLL_REF_INPUT_TVCLKINBC;
-       else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+       else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
@@ -7656,7 +7631,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
                              struct dpll *reduced_clock)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll;
        struct dpll *clock = &crtc_state->dpll;
 
@@ -7664,7 +7639,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 
        dpll = DPLL_VGA_MODE_DIS;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        } else {
                if (clock->p1 == 2)
@@ -7675,10 +7650,10 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }
 
-       if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
+       if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
                dpll |= DPLL_DVO_2X_MODE;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
            intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
@@ -7691,7 +7666,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_crtc->pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -7708,7 +7683,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;
 
-               if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+               if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7753,7 +7728,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_crtc->pipe;
 
        /* pipesrc controls the size that is scaled from, which should
@@ -7768,7 +7743,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        uint32_t tmp;
 
@@ -7803,7 +7778,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp;
 
        tmp = I915_READ(PIPESRC(crtc->pipe));
@@ -7841,7 +7816,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t pipeconf;
 
        pipeconf = 0;
@@ -7887,7 +7862,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 
        if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                if (INTEL_INFO(dev)->gen < 4 ||
-                   intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+                   intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
                        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                else
                        pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7906,21 +7881,21 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
                                   struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct intel_limit *limit;
        int refclk = 48000;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
                }
 
                limit = &intel_limits_i8xx_lvds;
-       } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
                limit = &intel_limits_i8xx_dvo;
        } else {
                limit = &intel_limits_i8xx_dac;
@@ -7942,14 +7917,14 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct intel_limit *limit;
        int refclk = 96000;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7959,10 +7934,10 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
                        limit = &intel_limits_g4x_dual_channel_lvds;
                else
                        limit = &intel_limits_g4x_single_channel_lvds;
-       } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
-                  intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+                  intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
                limit = &intel_limits_g4x_hdmi;
-       } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
                limit = &intel_limits_g4x_sdvo;
        } else {
                /* The option is for other outputs */
@@ -7985,14 +7960,14 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct intel_limit *limit;
        int refclk = 96000;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -8019,14 +7994,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
                                   struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct intel_limit *limit;
        int refclk = 96000;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -8095,7 +8070,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp;
 
        if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
@@ -8122,7 +8097,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        struct dpll clock;
        u32 mdiv;
@@ -8150,7 +8125,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val, base, offset;
        int pipe = crtc->pipe, plane = crtc->plane;
        int fourcc, pixel_format;
@@ -8218,7 +8193,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
@@ -8252,7 +8227,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        uint32_t tmp;
        bool ret;
@@ -8363,7 +8338,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 
 static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
@@ -8537,16 +8512,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);
 
-       if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
-                              FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+       if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
+                       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                DRM_ERROR("FDI mPHY reset assert timeout\n");
 
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);
 
-       if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
-                               FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+       if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
+                        FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                DRM_ERROR("FDI mPHY reset de-assert timeout\n");
 }
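The wait_for_atomic_us() to wait_for_us() switch above (repeated in hsw_disable_lcpll() and hsw_restore_lcpll() below) swaps a strict busy-spin for a poll that is allowed to sleep, which is legitimate because these modeset paths run in ordinary process context. A rough userspace sketch of the sleeping variant, with a hypothetical condition_met() standing in for the register test:

    #include <stdbool.h>
    #include <time.h>

    bool condition_met(void);   /* hypothetical: the register test being polled */

    /* Sleeping poll, one ~1 us tick per iteration.  Fine in process context,
     * where these modeset paths run; atomic context would have to busy-spin. */
    int wait_for_us_sketch(unsigned int timeout_us)
    {
            const struct timespec tick = { 0, 1000 };

            while (timeout_us--) {
                    if (condition_met())
                            return 0;
                    nanosleep(&tick, NULL);          /* may sleep */
            }
            return condition_met() ? 0 : -1;
    }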
 
@@ -8634,7 +8609,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
                                 bool with_fdi)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t reg, tmp;
 
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
@@ -8673,7 +8648,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
 /* Sequence to disable CLKOUT_DP */
 static void lpt_disable_clkout_dp(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t reg, tmp;
 
        mutex_lock(&dev_priv->sb_lock);
@@ -8794,7 +8769,7 @@ void intel_init_pch_refclk(struct drm_device *dev)
 
 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        uint32_t val;
@@ -8836,7 +8811,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 
 static void haswell_set_pipeconf(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        u32 val = 0;
@@ -8855,7 +8830,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
 
 static void haswell_set_pipemisc(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
        if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
@@ -8908,37 +8883,13 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 {
        struct drm_crtc *crtc = &intel_crtc->base;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_atomic_state *state = crtc_state->base.state;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       struct intel_encoder *encoder;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll, fp, fp2;
-       int factor, i;
-       bool is_lvds = false, is_sdvo = false;
-
-       for_each_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != crtc_state->base.crtc)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-
-               switch (encoder->type) {
-               case INTEL_OUTPUT_LVDS:
-                       is_lvds = true;
-                       break;
-               case INTEL_OUTPUT_SDVO:
-               case INTEL_OUTPUT_HDMI:
-                       is_sdvo = true;
-                       break;
-               default:
-                       break;
-               }
-       }
+       int factor;
 
        /* Enable autotuning of the PLL clock (if permissible) */
        factor = 21;
-       if (is_lvds) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if ((intel_panel_use_ssc(dev_priv) &&
                     dev_priv->vbt.lvds_ssc_freq == 100000) ||
                    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
@@ -8962,7 +8913,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 
        dpll = 0;
 
-       if (is_lvds)
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -8970,9 +8921,11 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
        dpll |= (crtc_state->pixel_multiplier - 1)
                << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 
-       if (is_sdvo)
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                dpll |= DPLL_SDVO_HIGH_SPEED;
-       if (crtc_state->has_dp_encoder)
+
+       if (intel_crtc_has_dp_encoder(crtc_state))
                dpll |= DPLL_SDVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
@@ -8995,7 +8948,8 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
                break;
        }
 
-       if (is_lvds && intel_panel_use_ssc(dev_priv))
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+           intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;
@@ -9011,7 +8965,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
                                       struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct dpll reduced_clock;
        bool has_reduced_clock = false;
        struct intel_shared_dpll *pll;
@@ -9027,7 +8981,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
        if (!crtc_state->has_pch_encoder)
                return 0;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
                                      dev_priv->vbt.lvds_ssc_freq);
@@ -9066,7 +9020,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
                return -EINVAL;
        }
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
            has_reduced_clock)
                crtc->lowfreq_avail = true;
 
@@ -9077,7 +9031,7 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
 
        m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
@@ -9095,7 +9049,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m2_n2)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
 
        if (INTEL_INFO(dev)->gen >= 5) {
@@ -9153,7 +9107,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
        uint32_t ps_ctrl = 0;
        int id = -1;
@@ -9184,7 +9138,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
                                 struct intel_initial_plane_config *plane_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val, base, offset, stride_mult, tiling;
        int pipe = crtc->pipe;
        int fourcc, pixel_format;
@@ -9267,7 +9221,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp;
 
        tmp = I915_READ(PF_CTL(crtc->pipe));
@@ -9292,7 +9246,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
                                  struct intel_initial_plane_config *plane_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val, base, offset;
        int pipe = crtc->pipe;
        int fourcc, pixel_format;
@@ -9360,7 +9314,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        uint32_t tmp;
        bool ret;
@@ -9455,7 +9409,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 
 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
 
        for_each_intel_crtc(dev, crtc)
@@ -9489,7 +9443,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 
 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        if (IS_HASWELL(dev))
                return I915_READ(D_COMP_HSW);
@@ -9499,7 +9453,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
 
 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        if (IS_HASWELL(dev)) {
                mutex_lock(&dev_priv->rps.hw_lock);
@@ -9534,8 +9488,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
                val |= LCPLL_CD_SOURCE_FCLK;
                I915_WRITE(LCPLL_CTL, val);
 
-               if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
-                                      LCPLL_CD_SOURCE_FCLK_DONE, 1))
+               if (wait_for_us(I915_READ(LCPLL_CTL) &
+                               LCPLL_CD_SOURCE_FCLK_DONE, 1))
                        DRM_ERROR("Switching to FCLK failed\n");
 
                val = I915_READ(LCPLL_CTL);
@@ -9545,7 +9499,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
        I915_WRITE(LCPLL_CTL, val);
        POSTING_READ(LCPLL_CTL);
 
-       if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+       if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
                DRM_ERROR("LCPLL still locked\n");
 
        val = hsw_read_dcomp(dev_priv);
@@ -9600,7 +9554,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
        val &= ~LCPLL_PLL_DISABLE;
        I915_WRITE(LCPLL_CTL, val);
 
-       if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+       if (intel_wait_for_register(dev_priv,
+                                   LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+                                   5))
                DRM_ERROR("LCPLL not locked yet\n");
 
        if (val & LCPLL_CD_SOURCE_FCLK) {
@@ -9608,13 +9564,13 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
                val &= ~LCPLL_CD_SOURCE_FCLK;
                I915_WRITE(LCPLL_CTL, val);
 
-               if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
-                                       LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+               if (wait_for_us((I915_READ(LCPLL_CTL) &
+                                LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
                        DRM_ERROR("Switching back to LCPLL failed\n");
        }
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-       intel_update_cdclk(dev_priv->dev);
+       intel_update_cdclk(&dev_priv->drm);
 }
 
 /*
@@ -9642,7 +9598,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
  */
 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        uint32_t val;
 
        DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -9659,7 +9615,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 
 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        uint32_t val;
 
        DRM_DEBUG_KMS("Disabling package C8+\n");
@@ -9688,7 +9644,7 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 {
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = state->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct drm_crtc *crtc;
        struct drm_crtc_state *cstate;
        struct intel_crtc_state *crtc_state;
@@ -9724,7 +9680,7 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 
 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t val, data;
        int ret;
 
@@ -9893,10 +9849,7 @@ static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
                                      struct intel_crtc_state *crtc_state)
 {
-       struct intel_encoder *intel_encoder =
-               intel_ddi_get_crtc_new_encoder(crtc_state);
-
-       if (intel_encoder->type != INTEL_OUTPUT_DSI) {
+       if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
                if (!intel_ddi_pll_select(crtc, crtc_state))
                        return -EINVAL;
        }
@@ -10006,7 +9959,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     unsigned long *power_domain_mask)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        u32 tmp;
 
@@ -10057,14 +10010,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         unsigned long *power_domain_mask)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum port port;
        enum transcoder cpu_transcoder;
        u32 tmp;
 
-       pipe_config->has_dsi_encoder = false;
-
        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
@@ -10096,18 +10047,17 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                        continue;
 
                pipe_config->cpu_transcoder = cpu_transcoder;
-               pipe_config->has_dsi_encoder = true;
                break;
        }
 
-       return pipe_config->has_dsi_encoder;
+       return transcoder_is_dsi(pipe_config->cpu_transcoder);
 }
 
 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
                                       struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_shared_dpll *pll;
        enum port port;
        uint32_t tmp;
@@ -10150,7 +10100,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        unsigned long power_domain_mask;
        bool active;
@@ -10164,18 +10114,16 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 
        active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
 
-       if (IS_BROXTON(dev_priv)) {
-               bxt_get_dsi_transcoder_state(crtc, pipe_config,
-                                            &power_domain_mask);
-               WARN_ON(active && pipe_config->has_dsi_encoder);
-               if (pipe_config->has_dsi_encoder)
-                       active = true;
+       if (IS_BROXTON(dev_priv) &&
+           bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
+               WARN_ON(active);
+               active = true;
        }
 
        if (!active)
                goto out;
 
-       if (!pipe_config->has_dsi_encoder) {
+       if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                haswell_get_ddi_port_state(crtc, pipe_config);
                intel_get_pipe_timings(crtc, pipe_config);
        }
@@ -10226,7 +10174,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
                               const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t cntl = 0, size = 0;
 
@@ -10289,7 +10237,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
                               const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        uint32_t cntl = 0;
@@ -10337,7 +10285,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
                                     const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 base = intel_crtc->cursor_addr;
@@ -10504,7 +10452,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
                   struct drm_display_mode *mode)
 {
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        struct drm_framebuffer *fb;
 
@@ -10774,7 +10722,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 static int i9xx_pll_refclk(struct drm_device *dev,
                           const struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll = pipe_config->dpll_hw_state.dpll;
 
        if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
@@ -10792,7 +10740,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
@@ -10918,7 +10866,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
                                             struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        struct drm_display_mode *mode;
@@ -10970,31 +10918,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        return mode;
 }
 
-void intel_mark_busy(struct drm_i915_private *dev_priv)
-{
-       if (dev_priv->mm.busy)
-               return;
-
-       intel_runtime_pm_get(dev_priv);
-       i915_update_gfx_val(dev_priv);
-       if (INTEL_GEN(dev_priv) >= 6)
-               gen6_rps_busy(dev_priv);
-       dev_priv->mm.busy = true;
-}
-
-void intel_mark_idle(struct drm_i915_private *dev_priv)
-{
-       if (!dev_priv->mm.busy)
-               return;
-
-       dev_priv->mm.busy = false;
-
-       if (INTEL_GEN(dev_priv) >= 6)
-               gen6_rps_idle(dev_priv);
-
-       intel_runtime_pm_put(dev_priv);
-}
-
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -11056,7 +10979,7 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
                                   struct intel_flip_work *work)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned reset_counter;
 
        reset_counter = i915_reset_counter(&dev_priv->gpu_error);
@@ -11132,7 +11055,7 @@ static bool pageflip_finished(struct intel_crtc *crtc,
 
 void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_flip_work *work;
@@ -11159,7 +11082,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
 
 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_flip_work *work;
@@ -11267,7 +11190,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
                                 uint32_t flags)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;
@@ -11305,7 +11228,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
                                 uint32_t flags)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;
@@ -11464,7 +11387,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
                             struct intel_flip_work *work)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
        const enum pipe pipe = intel_crtc->pipe;
        u32 ctl, stride, tile_height;
@@ -11516,7 +11439,7 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
                             struct intel_flip_work *work)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_framebuffer *intel_fb =
                to_intel_framebuffer(intel_crtc->base.primary->fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
@@ -11593,7 +11516,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
        vblank = intel_crtc_get_vblank_counter(intel_crtc);
        if (work->flip_ready_vblank == 0) {
                if (work->flip_queued_req &&
-                   !i915_gem_request_completed(work->flip_queued_req, true))
+                   !i915_gem_request_completed(work->flip_queued_req))
                        return false;
 
                work->flip_ready_vblank = vblank;
@@ -11618,7 +11541,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
 
 void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_flip_work *work;
@@ -11646,14 +11569,13 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
        spin_unlock(&dev->event_lock);
 }
 
-__maybe_unused
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event,
                                uint32_t page_flip_flags)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -11949,8 +11871,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
        struct drm_framebuffer *fb = plane_state->fb;
        int ret;
 
-       if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
-           plane->type != DRM_PLANE_TYPE_CURSOR) {
+       if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
                ret = skl_update_scaler_plane(
                        to_intel_crtc_state(crtc_state),
                        to_intel_plane_state(plane_state));
@@ -12067,31 +11988,11 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
        return true;
 }
 
-static bool check_encoder_cloning(struct drm_atomic_state *state,
-                                 struct intel_crtc *crtc)
-{
-       struct intel_encoder *encoder;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int i;
-
-       for_each_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != &crtc->base)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-               if (!check_single_encoder_cloning(state, crtc, encoder))
-                       return false;
-       }
-
-       return true;
-}
-
 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                                   struct drm_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc_state);
@@ -12099,11 +12000,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
        int ret;
        bool mode_changed = needs_modeset(crtc_state);
 
-       if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
-               DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-               return -EINVAL;
-       }
-
        if (mode_changed && !crtc_state->active)
                pipe_config->update_wm_post = true;
 
@@ -12299,14 +12195,14 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
                      pipe_config->fdi_m_n.tu);
        DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
-                     pipe_config->has_dp_encoder,
+                     intel_crtc_has_dp_encoder(pipe_config),
                      pipe_config->lane_count,
                      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
                      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
                      pipe_config->dp_m_n.tu);
 
        DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
-                     pipe_config->has_dp_encoder,
+                     intel_crtc_has_dp_encoder(pipe_config),
                      pipe_config->lane_count,
                      pipe_config->dp_m2_n2.gmch_m,
                      pipe_config->dp_m2_n2.gmch_n,
@@ -12439,7 +12335,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
                case INTEL_OUTPUT_UNKNOWN:
                        if (WARN_ON(!HAS_DDI(dev)))
                                break;
-               case INTEL_OUTPUT_DISPLAYPORT:
+               case INTEL_OUTPUT_DP:
                case INTEL_OUTPUT_HDMI:
                case INTEL_OUTPUT_EDP:
                        port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
@@ -12536,6 +12432,24 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
                               &pipe_config->pipe_src_w,
                               &pipe_config->pipe_src_h);
 
+       for_each_connector_in_state(state, connector, connector_state, i) {
+               if (connector_state->crtc != crtc)
+                       continue;
+
+               encoder = to_intel_encoder(connector_state->best_encoder);
+
+               if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
+                       DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+                       goto fail;
+               }
+
+               /*
+                * Determine output_types before calling the .compute_config()
+                * hooks so that the hooks can use this information safely.
+                */
+               pipe_config->output_types |= 1 << encoder->type;
+       }
+
 encoder_retry:
        /* Ensure the port clock defaults are reset when retrying. */
        pipe_config->port_clock = 0;
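With pipe_config->output_types populated before the .compute_config() hooks run (per the loop above), the old has_dp_encoder/has_dsi_encoder booleans collapse into bitmask tests. Judging from the call sites in this diff, the helpers plausibly look like the sketch below; the shape is assumed from usage, not quoted from intel_drv.h:

static inline bool intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
				       int type /* INTEL_OUTPUT_* */)
{
	return crtc_state->output_types & (1 << type);
}

static inline bool intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
{
	/* assumed: any DP-flavoured output counts, mirroring has_dp_encoder */
	return crtc_state->output_types & ((1 << INTEL_OUTPUT_DP) |
					   (1 << INTEL_OUTPUT_DP_MST) |
					   (1 << INTEL_OUTPUT_EDP));
}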
@@ -12821,7 +12735,6 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_I(fdi_lanes);
        PIPE_CONF_CHECK_M_N(fdi_m_n);
 
-       PIPE_CONF_CHECK_I(has_dp_encoder);
        PIPE_CONF_CHECK_I(lane_count);
        PIPE_CONF_CHECK_X(lane_lat_optim_mask);
 
@@ -12833,7 +12746,7 @@ intel_pipe_config_compare(struct drm_device *dev,
        } else
                PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
-       PIPE_CONF_CHECK_I(has_dsi_encoder);
+       PIPE_CONF_CHECK_X(output_types);
 
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12952,7 +12865,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
                            struct drm_crtc_state *new_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_ddb_allocation hw_ddb, *sw_ddb;
        struct skl_ddb_entry *hw_entry, *sw_entry;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -13058,7 +12971,7 @@ verify_crtc_state(struct drm_crtc *crtc,
                  struct drm_crtc_state *new_crtc_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config, *sw_config;
@@ -13101,8 +13014,10 @@ verify_crtc_state(struct drm_crtc *crtc,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));
 
-               if (active)
+               if (active) {
+                       pipe_config->output_types |= 1 << encoder->type;
                        encoder->get_config(encoder, pipe_config);
+               }
        }
 
        if (!new_crtc_state->active)
@@ -13181,7 +13096,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
                         struct drm_crtc_state *old_crtc_state,
                         struct drm_crtc_state *new_crtc_state)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
        struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
 
@@ -13220,7 +13135,7 @@ intel_modeset_verify_crtc(struct drm_crtc *crtc,
 static void
 verify_disabled_dpll_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        for (i = 0; i < dev_priv->num_shared_dpll; i++)
@@ -13267,7 +13182,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 
                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev) &&
-                  intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+                  intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else
                crtc->scanline_offset = 1;
@@ -13402,7 +13317,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
 static int intel_modeset_checks(struct drm_atomic_state *state)
 {
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = state->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int ret = 0, i;
@@ -13568,7 +13483,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
                                       struct drm_atomic_state *state,
                                       bool nonblock)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_plane_state *plane_state;
        struct drm_crtc_state *crtc_state;
        struct drm_plane *plane;
@@ -13697,7 +13612,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc_state *old_crtc_state;
        struct drm_crtc *crtc;
        struct intel_crtc_state *intel_cstate;
@@ -13929,7 +13844,7 @@ static int intel_atomic_commit(struct drm_device *dev,
                               bool nonblock)
 {
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
        if (intel_state->modeset && nonblock) {
@@ -14008,7 +13923,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .set_property = drm_atomic_helper_crtc_set_property,
        .destroy = intel_crtc_destroy,
-       .page_flip = drm_atomic_helper_page_flip,
+       .page_flip = intel_crtc_page_flip,
        .atomic_duplicate_state = intel_crtc_duplicate_state,
        .atomic_destroy_state = intel_crtc_destroy_state,
 };
@@ -14136,15 +14051,11 @@ int
 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
 {
        int max_scale;
-       struct drm_device *dev;
-       struct drm_i915_private *dev_priv;
        int crtc_clock, cdclk;
 
        if (!intel_crtc || !crtc_state->base.enable)
                return DRM_PLANE_HELPER_NO_SCALING;
 
-       dev = intel_crtc->base.dev;
-       dev_priv = dev->dev_private;
        crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
        cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
 
@@ -14534,7 +14445,7 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
 
 static void intel_crtc_init(struct drm_device *dev, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc;
        struct intel_crtc_state *crtc_state = NULL;
        struct drm_plane *primary = NULL;
@@ -14633,11 +14544,8 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
        struct intel_crtc *crtc;
 
        drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
-
-       if (!drmmode_crtc) {
-               DRM_ERROR("no such CRTC id\n");
+       if (!drmmode_crtc)
                return -ENOENT;
-       }
 
        crtc = to_intel_crtc(drmmode_crtc);
        pipe_from_crtc_id->pipe = crtc->pipe;
@@ -14664,7 +14572,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
 
 static bool has_edp_a(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!IS_MOBILE(dev))
                return false;
@@ -14680,7 +14588,7 @@ static bool has_edp_a(struct drm_device *dev)
 
 static bool intel_crt_present(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen >= 9)
                return false;
@@ -14706,10 +14614,15 @@ static bool intel_crt_present(struct drm_device *dev)
 
 static void intel_setup_outputs(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;
 
+       /*
+        * intel_edp_init_connector() depends on this completing first, to
+        * prevent the registration of both eDP and LVDS and the incorrect
+        * sharing of the PPS.
+        */
        intel_lvds_init(dev);
 
        if (intel_crt_present(dev))
@@ -15354,7 +15267,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
  */
 static void quirk_pipea_force(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->quirks |= QUIRK_PIPEA_FORCE;
        DRM_INFO("applying pipe a force quirk\n");
@@ -15362,7 +15275,7 @@ static void quirk_pipea_force(struct drm_device *dev)
 
 static void quirk_pipeb_force(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->quirks |= QUIRK_PIPEB_FORCE;
        DRM_INFO("applying pipe b force quirk\n");
@@ -15373,7 +15286,7 @@ static void quirk_pipeb_force(struct drm_device *dev)
  */
 static void quirk_ssc_force_disable(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
        DRM_INFO("applying lvds SSC disable quirk\n");
 }
@@ -15384,7 +15297,7 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
  */
 static void quirk_invert_brightness(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
        DRM_INFO("applying inverted panel brightness quirk\n");
 }
@@ -15392,7 +15305,7 @@ static void quirk_invert_brightness(struct drm_device *dev)
 /* Some VBT's incorrectly indicate no backlight is present */
 static void quirk_backlight_present(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
        DRM_INFO("applying backlight present quirk\n");
 }
@@ -15518,7 +15431,7 @@ static void intel_init_quirks(struct drm_device *dev)
 /* Disable the VGA plane that we never use */
 static void i915_disable_vga(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u8 sr1;
        i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
@@ -15536,7 +15449,7 @@ static void i915_disable_vga(struct drm_device *dev)
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_update_cdclk(dev);
 
@@ -15784,7 +15697,7 @@ static bool
 intel_check_plane_mapping(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
 
        if (INTEL_INFO(dev)->num_pipes == 1)
@@ -15824,7 +15737,7 @@ static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
 static void intel_sanitize_crtc(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
 
        /* Clear any frame start delays used for debugging left by the BIOS */
@@ -15949,7 +15862,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 
 void i915_redisable_vga_power_on(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
        if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
@@ -15960,7 +15873,7 @@ void i915_redisable_vga_power_on(struct drm_device *dev)
 
 void i915_redisable_vga(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* This function can be called both from intel_modeset_setup_hw_state or
         * at a very early point in our resume sequence, where the power well
@@ -16000,7 +15913,7 @@ static void readout_plane_state(struct intel_crtc *crtc)
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
@@ -16069,6 +15982,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                if (encoder->get_hw_state(encoder, &pipe)) {
                        crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                        encoder->base.crtc = &crtc->base;
+                       crtc->config->output_types |= 1 << encoder->type;
                        encoder->get_config(encoder, crtc->config);
                } else {
                        encoder->base.crtc = NULL;
@@ -16153,7 +16067,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 static void
 intel_modeset_setup_hw_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
@@ -16309,8 +16223,21 @@ void intel_modeset_gem_init(struct drm_device *dev)
                        c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
                }
        }
+}
+
+int intel_connector_register(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       int ret;
+
+       ret = intel_backlight_device_register(intel_connector);
+       if (ret)
+               goto err;
 
-       intel_backlight_register(dev);
+       return 0;
+
+err:
+       return ret;
 }
 
 void intel_connector_unregister(struct drm_connector *connector)
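intel_connector_register()/intel_connector_unregister() pull per-connector userspace setup (here, the backlight device) out of modeset init and teardown. A hedged guess, not shown in this diff, at how such hooks are wired into the DRM core's connector registration callbacks:

static const struct drm_connector_funcs sketch_connector_funcs = {
	/* detect/fill_modes/destroy and the rest elided */
	.late_register = intel_connector_register,	/* assumed wiring */
	.early_unregister = intel_connector_unregister,
};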
@@ -16323,7 +16250,7 @@ void intel_connector_unregister(struct drm_connector *connector)
 
 void intel_modeset_cleanup(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_disable_gt_powersave(dev_priv);
 
@@ -16347,8 +16274,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
        /* flush any delayed tasks or pending work */
        flush_scheduled_work();
 
-       drm_connector_unregister_all(dev);
-
        drm_mode_config_cleanup(dev);
 
        intel_cleanup_overlay(dev_priv);
@@ -16371,7 +16296,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
  */
 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
        u16 gmch_ctrl;
 
@@ -16527,7 +16452,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                                struct drm_device *dev,
                                struct intel_display_error_state *error)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        if (!error)
index 0b84f8e5df50160da2881457d0c0fe965b686ae8..0c5ba3410a1e7417320f4c814a25ade303e4cd17 100644
@@ -262,7 +262,7 @@ static void pps_lock(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
 
        /*
@@ -280,7 +280,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
 
        mutex_unlock(&dev_priv->pps_mutex);
@@ -294,7 +294,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
@@ -368,7 +368,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;
@@ -426,6 +426,37 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
        return intel_dp->pps_pipe;
 }
 
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       lockdep_assert_held(&dev_priv->pps_mutex);
+
+       /* We should never land here with regular DP ports */
+       WARN_ON(!is_edp(intel_dp));
+
+       /*
+        * TODO: BXT has 2 PPS instances. The correct port->PPS instance
+        * mapping needs to be retrieved from VBT; for now, just hard-code
+        * instance #0 always.
+        */
+       if (!intel_dp->pps_reset)
+               return 0;
+
+       intel_dp->pps_reset = false;
+
+       /*
+        * Only the HW needs to be reprogrammed; the SW state is fixed and
+        * has been set up during connector init.
+        */
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+
+       return 0;
+}
+
 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);
 
@@ -475,7 +506,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -507,12 +538,13 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 }
 
-void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *encoder;
 
-       if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
+       if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+                   !IS_BROXTON(dev)))
                return;
 
        /*
@@ -532,34 +564,71 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
                        continue;
 
                intel_dp = enc_to_intel_dp(&encoder->base);
-               intel_dp->pps_pipe = INVALID_PIPE;
+               if (IS_BROXTON(dev))
+                       intel_dp->pps_reset = true;
+               else
+                       intel_dp->pps_pipe = INVALID_PIPE;
+       }
+}
+
+struct pps_registers {
+       i915_reg_t pp_ctrl;
+       i915_reg_t pp_stat;
+       i915_reg_t pp_on;
+       i915_reg_t pp_off;
+       i915_reg_t pp_div;
+};
+
+static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
+                                   struct intel_dp *intel_dp,
+                                   struct pps_registers *regs)
+{
+       memset(regs, 0, sizeof(*regs));
+
+       if (IS_BROXTON(dev_priv)) {
+               int idx = bxt_power_sequencer_idx(intel_dp);
+
+               regs->pp_ctrl = BXT_PP_CONTROL(idx);
+               regs->pp_stat = BXT_PP_STATUS(idx);
+               regs->pp_on = BXT_PP_ON_DELAYS(idx);
+               regs->pp_off = BXT_PP_OFF_DELAYS(idx);
+       } else if (HAS_PCH_SPLIT(dev_priv)) {
+               regs->pp_ctrl = PCH_PP_CONTROL;
+               regs->pp_stat = PCH_PP_STATUS;
+               regs->pp_on = PCH_PP_ON_DELAYS;
+               regs->pp_off = PCH_PP_OFF_DELAYS;
+               regs->pp_div = PCH_PP_DIVISOR;
+       } else {
+               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+               regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe);
+               regs->pp_stat = VLV_PIPE_PP_STATUS(pipe);
+               regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe);
+               regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe);
+               regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe);
        }
 }
 
 static i915_reg_t
 _pp_ctrl_reg(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct pps_registers regs;
 
-       if (IS_BROXTON(dev))
-               return BXT_PP_CONTROL(0);
-       else if (HAS_PCH_SPLIT(dev))
-               return PCH_PP_CONTROL;
-       else
-               return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+       intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
+                               &regs);
+
+       return regs.pp_ctrl;
 }
 
 static i915_reg_t
 _pp_stat_reg(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct pps_registers regs;
 
-       if (IS_BROXTON(dev))
-               return BXT_PP_STATUS(0);
-       else if (HAS_PCH_SPLIT(dev))
-               return PCH_PP_STATUS;
-       else
-               return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+       intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
+                               &regs);
+
+       return regs.pp_stat;
 }
 
 /* Reboot notifier handler to shut down panel power to guarantee T12 timing
@@ -570,7 +639,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
        struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;
@@ -601,7 +670,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
 static bool edp_have_panel_power(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -615,7 +684,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -630,7 +699,7 @@ static void
 intel_dp_check_edp(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!is_edp(intel_dp))
                return;
@@ -648,7 +717,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;
@@ -658,7 +727,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
-               done = wait_for_atomic(C, 10) == 0;
+               done = wait_for(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
@@ -781,7 +850,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
@@ -1180,35 +1249,18 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
        kfree(intel_dp->aux.name);
 }
 
-static int
+static void
 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
-       int ret;
 
        intel_aux_reg_init(intel_dp);
+       drm_dp_aux_init(&intel_dp->aux);
 
+       /* Failure to allocate our preferred name is not critical */
        intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
-       if (!intel_dp->aux.name)
-               return -ENOMEM;
-
-       intel_dp->aux.dev = connector->base.kdev;
        intel_dp->aux.transfer = intel_dp_aux_transfer;
-
-       DRM_DEBUG_KMS("registering %s bus for %s\n",
-                     intel_dp->aux.name,
-                     connector->base.kdev->kobj.name);
-
-       ret = drm_dp_aux_register(&intel_dp->aux);
-       if (ret < 0) {
-               DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
-                         intel_dp->aux.name, ret);
-               kfree(intel_dp->aux.name);
-               return ret;
-       }
-
-       return 0;
 }
 
 static int
@@ -1421,7 +1473,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1449,7 +1501,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
                pipe_config->has_pch_encoder = true;
 
-       pipe_config->has_dp_encoder = true;
        pipe_config->has_drrs = false;
        pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
 
@@ -1605,7 +1656,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
 static void intel_dp_prepare(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -1693,16 +1744,21 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
 #define IDLE_CYCLE_MASK                (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
 #define IDLE_CYCLE_VALUE       (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
 
+static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
+                                  struct intel_dp *intel_dp);
+
 static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t pp_stat_reg, pp_ctrl_reg;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
+       intel_pps_verify_state(dev_priv, intel_dp);
+
        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
@@ -1711,8 +1767,9 @@ static void wait_panel_status(struct intel_dp *intel_dp,
                        I915_READ(pp_stat_reg),
                        I915_READ(pp_ctrl_reg));
 
-       if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
-                     5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
+       if (intel_wait_for_register(dev_priv,
+                                   pp_stat_reg, mask, value,
+                                   5000))
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                                I915_READ(pp_stat_reg),
                                I915_READ(pp_ctrl_reg));
@@ -1772,7 +1829,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 control;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1795,7 +1852,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -1868,7 +1925,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -1937,8 +1994,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
  */
 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 {
-       struct drm_i915_private *dev_priv =
-               intel_dp_to_dev(intel_dp)->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -1959,7 +2015,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 static void edp_panel_on(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2020,7 +2076,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        u32 pp;
        i915_reg_t pp_ctrl_reg;
@@ -2072,7 +2128,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2113,7 +2169,7 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2229,7 +2285,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
         * 2. Program DP PLL enable
         */
        if (IS_GEN5(dev_priv))
-               intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
+               intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
 
        intel_dp->DP |= DP_PLL_ENABLE;
 
@@ -2294,7 +2350,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        u32 tmp;
        bool ret;
@@ -2347,7 +2403,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
@@ -2385,8 +2441,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
            !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;
 
-       pipe_config->has_dp_encoder = true;
-
        pipe_config->lane_count =
                ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
 
@@ -2471,7 +2525,7 @@ static void chv_post_disable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_dp_link_down(intel_dp);
 
@@ -2490,7 +2544,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
 
        if (HAS_DDI(dev)) {
@@ -2570,7 +2624,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 static void intel_dp_enable_port(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc =
                to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
 
@@ -2599,7 +2653,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
        enum pipe pipe = crtc->pipe;
@@ -2672,7 +2726,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
        enum pipe pipe = intel_dp->pps_pipe;
        i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
 
@@ -2698,7 +2752,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2736,7 +2790,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2824,7 +2878,7 @@ uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
        if (IS_BROXTON(dev))
@@ -3242,7 +3296,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
        uint32_t val;
 
@@ -3264,8 +3318,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
        if (port == PORT_A)
                return;
 
-       if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
-                    1))
+       if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+                                   DP_TP_STATUS_IDLE_DONE,
+                                   DP_TP_STATUS_IDLE_DONE,
+                                   1))
                DRM_ERROR("Timed out waiting for DP idle patterns\n");
 }
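Several waits in this patch move from open-coded wait_for() polls to intel_wait_for_register(). Judging by the call sites, the helper polls until (read & mask) == value or the millisecond timeout expires, returning 0 on success and an error on timeout; a minimal sketch of those semantics (the real helper in the series may also optimize the fast path, which is omitted here):

/*
 * Minimal sketch of the semantics assumed by these conversions; the
 * actual intel_wait_for_register() is defined elsewhere in the series.
 */
static int wait_for_register_sketch(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 mask, u32 value,
				    unsigned int timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}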
 
@@ -3276,7 +3332,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
        enum port port = intel_dig_port->port;
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t DP = intel_dp->DP;
 
        if (WARN_ON(HAS_DDI(dev)))
@@ -3328,7 +3384,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                I915_WRITE(intel_dp->output_reg, DP);
                POSTING_READ(intel_dp->output_reg);
 
-               intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+               intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
                intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
        }
@@ -3343,7 +3399,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
                             sizeof(intel_dp->dpcd)) < 0)
@@ -4194,7 +4250,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
        }
 
        if (intel_encoder->type != INTEL_OUTPUT_EDP)
-               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+               intel_encoder->type = INTEL_OUTPUT_DP;
 
        intel_dp_probe_oui(intel_dp);
 
@@ -4270,7 +4326,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
                /* MST devices are disconnected from a monitor POV */
                intel_dp_unset_edid(intel_dp);
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
-                       intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+                       intel_encoder->type = INTEL_OUTPUT_DP;
                return connector_status_disconnected;
        }
 
@@ -4309,7 +4365,7 @@ intel_dp_force(struct drm_connector *connector)
        intel_display_power_put(dev_priv, power_domain);
 
        if (intel_encoder->type != INTEL_OUTPUT_EDP)
-               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+               intel_encoder->type = INTEL_OUTPUT_DP;
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
@@ -4358,7 +4414,7 @@ intel_dp_set_property(struct drm_connector *connector,
                      struct drm_property *property,
                      uint64_t val)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -4446,6 +4502,25 @@ intel_dp_set_property(struct drm_connector *connector,
        return 0;
 }
 
+static int
+intel_dp_connector_register(struct drm_connector *connector)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       int ret;
+
+       ret = intel_connector_register(connector);
+       if (ret)
+               return ret;
+
+       i915_debugfs_connector_add(connector);
+
+       DRM_DEBUG_KMS("registering %s bus for %s\n",
+                     intel_dp->aux.name, connector->kdev->kobj.name);
+
+       intel_dp->aux.dev = connector->kdev;
+       return drm_dp_aux_register(&intel_dp->aux);
+}
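intel_dp_connector_register() is wired up below as the connector's .late_register hook, which DRM core invokes from drm_connector_register() only after the sysfs device exists, so connector->kdev is safe to dereference and the AUX channel can be parented to it. A hedged sketch of the ordering this relies on:

/*
 * Sketch of the registration ordering assumed above; the helper names
 * inside drm core are illustrative, not quoted.
 */
int drm_connector_register_sketch(struct drm_connector *connector)
{
	int ret;

	ret = create_sysfs_device(connector);	/* populates connector->kdev */
	if (ret)
		return ret;

	if (connector->funcs->late_register)
		ret = connector->funcs->late_register(connector);

	return ret;
}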
+
 static void
 intel_dp_connector_unregister(struct drm_connector *connector)
 {
@@ -4521,7 +4596,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4544,13 +4619,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 
 void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
-       struct intel_dp *intel_dp;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+       if (!HAS_DDI(dev_priv))
+               intel_dp->DP = I915_READ(intel_dp->output_reg);
 
        if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
                return;
 
-       intel_dp = enc_to_intel_dp(encoder);
-
        pps_lock(intel_dp);
 
        /*
@@ -4572,6 +4649,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
+       .late_register = intel_dp_connector_register,
        .early_unregister = intel_dp_connector_unregister,
        .destroy = intel_dp_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -4594,13 +4672,13 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum irqreturn ret = IRQ_NONE;
 
        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
            intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
-               intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+               intel_dig_port->base.type = INTEL_OUTPUT_DP;
 
        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
                /*
@@ -4622,9 +4700,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
        intel_display_power_get(dev_priv, power_domain);
 
        if (long_hpd) {
-               /* indicate that we need to restart link training */
-               intel_dp->train_set_valid = false;
-
                intel_dp_long_pulse(intel_dp->attached_connector);
                if (intel_dp->is_mst)
                        ret = IRQ_HANDLED;
@@ -4665,7 +4740,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 /* check the VBT to see whether the eDP is on another port */
 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /*
         * eDP not supported on g4x. so bail out early just
@@ -4707,82 +4782,93 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
 }
 
 static void
-intel_dp_init_panel_power_sequencer(struct drm_device *dev,
-                                   struct intel_dp *intel_dp)
+intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
+                          struct intel_dp *intel_dp, struct edp_power_seq *seq)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct edp_power_seq cur, vbt, spec,
-               *final = &intel_dp->pps_delays;
        u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
-       i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
-
-       lockdep_assert_held(&dev_priv->pps_mutex);
-
-       /* already initialized? */
-       if (final->t11_t12 != 0)
-               return;
-
-       if (IS_BROXTON(dev)) {
-               /*
-                * TODO: BXT has 2 sets of PPS registers.
-                * Correct Register for Broxton need to be identified
-                * using VBT. hardcoding for now
-                */
-               pp_ctrl_reg = BXT_PP_CONTROL(0);
-               pp_on_reg = BXT_PP_ON_DELAYS(0);
-               pp_off_reg = BXT_PP_OFF_DELAYS(0);
-       } else if (HAS_PCH_SPLIT(dev)) {
-               pp_ctrl_reg = PCH_PP_CONTROL;
-               pp_on_reg = PCH_PP_ON_DELAYS;
-               pp_off_reg = PCH_PP_OFF_DELAYS;
-               pp_div_reg = PCH_PP_DIVISOR;
-       } else {
-               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+       struct pps_registers regs;
 
-               pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
-               pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
-               pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
-               pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
-       }
+       intel_pps_get_registers(dev_priv, intel_dp, &regs);
 
        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp_ctl = ironlake_get_pp_control(intel_dp);
 
-       pp_on = I915_READ(pp_on_reg);
-       pp_off = I915_READ(pp_off_reg);
-       if (!IS_BROXTON(dev)) {
-               I915_WRITE(pp_ctrl_reg, pp_ctl);
-               pp_div = I915_READ(pp_div_reg);
+       pp_on = I915_READ(regs.pp_on);
+       pp_off = I915_READ(regs.pp_off);
+       if (!IS_BROXTON(dev_priv)) {
+               I915_WRITE(regs.pp_ctrl, pp_ctl);
+               pp_div = I915_READ(regs.pp_div);
        }
 
        /* Pull timing values out of registers */
-       cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
-               PANEL_POWER_UP_DELAY_SHIFT;
+       seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+                    PANEL_POWER_UP_DELAY_SHIFT;
 
-       cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
-               PANEL_LIGHT_ON_DELAY_SHIFT;
+       seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+                 PANEL_LIGHT_ON_DELAY_SHIFT;
 
-       cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
-               PANEL_LIGHT_OFF_DELAY_SHIFT;
+       seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+                 PANEL_LIGHT_OFF_DELAY_SHIFT;
 
-       cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
-               PANEL_POWER_DOWN_DELAY_SHIFT;
+       seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+                  PANEL_POWER_DOWN_DELAY_SHIFT;
 
-       if (IS_BROXTON(dev)) {
+       if (IS_BROXTON(dev_priv)) {
                u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
                        BXT_POWER_CYCLE_DELAY_SHIFT;
                if (tmp > 0)
-                       cur.t11_t12 = (tmp - 1) * 1000;
+                       seq->t11_t12 = (tmp - 1) * 1000;
                else
-                       cur.t11_t12 = 0;
+                       seq->t11_t12 = 0;
        } else {
-               cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+               seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
        }
+}
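The per-platform register selection deleted above (BXT vs. PCH split vs. VLV/CHV) is presumed to have moved into the shared intel_pps_get_registers() helper introduced elsewhere in this series. Reconstructing from the regs.pp_* accesses in this hunk, the lookup struct plausibly carries the four PPS registers:

/*
 * Reconstructed from the regs.pp_ctrl/pp_on/pp_off/pp_div uses in this
 * diff; the real struct pps_registers is defined elsewhere in the series.
 */
struct pps_registers_sketch {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;	/* unused on BXT, where pp_ctrl holds the cycle delay */
};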
+
+static void
+intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
+{
+       DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+                     state_name,
+                     seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct drm_i915_private *dev_priv,
+                      struct intel_dp *intel_dp)
+{
+       struct edp_power_seq hw;
+       struct edp_power_seq *sw = &intel_dp->pps_delays;
+
+       intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
+
+       if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+           hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+               DRM_ERROR("PPS state mismatch\n");
+               intel_pps_dump_state("sw", sw);
+               intel_pps_dump_state("hw", &hw);
+       }
+}
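intel_pps_verify_state() simply re-reads the hardware and field-compares it against the cached software copy; each field itself comes out of the packed registers with the usual mask-and-shift idiom. A worked illustration of that decode (the field position here is assumed for the example, not quoted from i915_reg.h):

/*
 * Illustration of the (reg & MASK) >> SHIFT decode used by
 * intel_pps_readout_hw_state(); a 13-bit power-up delay at bit 16 is
 * an assumption made for this example.
 */
#define EXAMPLE_PP_UP_MASK	(0x1fff << 16)
#define EXAMPLE_PP_UP_SHIFT	16

static u32 example_decode_t1_t3(u32 pp_on)
{
	return (pp_on & EXAMPLE_PP_UP_MASK) >> EXAMPLE_PP_UP_SHIFT;
}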
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+                                   struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct edp_power_seq cur, vbt, spec,
+               *final = &intel_dp->pps_delays;
 
-       DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-                     cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+       lockdep_assert_held(&dev_priv->pps_mutex);
+
+       /* already initialized? */
+       if (final->t11_t12 != 0)
+               return;
+
+       intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
+
+       intel_pps_dump_state("cur", &cur);
 
        vbt = dev_priv->vbt.edp.pps;
 
@@ -4798,8 +4884,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;
 
-       DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-                     vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+       intel_pps_dump_state("vbt", &vbt);
 
        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
@@ -4827,59 +4912,41 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+       /*
+        * We override the HW backlight delays to 1 because we do manual waits
+        * on them. For T8, even BSpec recommends doing it. For T9, if we
+        * don't do this, we'll end up waiting for the backlight off delay
+        * twice: once when we do the manual sleep, and once when we disable
+        * the panel and wait for the PP_STATUS bit to become zero.
+        */
+       final->t8 = 1;
+       final->t9 = 1;
 }
 
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = dev_priv->rawclk_freq / 1000;
-       i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
+       struct pps_registers regs;
        enum port port = dp_to_dig_port(intel_dp)->port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
-       if (IS_BROXTON(dev)) {
-               /*
-                * TODO: BXT has 2 sets of PPS registers.
-                * Correct Register for Broxton need to be identified
-                * using VBT. hardcoding for now
-                */
-               pp_ctrl_reg = BXT_PP_CONTROL(0);
-               pp_on_reg = BXT_PP_ON_DELAYS(0);
-               pp_off_reg = BXT_PP_OFF_DELAYS(0);
-
-       } else if (HAS_PCH_SPLIT(dev)) {
-               pp_on_reg = PCH_PP_ON_DELAYS;
-               pp_off_reg = PCH_PP_OFF_DELAYS;
-               pp_div_reg = PCH_PP_DIVISOR;
-       } else {
-               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+       intel_pps_get_registers(dev_priv, intel_dp, &regs);
 
-               pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
-               pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
-               pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
-       }
-
-       /*
-        * And finally store the new values in the power sequencer. The
-        * backlight delays are set to 1 because we do manual waits on them. For
-        * T8, even BSpec recommends doing it. For T9, if we don't do this,
-        * we'll end up waiting for the backlight off delay twice: once when we
-        * do the manual sleep, and once when we disable the panel and wait for
-        * the PP_STATUS bit to become zero.
-        */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
-               (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
-       pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+               (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+       pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        if (IS_BROXTON(dev)) {
-               pp_div = I915_READ(pp_ctrl_reg);
+               pp_div = I915_READ(regs.pp_ctrl);
                pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
                pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
                                << BXT_POWER_CYCLE_DELAY_SHIFT);
@@ -4902,19 +4969,19 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
        pp_on |= port_sel;
 
-       I915_WRITE(pp_on_reg, pp_on);
-       I915_WRITE(pp_off_reg, pp_off);
+       I915_WRITE(regs.pp_on, pp_on);
+       I915_WRITE(regs.pp_off, pp_off);
        if (IS_BROXTON(dev))
-               I915_WRITE(pp_ctrl_reg, pp_div);
+               I915_WRITE(regs.pp_ctrl, pp_div);
        else
-               I915_WRITE(pp_div_reg, pp_div);
+               I915_WRITE(regs.pp_div, pp_div);
 
        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
-                     I915_READ(pp_on_reg),
-                     I915_READ(pp_off_reg),
+                     I915_READ(regs.pp_on),
+                     I915_READ(regs.pp_off),
                      IS_BROXTON(dev) ?
-                     (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
-                     I915_READ(pp_div_reg));
+                     (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
+                     I915_READ(regs.pp_div));
 }
 
 /**
@@ -4931,7 +4998,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
  */
 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_digital_port *dig_port = NULL;
        struct intel_dp *intel_dp = dev_priv->drrs.dp;
@@ -5030,7 +5097,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5062,7 +5129,7 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp)
 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5077,9 +5144,9 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
        }
 
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(dev_priv->dev,
-                       intel_dp->attached_connector->panel.
-                       fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(&dev_priv->drm,
+                                       intel_dp->attached_connector->panel.
+                                       fixed_mode->vrefresh);
 
        dev_priv->drrs.dp = NULL;
        mutex_unlock(&dev_priv->drrs.mutex);
@@ -5109,9 +5176,9 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
                goto unlock;
 
        if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
-               intel_dp_set_drrs_state(dev_priv->dev,
-                       intel_dp->attached_connector->panel.
-                       downclock_mode->vrefresh);
+               intel_dp_set_drrs_state(&dev_priv->drm,
+                                       intel_dp->attached_connector->panel.
+                                       downclock_mode->vrefresh);
 
 unlock:
        mutex_unlock(&dev_priv->drrs.mutex);
@@ -5130,7 +5197,7 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
 void intel_edp_drrs_invalidate(struct drm_device *dev,
                unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -5153,9 +5220,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 
        /* invalidate means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(dev_priv->dev,
-                               dev_priv->drrs.dp->attached_connector->panel.
-                               fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(&dev_priv->drm,
+                                       dev_priv->drrs.dp->attached_connector->panel.
+                                       fixed_mode->vrefresh);
 
        mutex_unlock(&dev_priv->drrs.mutex);
 }
@@ -5175,7 +5242,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 void intel_edp_drrs_flush(struct drm_device *dev,
                unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -5198,9 +5265,9 @@ void intel_edp_drrs_flush(struct drm_device *dev,
 
        /* flush means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(dev_priv->dev,
-                               dev_priv->drrs.dp->attached_connector->panel.
-                               fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(&dev_priv->drm,
+                                       dev_priv->drrs.dp->attached_connector->panel.
+                                       fixed_mode->vrefresh);
 
        /*
         * flush also means no more activity hence schedule downclock, if all
@@ -5268,7 +5335,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
 {
        struct drm_connector *connector = &intel_connector->base;
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *downclock_mode = NULL;
 
        INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
@@ -5306,7 +5373,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
@@ -5317,8 +5384,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (!is_edp(intel_dp))
                return true;
 
+       /*
+        * On IBX/CPT we may get here with LVDS already registered. Since the
+        * driver uses the only internal power sequencer available for both
+        * eDP and LVDS, bail out early in this case to prevent interfering
+        * with an already powered-on LVDS power sequencer.
+        */
+       if (intel_get_lvds_encoder(dev)) {
+               WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+               DRM_INFO("LVDS was detected, not registering eDP\n");
+
+               return false;
+       }
+
        pps_lock(intel_dp);
+
+       intel_dp_init_panel_power_timestamps(intel_dp);
+
+       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+               vlv_initial_power_sequencer_setup(intel_dp);
+       } else {
+               intel_dp_init_panel_power_sequencer(dev, intel_dp);
+               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+       }
+
        intel_edp_panel_vdd_sanitize(intel_dp);
+
        pps_unlock(intel_dp);
 
        /* Cache DPCD and EDID for edp. */
@@ -5332,14 +5423,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        } else {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
-               return false;
+               goto out_vdd_off;
        }
 
-       /* We now know it's not a ghost, init power sequence regs. */
-       pps_lock(intel_dp);
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
-       pps_unlock(intel_dp);
-
        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
@@ -5407,6 +5493,18 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        intel_panel_setup_backlight(connector, pipe);
 
        return true;
+
+out_vdd_off:
+       cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+       /*
+        * vdd might still be enabled due to the delayed vdd off.
+        * Make sure vdd is actually turned off here.
+        */
+       pps_lock(intel_dp);
+       edp_panel_vdd_off_sync(intel_dp);
+       pps_unlock(intel_dp);
+
+       return false;
 }
 
 bool
@@ -5417,9 +5515,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
-       int type, ret;
+       int type;
 
        if (WARN(intel_dig_port->max_lanes < 1,
                 "Not enough lanes (%d) for DP on port %c\n",
@@ -5478,11 +5576,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
+       intel_dp_aux_init(intel_dp, intel_connector);
+
        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
                          edp_panel_vdd_work);
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
-       drm_connector_register(connector);
 
        if (HAS_DDI(dev))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -5512,22 +5611,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                BUG();
        }
 
-       if (is_edp(intel_dp)) {
-               pps_lock(intel_dp);
-               intel_dp_init_panel_power_timestamps(intel_dp);
-               if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-                       vlv_initial_power_sequencer_setup(intel_dp);
-               else
-                       intel_dp_init_panel_power_sequencer(dev, intel_dp);
-               pps_unlock(intel_dp);
-       }
-
-       ret = intel_dp_aux_init(intel_dp, intel_connector);
-       if (ret)
-               goto fail;
-
        /* init MST on ports that can support it */
-       if (HAS_DP_MST(dev) &&
+       if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
            (port == PORT_B || port == PORT_C || port == PORT_D))
                intel_dp_mst_encoder_init(intel_dig_port,
                                          intel_connector->base.base.id);
@@ -5549,22 +5634,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
 
-       i915_debugfs_connector_add(connector);
-
        return true;
 
 fail:
-       if (is_edp(intel_dp)) {
-               cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-               /*
-                * vdd might still be enabled do to the delayed vdd off.
-                * Make sure vdd is actually turned off here.
-                */
-               pps_lock(intel_dp);
-               edp_panel_vdd_off_sync(intel_dp);
-               pps_unlock(intel_dp);
-       }
-       drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
 
        return false;
@@ -5574,7 +5646,7 @@ bool intel_dp_init(struct drm_device *dev,
                   i915_reg_t output_reg,
                   enum port port)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
@@ -5622,7 +5694,7 @@ bool intel_dp_init(struct drm_device *dev,
        intel_dig_port->dp.output_reg = output_reg;
        intel_dig_port->max_lanes = 4;
 
-       intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+       intel_encoder->type = INTEL_OUTPUT_DP;
        if (IS_CHERRYVIEW(dev)) {
                if (port == PORT_D)
                        intel_encoder->crtc_mask = 1 << 2;
@@ -5652,43 +5724,35 @@ bool intel_dp_init(struct drm_device *dev,
 
 void intel_dp_mst_suspend(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        /* disable MST */
        for (i = 0; i < I915_MAX_PORTS; i++) {
                struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
-               if (!intel_dig_port)
+
+               if (!intel_dig_port || !intel_dig_port->dp.can_mst)
                        continue;
 
-               if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
-                       if (!intel_dig_port->dp.can_mst)
-                               continue;
-                       if (intel_dig_port->dp.is_mst)
-                               drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
-               }
+               if (intel_dig_port->dp.is_mst)
+                       drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
        }
 }
 
 void intel_dp_mst_resume(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        for (i = 0; i < I915_MAX_PORTS; i++) {
                struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
-               if (!intel_dig_port)
-                       continue;
-               if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
-                       int ret;
+               int ret;
 
-                       if (!intel_dig_port->dp.can_mst)
-                               continue;
+               if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+                       continue;
 
-                       ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
-                       if (ret != 0) {
-                               intel_dp_check_mst_status(&intel_dig_port->dp);
-                       }
-               }
+               ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+               if (ret)
+                       intel_dp_check_mst_status(&intel_dig_port->dp);
        }
 }
index 0b8eefc2acc5d93088b960e4714bce55944df82e..60fb39cd220b4a01f5b9469a5de29ee496abe61f 100644 (file)
@@ -85,8 +85,7 @@ static bool
 intel_dp_reset_link_train(struct intel_dp *intel_dp,
                        uint8_t dp_train_pat)
 {
-       if (!intel_dp->train_set_valid)
-               memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+       memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
        intel_dp_set_signal_levels(intel_dp);
        return intel_dp_set_link_train(intel_dp, dp_train_pat);
 }
@@ -161,23 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
                        break;
                }
 
-               /*
-                * if we used previously trained voltage and pre-emphasis values
-                * and we don't get clock recovery, reset link training values
-                */
-               if (intel_dp->train_set_valid) {
-                       DRM_DEBUG_KMS("clock recovery not ok, reset");
-                       /* clear the flag as we are not reusing train set */
-                       intel_dp->train_set_valid = false;
-                       if (!intel_dp_reset_link_train(intel_dp,
-                                                      DP_TRAINING_PATTERN_1 |
-                                                      DP_LINK_SCRAMBLING_DISABLE)) {
-                               DRM_ERROR("failed to enable link training\n");
-                               return;
-                       }
-                       continue;
-               }
-
                /* Check to see if we've tried the max voltage */
                for (i = 0; i < intel_dp->lane_count; i++)
                        if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
@@ -284,7 +266,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
                /* Make sure clock is still ok */
                if (!drm_dp_clock_recovery_ok(link_status,
                                              intel_dp->lane_count)) {
-                       intel_dp->train_set_valid = false;
                        intel_dp_link_training_clock_recovery(intel_dp);
                        intel_dp_set_link_train(intel_dp,
                                                training_pattern |
@@ -301,7 +282,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 
                /* Try 5 times, then try clock recovery if that fails */
                if (tries > 5) {
-                       intel_dp->train_set_valid = false;
                        intel_dp_link_training_clock_recovery(intel_dp);
                        intel_dp_set_link_train(intel_dp,
                                                training_pattern |
@@ -322,10 +302,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 
        intel_dp_set_idle_link_train(intel_dp);
 
-       if (channel_eq) {
-               intel_dp->train_set_valid = true;
+       if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
-       }
 }
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
index 9646816604be657d02e4f9fa56b1dc926234b795..68a005d729e90b218cc3ac4eaefcb094388370b5 100644 (file)
@@ -47,7 +47,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
        pipe_config->dp_encoder_is_mst = true;
        pipe_config->has_pch_encoder = false;
-       pipe_config->has_dp_encoder = true;
        bpp = 24;
        /*
         * for MST we always configure max link bw - the spec doesn't
@@ -140,7 +139,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
        int ret;
        uint32_t temp;
@@ -207,14 +206,17 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
        int ret;
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-       if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
-                    1))
+       if (intel_wait_for_register(dev_priv,
+                                   DP_TP_STATUS(port),
+                                   DP_TP_STATUS_ACT_SENT,
+                                   DP_TP_STATUS_ACT_SENT,
+                                   1))
                DRM_ERROR("Timed out waiting for ACT sent\n");
 
        ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -239,12 +241,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        u32 temp, flags = 0;
 
-       pipe_config->has_dp_encoder = true;
-
        temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
        if (temp & TRANS_DDI_PHSYNC)
                flags |= DRM_MODE_FLAG_PHSYNC;
@@ -336,6 +336,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_mst_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
+       .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_dp_mst_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -477,9 +478,11 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_device *dev = connector->dev;
+
        drm_modeset_lock_all(dev);
        intel_connector_add_to_fbdev(intel_connector);
        drm_modeset_unlock_all(dev);
+
        drm_connector_register(&intel_connector->base);
 }
 
index 288da35572b45ff694dace802f0b1910f782b872..047f4874894483361131a1eb809cdbd8375045e1 100644 (file)
@@ -168,7 +168,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
@@ -250,7 +250,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
@@ -400,7 +400,7 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
@@ -429,7 +429,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
@@ -457,7 +457,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
 void vlv_phy_reset_lanes(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel port = vlv_dport_to_channel(dport);
index c0eff1571731f55dd303ad02416623e1b70b69ca..5c1f2d235ffa5a66baf029f116d8ba8b65a5d8ac 100644 (file)
@@ -83,7 +83,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_shared_dpll *pll = crtc->config->shared_dpll;
 
        if (WARN_ON(pll == NULL))
@@ -112,7 +112,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 void intel_enable_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_shared_dpll *pll = crtc->config->shared_dpll;
        unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
        unsigned old_mask;
@@ -151,7 +151,7 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
 void intel_disable_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_shared_dpll *pll = crtc->config->shared_dpll;
        unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
 
@@ -191,7 +191,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
                       enum intel_dpll_id range_min,
                       enum intel_dpll_id range_max)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_shared_dpll *pll;
        struct intel_shared_dpll_config *shared_dpll;
        enum intel_dpll_id i;
@@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
                                 struct intel_shared_dpll *pll)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
 
        /* Make sure no transcoder is still depending on us. */
@@ -713,7 +713,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                pll = intel_find_shared_dpll(crtc, crtc_state,
                                             DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
 
-       } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+       } else if (encoder->type == INTEL_OUTPUT_DP ||
                   encoder->type == INTEL_OUTPUT_DP_MST ||
                   encoder->type == INTEL_OUTPUT_EDP) {
                enum intel_dpll_id pll_id;
@@ -856,7 +856,11 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
        I915_WRITE(regs[pll->id].ctl,
                   I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
 
-       if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5))
+       if (intel_wait_for_register(dev_priv,
+                                   DPLL_STATUS,
+                                   DPLL_LOCK(pll->id),
+                                   DPLL_LOCK(pll->id),
+                                   5))
                DRM_ERROR("DPLL %d not locked\n", pll->id);
 }
 
@@ -1222,7 +1226,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                         DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
                         DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
                         wrpll_params.central_freq;
-       } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+       } else if (encoder->type == INTEL_OUTPUT_DP ||
                   encoder->type == INTEL_OUTPUT_DP_MST ||
                   encoder->type == INTEL_OUTPUT_EDP) {
                switch (crtc_state->port_clock / 2) {
@@ -1374,8 +1378,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
        I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
        POSTING_READ(BXT_PORT_PLL_ENABLE(port));
 
-       if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
-                       PORT_PLL_LOCK), 200))
+       if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+                       200))
                DRM_ERROR("PLL %d not locked\n", port);
 
        /*
@@ -1530,7 +1534,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                clk_div.m2_frac_en = clk_div.m2_frac != 0;
 
                vco = best_clock.vco;
-       } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+       } else if (encoder->type == INTEL_OUTPUT_DP ||
                   encoder->type == INTEL_OUTPUT_EDP) {
                int i;
 
@@ -1632,7 +1636,7 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
 
 static void intel_ddi_pll_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_GEN(dev_priv) < 9) {
                uint32_t val = I915_READ(LCPLL_CTL);
@@ -1719,7 +1723,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
 
 void intel_shared_dpll_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct intel_dpll_mgr *dpll_mgr = NULL;
        const struct dpll_info *dpll_info;
        int i;
index 7d0e071fe3553cf185e42c846c1181d4a13cd0ee..55aeaf0417498bcfc42ca2b15d56964516b449c4 100644 (file)
 })
 
 #define wait_for(COND, MS)             _wait_for((COND), (MS) * 1000, 1000)
-#define wait_for_us(COND, US)          _wait_for((COND), (US), 1)
 
 /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
 #if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic())
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
 #else
-# define _WAIT_FOR_ATOMIC_CHECK do { } while (0)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
 #endif
 
-#define _wait_for_atomic(COND, US) ({ \
-       unsigned long end__; \
-       int ret__ = 0; \
-       _WAIT_FOR_ATOMIC_CHECK; \
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+       int cpu, ret, timeout = (US) * 1000; \
+       u64 base; \
+       _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
        BUILD_BUG_ON((US) > 50000); \
-       end__ = (local_clock() >> 10) + (US) + 1; \
-       while (!(COND)) { \
-               if (time_after((unsigned long)(local_clock() >> 10), end__)) { \
-                       /* Unlike the regular wait_for(), this atomic variant \
-                        * cannot be preempted (and we'll just ignore the issue\
-                        * of irq interruptions) and so we know that no time \
-                        * has passed since the last check of COND and can \
-                        * immediately report the timeout. \
-                        */ \
-                       ret__ = -ETIMEDOUT; \
+       if (!(ATOMIC)) { \
+               preempt_disable(); \
+               cpu = smp_processor_id(); \
+       } \
+       base = local_clock(); \
+       for (;;) { \
+               u64 now = local_clock(); \
+               if (!(ATOMIC)) \
+                       preempt_enable(); \
+               if (COND) { \
+                       ret = 0; \
+                       break; \
+               } \
+               if (now - base >= timeout) { \
+                       ret = -ETIMEDOUT; \
                        break; \
                } \
                cpu_relax(); \
+               if (!(ATOMIC)) { \
+                       preempt_disable(); \
+                       if (unlikely(cpu != smp_processor_id())) { \
+                               timeout -= now - base; \
+                               cpu = smp_processor_id(); \
+                               base = local_clock(); \
+                       } \
+               } \
        } \
+       ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+       int ret__; \
+       BUILD_BUG_ON(!__builtin_constant_p(US)); \
+       if ((US) > 10) \
+               ret__ = _wait_for((COND), (US), 10); \
+       else \
+               ret__ = _wait_for_atomic((COND), (US), 0); \
        ret__; \
 })
 
-#define wait_for_atomic(COND, MS)      _wait_for_atomic((COND), (MS) * 1000)
-#define wait_for_atomic_us(COND, US)   _wait_for_atomic((COND), (US))
+#define wait_for_atomic(COND, MS)      _wait_for_atomic((COND), (MS) * 1000, 1)
+#define wait_for_atomic_us(COND, US)   _wait_for_atomic((COND), (US), 1)
 
 #define KHz(x) (1000 * (x))
 #define MHz(x) KHz(1000 * (x))
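The rewritten _wait_for_atomic() times itself with local_clock(), which is only guaranteed monotonic on a single CPU. In the non-atomic case (ATOMIC == 0) it therefore disables preemption around each sample and, if the task migrated anyway, restarts the clock base with the remaining budget instead of comparing timestamps taken on two different CPUs. wait_for_us() then routes short constant waits (up to 10us) through the busy-loop and longer ones through the sleeping _wait_for(). A usage sketch under those semantics (register and bit are hypothetical):

/*
 * Hypothetical caller: busy-poll a status bit for up to 10us from
 * atomic context. EXAMPLE_STATUS and EXAMPLE_READY are not real i915
 * registers.
 */
#define EXAMPLE_STATUS	_MMIO(0x12345)
#define EXAMPLE_READY	(1 << 0)

static int example_poll_ready(struct drm_i915_private *dev_priv)
{
	/* 0 once the bit is set, -ETIMEDOUT after ~10us */
	return wait_for_atomic_us(I915_READ(EXAMPLE_STATUS) & EXAMPLE_READY, 10);
}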
@@ -135,7 +159,7 @@ enum intel_output_type {
        INTEL_OUTPUT_LVDS = 4,
        INTEL_OUTPUT_TVOUT = 5,
        INTEL_OUTPUT_HDMI = 6,
-       INTEL_OUTPUT_DISPLAYPORT = 7,
+       INTEL_OUTPUT_DP = 7,
        INTEL_OUTPUT_EDP = 8,
        INTEL_OUTPUT_DSI = 9,
        INTEL_OUTPUT_UNKNOWN = 10,
@@ -159,6 +183,7 @@ struct intel_framebuffer {
 struct intel_fbdev {
        struct drm_fb_helper helper;
        struct intel_framebuffer *fb;
+       async_cookie_t cookie;
        int preferred_bpp;
 };
 
@@ -497,12 +522,10 @@ struct intel_crtc_state {
         */
        bool limited_color_range;
 
-       /* DP has a bunch of special case unfortunately, so mark the pipe
-        * accordingly. */
-       bool has_dp_encoder;
-
-       /* DSI has special cases */
-       bool has_dsi_encoder;
+       /* Bitmask of encoder types (enum intel_output_type)
+        * driven by the pipe.
+        */
+       unsigned int output_types;
 
        /* Whether we should send NULL infoframes. Required for audio. */
        bool has_hdmi_sink;
@@ -861,6 +884,11 @@ struct intel_dp {
         * this port. Only relevant on VLV/CHV.
         */
        enum pipe pps_pipe;
+       /*
+        * Set if the sequencer may be reset due to a power transition,
+        * requiring a reinitialization. Only relevant on BXT.
+        */
+       bool pps_reset;
        struct edp_power_seq pps_delays;
 
        bool can_mst; /* this port supports mst */
@@ -886,8 +914,6 @@ struct intel_dp {
        /* This is called before a link training is started */
        void (*prepare_link_retrain)(struct intel_dp *intel_dp);
 
-       bool train_set_valid;
-
        /* Displayport compliance testing */
        unsigned long compliance_test_type;
        unsigned long compliance_test_data;
@@ -959,14 +985,14 @@ vlv_pipe_to_channel(enum pipe pipe)
 static inline struct drm_crtc *
 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        return dev_priv->pipe_to_crtc_mapping[pipe];
 }
 
 static inline struct drm_crtc *
 intel_get_crtc_for_plane(struct drm_device *dev, int plane)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        return dev_priv->plane_to_crtc_mapping[plane];
 }
 
@@ -1159,7 +1185,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
                                             enum pipe pipe);
-bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
+static inline bool
+intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
+                   enum intel_output_type type)
+{
+       return crtc_state->output_types & (1 << type);
+}
+static inline bool
+intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
+{
+       return crtc_state->output_types &
+               ((1 << INTEL_OUTPUT_DP) |
+                (1 << INTEL_OUTPUT_DP_MST) |
+                (1 << INTEL_OUTPUT_EDP));
+}
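With has_dp_encoder and has_dsi_encoder folded into the output_types bitmask, per-type checks become bit tests via the helpers above. A hypothetical caller showing the intended replacement for the old bools:

/* Hypothetical caller; not taken from the tree. */
static bool example_wants_dp_m_n(const struct intel_crtc_state *crtc_state)
{
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		return false;

	return intel_crtc_has_dp_encoder(crtc_state);
}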
 static inline void
 intel_wait_for_vblank(struct drm_device *dev, int pipe)
 {
@@ -1340,7 +1379,7 @@ void intel_dp_mst_resume(struct drm_device *dev);
 int intel_dp_max_link_rate(struct intel_dp *intel_dp);
 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
 void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
-void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
 void intel_plane_destroy(struct drm_plane *plane);
 void intel_edp_drrs_enable(struct intel_dp *intel_dp);
@@ -1453,6 +1492,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 
 /* intel_lvds.c */
 void intel_lvds_init(struct drm_device *dev);
+struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
 bool intel_is_dual_link_lvds(struct drm_device *dev);
 
 
@@ -1491,7 +1531,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
                              int fitting_mode);
 void intel_panel_set_backlight_acpi(struct intel_connector *connector,
                                    u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
+int intel_panel_setup_backlight(struct drm_connector *connector,
+                               enum pipe pipe);
 void intel_panel_enable_backlight(struct intel_connector *connector);
 void intel_panel_disable_backlight(struct intel_connector *connector);
 void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1500,11 +1541,15 @@ extern struct drm_display_mode *intel_find_panel_downclock(
                                struct drm_device *dev,
                                struct drm_display_mode *fixed_mode,
                                struct drm_connector *connector);
-void intel_backlight_register(struct drm_device *dev);
 
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int intel_backlight_device_register(struct intel_connector *connector);
 void intel_backlight_device_unregister(struct intel_connector *connector);
 #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline int intel_backlight_device_register(struct intel_connector *connector)
+{
+       return 0;
+}
 static inline void intel_backlight_device_unregister(struct intel_connector *connector)
 {
 }
index b444d0e35a986496fc586b33659ca034c61b03f7..de8e9fb51595b984780984ba29408c8ed0efee6b 100644 (file)
@@ -84,13 +84,15 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
 {
        struct drm_encoder *encoder = &intel_dsi->base.base;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 mask;
 
        mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
                LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
 
-       if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
+       if (intel_wait_for_register(dev_priv,
+                                   MIPI_GEN_FIFO_STAT(port), mask, mask,
+                                   100))
                DRM_ERROR("DPI FIFOs are not empty\n");
 }
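
This hunk shows the two mechanical conversions repeated throughout the rest of this merge: dev->dev_private becomes to_i915(dev) (and dev_priv->dev becomes &dev_priv->drm in later hunks), and open-coded register polling moves into intel_wait_for_register(). A sketch of the polling equivalence (the helper returns 0 on success and a negative error code on timeout):

        /* Before: hand-rolled poll, timeout in milliseconds. */
        if (wait_for((I915_READ(reg) & mask) == value, timeout_ms))
                DRM_ERROR("timed out\n");

        /* After: same condition and timeout, but the polling policy
         * lives in one helper instead of at every call site. */
        if (intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms))
                DRM_ERROR("timed out\n");
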
 
@@ -129,7 +131,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
 {
        struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
        struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dsi_host->port;
        struct mipi_dsi_packet packet;
        ssize_t ret;
@@ -158,8 +160,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
 
        /* note: this is never true for reads */
        if (packet.payload_length) {
-
-               if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50))
+               if (intel_wait_for_register(dev_priv,
+                                           MIPI_GEN_FIFO_STAT(port),
+                                           data_mask, 0,
+                                           50))
                        DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
 
                write_data(dev_priv, data_reg, packet.payload,
@@ -170,7 +174,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
                I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
        }
 
-       if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) {
+       if (intel_wait_for_register(dev_priv,
+                                   MIPI_GEN_FIFO_STAT(port),
+                                   ctrl_mask, 0,
+                                   50)) {
                DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
        }
 
@@ -179,7 +186,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
        /* ->rx_len is set only for reads */
        if (msg->rx_len) {
                data_mask = GEN_READ_DATA_AVAIL;
-               if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50))
+               if (intel_wait_for_register(dev_priv,
+                                           MIPI_INTR_STAT(port),
+                                           data_mask, data_mask,
+                                           50))
                        DRM_ERROR("Timeout waiting for read data.\n");
 
                read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
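
Taken together, the converted waits in intel_dsi_host_transfer() trace the transfer sequence; a summary of the flow visible in the hunks above (code elided by the diff is omitted, and the write-one-to-clear behaviour of MIPI_INTR_STAT is an assumption):

        /*
         * 1. payload:  wait for HS/LP DATA FIFO space, then write_data()
         * 2. reads:    clear the sticky GEN_READ_DATA_AVAIL status in
         *              MIPI_INTR_STAT before issuing the command
         * 3. header:   wait for HS/LP CTRL FIFO space
         * 4. reads:    wait for GEN_READ_DATA_AVAIL to assert, then
         *              drain the returned bytes with read_data()
         */
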
@@ -250,7 +260,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
 {
        struct drm_encoder *encoder = &intel_dsi->base.base;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 mask;
 
        /* XXX: pipe, hs */
@@ -269,7 +279,9 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
        I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
 
        mask = SPL_PKT_SENT_INTERRUPT;
-       if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100))
+       if (intel_wait_for_register(dev_priv,
+                                   MIPI_INTR_STAT(port), mask, mask,
+                                   100))
                DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
 
        return 0;
@@ -302,7 +314,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
                                                   base);
        struct intel_connector *intel_connector = intel_dsi->attached_connector;
@@ -313,8 +325,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 
        DRM_DEBUG_KMS("\n");
 
-       pipe_config->has_dsi_encoder = true;
-
        if (fixed_mode) {
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
@@ -348,7 +358,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 
 static void bxt_dsi_device_ready(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
        u32 val;
@@ -387,7 +397,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
 
 static void vlv_dsi_device_ready(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
        u32 val;
@@ -437,7 +447,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
 static void intel_dsi_port_enable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
@@ -478,7 +488,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
 static void intel_dsi_port_disable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
 
@@ -497,7 +507,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
 static void intel_dsi_enable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
 
@@ -528,7 +538,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
 static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum port port;
@@ -602,7 +612,7 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
 static void intel_dsi_disable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
        u32 temp;
@@ -641,7 +651,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
 static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
 
@@ -667,8 +677,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
                /* Wait till Clock lanes are in LP-00 state for MIPI Port A
                 * only. MIPI Port C has no similar bit for checking
                 */
-               if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT)
-                                               == 0x00000), 30))
+               if (intel_wait_for_register(dev_priv,
+                                           port_ctrl, AFE_LATCHOUT, 0,
+                                           30))
                        DRM_ERROR("DSI LP not going Low\n");
 
                /* Disable MIPI PHY transparent latch */
@@ -685,7 +696,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 
 static void intel_dsi_post_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 
        DRM_DEBUG_KMS("\n");
@@ -720,7 +731,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        enum intel_display_power_domain power_domain;
@@ -794,7 +805,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
                                 struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *adjusted_mode =
                                        &pipe_config->base.adjusted_mode;
        struct drm_display_mode *adjusted_mode_sw;
@@ -954,8 +965,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
        u32 pclk;
        DRM_DEBUG_KMS("\n");
 
-       pipe_config->has_dsi_encoder = true;
-
        if (IS_BROXTON(dev))
                bxt_dsi_get_pipe_config(encoder, pipe_config);
 
@@ -1013,7 +1022,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
                            const struct drm_display_mode *adjusted_mode)
 {
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
        enum port port;
        unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1099,7 +1108,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
        const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1390,6 +1399,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dsi_detect,
+       .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_dsi_connector_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
@@ -1420,7 +1430,7 @@ void intel_dsi_init(struct drm_device *dev)
        struct intel_connector *intel_connector;
        struct drm_connector *connector;
        struct drm_display_mode *scan, *fixed_mode = NULL;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port;
        unsigned int i;
 
@@ -1587,13 +1597,10 @@ void intel_dsi_init(struct drm_device *dev)
        connector->display_info.height_mm = fixed_mode->height_mm;
 
        intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+       intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        intel_dsi_add_properties(intel_connector);
 
-       drm_connector_register(connector);
-
-       intel_panel_setup_backlight(connector, INVALID_PIPE);
-
        return;
 
 err:
index f0dc427743f869f2ba63c8fef892607b851633d1..ac7c6020c443f4e118670800c9281e623bd33c31 100644 (file)
@@ -159,7 +159,7 @@ static int dcs_setup_backlight(struct intel_connector *connector,
 int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
 {
        struct drm_device *dev = intel_connector->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder = intel_connector->encoder;
        struct intel_panel *panel = &intel_connector->panel;
 
index f122484bedfc2fe5b860c55fa4a102635dd49171..cd154ce6b6c1ed8edad276dec2477532bbf52415 100644 (file)
@@ -303,7 +303,7 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
        struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u8 gpio_source, gpio_index;
        bool value;
 
@@ -469,7 +469,7 @@ static int vbt_panel_get_modes(struct drm_panel *panel)
        struct vbt_panel *vbt_panel = to_vbt_panel(panel);
        struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
        struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *mode;
 
        if (!panel->connector)
@@ -497,7 +497,7 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
 struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
 {
        struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
        struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
        struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
@@ -649,14 +649,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
                                );
 
        /*
-        * Exit zero  is unified val ths_zero and ths_exit
+        * Exit zero is unified val ths_zero and ths_exit
         * minimum value for ths_exit = 110ns
         * min (exit_zero_cnt * 2) = 110/UI
         * exit_zero_cnt = 55/UI
         */
-        if (exit_zero_cnt < (55 * ui_den / ui_num))
-               if ((55 * ui_den) % ui_num)
-                       exit_zero_cnt += 1;
+       if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
+               exit_zero_cnt += 1;
 
        /* clk zero count */
        clk_zero_cnt = DIV_ROUND_UP(
index 1765e6e18f2c6e8aa2dfb2ac4d8f08fb9983dc40..6ab58a01b18e2ee0cb20345aac39208c020d7e5b 100644 (file)
@@ -55,12 +55,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
                        struct intel_crtc_state *config,
                        int target_dsi_clk)
 {
-       unsigned int calc_m = 0, calc_p = 0;
        unsigned int m_min, m_max, p_min = 2, p_max = 6;
        unsigned int m, n, p;
-       int ref_clk;
-       int delta = target_dsi_clk;
-       u32 m_seed;
+       unsigned int calc_m, calc_p;
+       int delta, ref_clk;
 
        /* target_dsi_clk is expected in kHz */
        if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
@@ -80,6 +78,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
                m_max = 92;
        }
 
+       calc_p = p_min;
+       calc_m = m_min;
+       delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n));
+
        for (m = m_min; m <= m_max && delta; m++) {
                for (p = p_min; p <= p_max && delta; p++) {
                        /*
@@ -97,11 +99,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
        }
 
        /* register has log2(N1), this works fine for powers of two */
-       n = ffs(n) - 1;
-       m_seed = lfsr_converts[calc_m - 62];
        config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
-       config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT |
-               m_seed << DSI_PLL_M1_DIV_SHIFT;
+       config->dsi_pll.div =
+               (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT |
+               (u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT;
 
        return 0;
 }
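
dsi_calc_mnp() minimises |target_dsi_clk - (m * ref_clk) / (p * n)| over the m/p ranges; the seeding added above guarantees calc_m and calc_p always name a real divider (the old code started them at 0, which would index lfsr_converts out of range if the loop never improved on the sentinel) while keeping the "&& delta" early exit working. A worked illustration (target, ref_clk and n are assumed here; m_min = 62 matches the lfsr_converts indexing):

        /*
         * Assume target_dsi_clk = 500000 kHz, ref_clk = 25000 kHz, n = 1:
         *   seed   m = 62, p = 2:  62 * 25000 / 2 = 775000 -> delta = 275000
         *   later  m = 80, p = 4:  80 * 25000 / 4 = 500000 -> delta = 0
         * delta == 0 then terminates both loops via the "&& delta" tests.
         */
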
@@ -113,7 +114,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
 static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
                               struct intel_crtc_state *config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        int ret;
        u32 dsi_clk;
@@ -234,8 +235,11 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
         * PLL lock should deassert within 200us.
         * Wait up to 1ms before timing out.
         */
-       if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE)
-                                       & BXT_DSI_PLL_LOCKED) == 0, 1))
+       if (intel_wait_for_register(dev_priv,
+                                   BXT_DSI_PLL_ENABLE,
+                                   BXT_DSI_PLL_LOCKED,
+                                   0,
+                                   1))
                DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
 }
 
@@ -321,7 +325,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
        u32 dsi_clk;
        u32 dsi_ratio;
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
        /* Divide by zero */
        if (!pipe_bpp) {
@@ -356,7 +360,7 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
        u32 temp;
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 
        temp = I915_READ(MIPI_CTRL(port));
@@ -370,7 +374,7 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
                                   const struct intel_crtc_state *config)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp;
        u32 dsi_rate = 0;
        u32 pll_ratio = 0;
@@ -465,7 +469,7 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
 static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
                               const struct intel_crtc_state *config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
        u32 val;
@@ -486,7 +490,11 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
        I915_WRITE(BXT_DSI_PLL_ENABLE, val);
 
        /* Timeout and fail if PLL not locked */
-       if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) {
+       if (intel_wait_for_register(dev_priv,
+                                   BXT_DSI_PLL_ENABLE,
+                                   BXT_DSI_PLL_LOCKED,
+                                   BXT_DSI_PLL_LOCKED,
+                                   1)) {
                DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
                return;
        }
@@ -542,7 +550,7 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
        u32 tmp;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Clear old configurations */
        tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
index 60e4ddf2ec6d2efe80f8894e9d7ac94467d02ae5..47bdf9dad0d37c093a1b22099c4db0a5099be552 100644 (file)
@@ -122,7 +122,7 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
 static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
        u32 tmp;
 
@@ -138,7 +138,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        u32 tmp;
 
@@ -155,7 +155,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
 static void intel_dvo_get_config(struct intel_encoder *encoder,
                                 struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        u32 tmp, flags = 0;
 
@@ -176,7 +176,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
 
 static void intel_disable_dvo(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
        u32 temp = I915_READ(dvo_reg);
@@ -188,7 +188,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
 
 static void intel_enable_dvo(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
@@ -256,7 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
 static void intel_dvo_pre_enable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -305,7 +305,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
 
 static int intel_dvo_get_modes(struct drm_connector *connector)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        const struct drm_display_mode *fixed_mode =
                to_intel_connector(connector)->panel.fixed_mode;
 
@@ -341,6 +341,7 @@ static void intel_dvo_destroy(struct drm_connector *connector)
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dvo_detect,
+       .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_dvo_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
@@ -378,7 +379,7 @@ static struct drm_display_mode *
 intel_dvo_get_current_mode(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
        uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
        struct drm_display_mode *mode = NULL;
@@ -420,7 +421,7 @@ static char intel_dvo_port_name(i915_reg_t dvo_reg)
 
 void intel_dvo_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *intel_encoder;
        struct intel_dvo *intel_dvo;
        struct intel_connector *intel_connector;
@@ -550,7 +551,6 @@ void intel_dvo_init(struct drm_device *dev)
                        intel_dvo->panel_wants_dither = true;
                }
 
-               drm_connector_register(connector);
                return;
        }
 
index ecabd59ffbaf2b67df06832e945801fdc7385ebb..6a7ad3ed1463206c7b1b88e658e3103e7c58526e 100644 (file)
@@ -124,7 +124,9 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 
        /* Wait for compressing bit to clear */
-       if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+       if (intel_wait_for_register(dev_priv,
+                                   FBC_STATUS, FBC_STAT_COMPRESSING, 0,
+                                   10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }
@@ -390,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_work *work = &fbc->work;
        struct intel_crtc *crtc = fbc->crtc;
-       struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
+       struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
 
        if (drm_crtc_vblank_get(&crtc->base)) {
                DRM_ERROR("vblank not available for FBC on pipe %c\n",
@@ -443,7 +445,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
 
 static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_work *work = &fbc->work;
 
@@ -553,7 +555,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 
 static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct drm_mm_node *uninitialized_var(compressed_llb);
        int size, fb_cpp, ret;
@@ -684,7 +686,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
  */
 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        unsigned int effective_w, effective_h, max_w, max_h;
 
@@ -711,7 +713,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                                         struct intel_crtc_state *crtc_state,
                                         struct intel_plane_state *plane_state)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
        struct drm_framebuffer *fb = plane_state->base.fb;
@@ -744,7 +746,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
 
@@ -816,23 +818,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 
 static bool intel_fbc_can_choose(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
-       bool enable_by_default = IS_HASWELL(dev_priv) ||
-                                IS_BROADWELL(dev_priv);
 
        if (intel_vgpu_active(dev_priv)) {
                fbc->no_fbc_reason = "VGPU is active";
                return false;
        }
 
-       if (i915.enable_fbc < 0 && !enable_by_default) {
-               fbc->no_fbc_reason = "disabled per chip default";
-               return false;
-       }
-
        if (!i915.enable_fbc) {
-               fbc->no_fbc_reason = "disabled per module param";
+               fbc->no_fbc_reason = "disabled per module param or by default";
                return false;
        }
 
@@ -852,7 +847,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
 static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
                                     struct intel_fbc_reg_params *params)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
 
@@ -885,7 +880,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
                          struct intel_crtc_state *crtc_state,
                          struct intel_plane_state *plane_state)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
@@ -911,7 +906,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
 
 static void __intel_fbc_post_update(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_reg_params old_params;
 
@@ -944,7 +939,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
 
 void intel_fbc_post_update(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
@@ -993,13 +988,13 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
        if (!fbc_supported(dev_priv))
                return;
 
-       if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
-               return;
-
        mutex_lock(&fbc->lock);
 
        fbc->busy_bits &= ~frontbuffer_bits;
 
+       if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
+               goto out;
+
        if (!fbc->busy_bits && fbc->enabled &&
            (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
                if (fbc->active)
@@ -1008,6 +1003,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
                        __intel_fbc_post_update(fbc->crtc);
        }
 
+out:
        mutex_unlock(&fbc->lock);
 }
 
@@ -1089,7 +1085,7 @@ void intel_fbc_enable(struct intel_crtc *crtc,
                      struct intel_crtc_state *crtc_state,
                      struct intel_plane_state *plane_state)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
@@ -1160,7 +1156,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
  */
 void intel_fbc_disable(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
@@ -1214,12 +1210,32 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
        if (!no_fbc_on_multiple_pipes(dev_priv))
                return;
 
-       for_each_intel_crtc(dev_priv->dev, crtc)
+       for_each_intel_crtc(&dev_priv->drm, crtc)
                if (intel_crtc_active(&crtc->base) &&
                    to_intel_plane_state(crtc->base.primary->state)->visible)
                        dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
 }
 
+/*
+ * The DDX driver changes its behavior depending on the value it reads from
+ * i915.enable_fbc, so sanitize it by translating the default value into either
+ * 0 or 1 in order to allow it to know what's going on.
+ *
+ * Notice that this is done at driver initialization and we still allow user
+ * space to change the value during runtime without sanitizing it again. IGT
+ * relies on being able to change i915.enable_fbc at runtime.
+ */
+static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
+{
+       if (i915.enable_fbc >= 0)
+               return !!i915.enable_fbc;
+
+       if (IS_BROADWELL(dev_priv))
+               return 1;
+
+       return 0;
+}
+
 /**
  * intel_fbc_init - Initialize FBC
  * @dev_priv: the i915 device
@@ -1237,6 +1253,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
        fbc->active = false;
        fbc->work.scheduled = false;
 
+       i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+       DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
+
        if (!HAS_FBC(dev_priv)) {
                fbc->no_fbc_reason = "unsupported by this chipset";
                return;
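
After this call, i915.enable_fbc is a plain 0 or 1 for the rest of the driver's life: explicit values collapse to !!value, and the -1 default becomes 1 on Broadwell and 0 everywhere else, which is why intel_fbc_can_choose() above could drop its separate per-chip-default branch. Sketch of the consumer side:

        /* Guaranteed 0 or 1 once intel_fbc_init() has run. */
        if (!i915.enable_fbc) {
                fbc->no_fbc_reason = "disabled per module param or by default";
                return false;
        }
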
index 4babefc51eb2da6a29b528edc07fd0a37c2ffa28..86b00c6db1a6d694ad1beb97fcb37fc2cb899873 100644 (file)
@@ -362,23 +362,24 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                    bool *enabled, int width, int height)
 {
        struct drm_device *dev = fb_helper->dev;
+       unsigned long conn_configured, mask;
+       unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
        int i, j;
        bool *save_enabled;
        bool fallback = true;
        int num_connectors_enabled = 0;
        int num_connectors_detected = 0;
-       uint64_t conn_configured = 0, mask;
        int pass = 0;
 
-       save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
-                              GFP_KERNEL);
+       save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
        if (!save_enabled)
                return false;
 
-       memcpy(save_enabled, enabled, fb_helper->connector_count);
-       mask = (1 << fb_helper->connector_count) - 1;
+       memcpy(save_enabled, enabled, count);
+       mask = BIT(count) - 1;
+       conn_configured = 0;
 retry:
-       for (i = 0; i < fb_helper->connector_count; i++) {
+       for (i = 0; i < count; i++) {
                struct drm_fb_helper_connector *fb_conn;
                struct drm_connector *connector;
                struct drm_encoder *encoder;
@@ -388,7 +389,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                fb_conn = fb_helper->connector_info[i];
                connector = fb_conn->connector;
 
-               if (conn_configured & (1 << i))
+               if (conn_configured & BIT(i))
                        continue;
 
                if (pass == 0 && !connector->has_tile)
@@ -400,7 +401,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                if (!enabled[i]) {
                        DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
                                      connector->name);
-                       conn_configured |= (1 << i);
+                       conn_configured |= BIT(i);
                        continue;
                }
 
@@ -419,7 +420,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                        DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
                                      connector->name);
                        enabled[i] = false;
-                       conn_configured |= (1 << i);
+                       conn_configured |= BIT(i);
                        continue;
                }
 
@@ -432,14 +433,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                        intel_crtc->lut_b[j] = j;
                }
 
-               new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
+               new_crtc = intel_fb_helper_crtc(fb_helper,
+                                               connector->state->crtc);
 
                /*
                 * Make sure we're not trying to drive multiple connectors
                 * with a single CRTC, since our cloning support may not
                 * match the BIOS.
                 */
-               for (j = 0; j < fb_helper->connector_count; j++) {
+               for (j = 0; j < count; j++) {
                        if (crtcs[j] == new_crtc) {
                                DRM_DEBUG_KMS("fallback: cloned configuration\n");
                                goto bail;
@@ -498,7 +500,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                              modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
 
                fallback = false;
-               conn_configured |= (1 << i);
+               conn_configured |= BIT(i);
        }
 
        if ((conn_configured & mask) != mask) {
@@ -522,7 +524,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
        if (fallback) {
 bail:
                DRM_DEBUG_KMS("Not using firmware configuration\n");
-               memcpy(enabled, save_enabled, fb_helper->connector_count);
+               memcpy(enabled, save_enabled, count);
                kfree(save_enabled);
                return false;
        }
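
The conversion above from open-coded 1 << i shifts to BIT() on an unsigned long, together with clamping count to BITS_PER_LONG, keeps every shift within the width of the mask type. The bookkeeping pattern the function is built around, in brief:

        /* One bit per tracked connector; loop until all are decided. */
        conn_configured |= BIT(i);              /* connector i handled  */
        if ((conn_configured & mask) != mask)   /* some still pending?  */
                goto retry;                     /* later passes pick up
                                                 * non-tiled connectors */
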
@@ -538,8 +540,7 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
        .fb_probe = intelfb_create,
 };
 
-static void intel_fbdev_destroy(struct drm_device *dev,
-                               struct intel_fbdev *ifbdev)
+static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 {
        /* We rely on the object-free to release the VMA pinning for
         * the info->screen_base mmaping. Leaking the VMA is simpler than
@@ -552,12 +553,14 @@ static void intel_fbdev_destroy(struct drm_device *dev,
        drm_fb_helper_fini(&ifbdev->helper);
 
        if (ifbdev->fb) {
-               mutex_lock(&dev->struct_mutex);
+               mutex_lock(&ifbdev->helper.dev->struct_mutex);
                intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
-               mutex_unlock(&dev->struct_mutex);
+               mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
                drm_framebuffer_remove(&ifbdev->fb->base);
        }
+
+       kfree(ifbdev);
 }
 
 /*
@@ -690,9 +693,9 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 
 static void intel_fbdev_suspend_worker(struct work_struct *work)
 {
-       intel_fbdev_set_suspend(container_of(work,
-                                            struct drm_i915_private,
-                                            fbdev_suspend_work)->dev,
+       intel_fbdev_set_suspend(&container_of(work,
+                                             struct drm_i915_private,
+                                             fbdev_suspend_work)->drm,
                                FBINFO_STATE_RUNNING,
                                true);
 }
@@ -700,7 +703,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
 int intel_fbdev_init(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
@@ -732,38 +735,50 @@ int intel_fbdev_init(struct drm_device *dev)
 
 static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
 {
-       struct drm_i915_private *dev_priv = data;
-       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+       struct intel_fbdev *ifbdev = data;
 
        /* Due to peculiar init order wrt to hpd handling this is separate. */
        if (drm_fb_helper_initial_config(&ifbdev->helper,
                                         ifbdev->preferred_bpp))
-               intel_fbdev_fini(dev_priv->dev);
+               intel_fbdev_fini(ifbdev->helper.dev);
 }
 
 void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
-       async_schedule(intel_fbdev_initial_config, to_i915(dev));
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+       ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
+}
+
+static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
+{
+       if (!ifbdev->cookie)
+               return;
+
+       /* Only serialises with all preceding async calls, hence +1 */
+       async_synchronize_cookie(ifbdev->cookie + 1);
+       ifbdev->cookie = 0;
 }
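
The "+ 1" works because async_schedule() returns the cookie of the entry it just queued, while async_synchronize_cookie(c) waits only for entries whose cookies are strictly lower than c; passing cookie + 1 therefore waits for the initial-config call itself. The pattern end to end:

        /* Queue the deferred initial config and remember its cookie... */
        ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);

        /* ...and, before fini or restore, wait for exactly that call:
         * everything below (cookie + 1) includes our own entry. */
        async_synchronize_cookie(ifbdev->cookie + 1);
        ifbdev->cookie = 0;
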
 
 void intel_fbdev_fini(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       if (!dev_priv->fbdev)
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+
+       if (!ifbdev)
                return;
 
        flush_work(&dev_priv->fbdev_suspend_work);
-
        if (!current_is_async())
-               async_synchronize_full();
-       intel_fbdev_destroy(dev, dev_priv->fbdev);
-       kfree(dev_priv->fbdev);
+               intel_fbdev_sync(ifbdev);
+
+       intel_fbdev_destroy(ifbdev);
        dev_priv->fbdev = NULL;
 }
 
 void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_fbdev *ifbdev = dev_priv->fbdev;
        struct fb_info *info;
 
@@ -812,7 +827,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        if (dev_priv->fbdev)
                drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
 }
@@ -820,13 +835,15 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 void intel_fbdev_restore_mode(struct drm_device *dev)
 {
        int ret;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_fbdev *ifbdev = dev_priv->fbdev;
        struct drm_fb_helper *fb_helper;
 
        if (!ifbdev)
                return;
 
+       intel_fbdev_sync(ifbdev);
+
        fb_helper = &ifbdev->helper;
 
        ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
index 9be839a242f942752ea44c1278a036c0f94f9443..2aa744081f090248bdfc721703f2d3da3692c3cc 100644 (file)
@@ -50,7 +50,7 @@
 
 static bool ivb_can_enable_err_int(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc;
        enum pipe pipe;
 
@@ -68,7 +68,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
 
 static bool cpt_can_enable_serr_int(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
 
@@ -105,7 +105,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
                                             enum pipe pipe,
                                             bool enable, bool old)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0xffff0000;
 
@@ -123,7 +123,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
                                          DE_PIPEB_FIFO_UNDERRUN;
 
@@ -154,7 +154,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  enum pipe pipe,
                                                  bool enable, bool old)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        if (enable) {
                I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
 
@@ -176,7 +176,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
 static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  enum pipe pipe, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (enable)
                bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
@@ -188,7 +188,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
                       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
 
@@ -220,7 +220,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable, bool old)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (enable) {
                I915_WRITE(SERR_INT,
@@ -244,7 +244,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                                    enum pipe pipe, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        bool old;
@@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
        bool ret;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
+       ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
                                                      enable);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
        intel_crtc->pch_fifo_underrun_disabled = !enable;
 
        if (HAS_PCH_IBX(dev_priv))
-               ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+               ibx_set_fifo_underrun_reporting(&dev_priv->drm,
+                                               pch_transcoder,
                                                enable);
        else
-               cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+               cpt_set_fifo_underrun_reporting(&dev_priv->drm,
+                                               pch_transcoder,
                                                enable, old);
 
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
 
        spin_lock_irq(&dev_priv->irq_lock);
 
-       for_each_intel_crtc(dev_priv->dev, crtc) {
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
                if (crtc->cpu_fifo_underrun_disabled)
                        continue;
 
@@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
 
        spin_lock_irq(&dev_priv->irq_lock);
 
-       for_each_intel_crtc(dev_priv->dev, crtc) {
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
                if (crtc->pch_fifo_underrun_disabled)
                        continue;
 
index 4df80cc9a29168fea10cc79e3531e80efd2ab9f5..3e3e743740c01a2651850836f4fee970ea294b42 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "intel_guc_fwif.h"
 #include "i915_guc_reg.h"
+#include "intel_ringbuffer.h"
 
 struct drm_i915_gem_request;
 
@@ -86,7 +87,7 @@ struct i915_guc_client {
        int retcode;
 
        /* Per-engine counts of GuC submissions */
-       uint64_t submissions[GUC_MAX_ENGINES_NUM];
+       uint64_t submissions[I915_NUM_ENGINES];
 };
 
 enum intel_guc_fw_status {
@@ -143,8 +144,8 @@ struct intel_guc {
        uint32_t action_fail;           /* Total number of failures     */
        int32_t action_err;             /* Last error code              */
 
-       uint64_t submissions[GUC_MAX_ENGINES_NUM];
-       uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
+       uint64_t submissions[I915_NUM_ENGINES];
+       uint32_t last_seqno[I915_NUM_ENGINES];
 };
 
 /* intel_guc_loader.c */
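
Sizing the per-engine statistics by I915_NUM_ENGINES (hence the new intel_ringbuffer.h include) rather than the GuC ABI constant GUC_MAX_ENGINES_NUM keeps the arrays in step with the driver's own engine iteration. A hypothetical accumulation loop under that assumption:

        struct intel_engine_cs *engine;
        u64 total = 0;

        /* submissions[] is indexed by the driver's engine id, so it
         * lines up with for_each_engine() by construction. */
        for_each_engine(engine, dev_priv)
                total += guc->submissions[engine->id];
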
index 8fe96a2d989e3ce645913f37d3d2a6dfe6faa2ad..605c69658d2c1c55b2408b0bc36dd6c41d26d667 100644 (file)
@@ -65,6 +65,9 @@ MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 #define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
 MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
 
+#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin"
+MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
+
 /* User-friendly representation of an enum */
 const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 {
@@ -87,7 +90,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
        struct intel_engine_cs *engine;
        int irqs;
 
-       /* tell all command streamers NOT to forward interrupts and vblank to GuC */
+       /* tell all command streamers NOT to forward interrupts or vblank to GuC */
        irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
        irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
        for_each_engine(engine, dev_priv)
@@ -105,9 +108,8 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
        int irqs;
        u32 tmp;
 
-       /* tell all command streamers to forward interrupts and vblank to GuC */
-       irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
-       irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
+       /* tell all command streamers to forward interrupts (but not vblank) to GuC */
+       irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
        for_each_engine(engine, dev_priv)
                I915_WRITE(RING_MODE_GEN7(engine), irqs);
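
The rewrite above stops programming GFX_FORWARD_VBLANK_ALWAYS: engines still steer their interrupts to the GuC, but vblanks are no longer forwarded. Both idioms write masked registers, where the upper 16 bits select which bits of the lower 16 take effect:

        /* Enable one bit; the matching mask bit is added automatically. */
        irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);

        /* Program a multi-bit field (the form the old code used). */
        irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
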
 
@@ -312,7 +314,7 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
 static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 {
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -411,7 +413,7 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
  */
 int intel_guc_setup(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
        const char *fw_path = guc_fw->guc_fw_path;
        int retries, ret, err;
@@ -606,7 +608,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
        /* Header and uCode will be loaded to WOPCM. Size of the two. */
        size = guc_fw->header_size + guc_fw->ucode_size;
-       if (size > guc_wopcm_size(dev->dev_private)) {
+       if (size > guc_wopcm_size(to_i915(dev))) {
                DRM_ERROR("Firmware is too large to fit in WOPCM\n");
                goto fail;
        }
@@ -679,7 +681,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
  */
 void intel_guc_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
        const char *fw_path;
 
@@ -699,6 +701,10 @@ void intel_guc_init(struct drm_device *dev)
                fw_path = I915_BXT_GUC_UCODE;
                guc_fw->guc_fw_major_wanted = 8;
                guc_fw->guc_fw_minor_wanted = 7;
+       } else if (IS_KABYLAKE(dev)) {
+               fw_path = I915_KBL_GUC_UCODE;
+               guc_fw->guc_fw_major_wanted = 9;
+               guc_fw->guc_fw_minor_wanted = 14;
        } else {
                fw_path = "";   /* unknown device */
        }
@@ -728,7 +734,7 @@ void intel_guc_init(struct drm_device *dev)
  */
 void intel_guc_fini(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
        mutex_lock(&dev->struct_mutex);
index 9fa458ce40a6aabf3fe7097a306fd4db807d3e97..434f4d5c553ddf6d263a5af78e491b94ce1cfce3 100644 (file)
@@ -63,7 +63,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 
        if (!is_supported_device(dev_priv)) {
                DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
-               return 0;
+               goto bail;
        }
 
        /*
@@ -72,16 +72,20 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
        ret = intel_gvt_init_host();
        if (ret) {
                DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
-               return 0;
+               goto bail;
        }
 
        ret = intel_gvt_init_device(dev_priv);
        if (ret) {
                DRM_DEBUG_DRIVER("Fail to init GVT device\n");
-               return 0;
+               goto bail;
        }
 
        return 0;
+
+bail:
+       i915.enable_gvt = 0;
+       return 0;
 }
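
All three failure legs now funnel through one bail label, and the invariant is that GVT-g fails soft: intel_gvt_init() still returns 0 so driver load continues, while i915.enable_gvt is cleared so later readers of the parameter see whether the feature actually came up. A hypothetical consumer of that contract:

        /* Hypothetical caller-side check: after init, the parameter
         * reflects reality; zero means we run without GVT-g. */
        intel_gvt_init(dev_priv);
        if (!i915.enable_gvt)
                DRM_DEBUG_DRIVER("continuing without GVT-g\n");
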
 
 /**
index fb21626ada645dbb0e594e162d6b6cc308fa9ebe..4df9f384910ccd8050119873a2ae9418db57d15e 100644 (file)
@@ -47,7 +47,7 @@ static void
 assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 {
        struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t enabled_bits;
 
        enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
@@ -138,7 +138,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
 {
        const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val = I915_READ(VIDEO_DIP_CTL);
        int i;
 
@@ -192,7 +192,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
 {
        const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
@@ -251,7 +251,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
 {
        const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
@@ -308,7 +308,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 {
        const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
@@ -366,7 +366,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
 {
        const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -508,7 +508,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
        i915_reg_t reg = VIDEO_DIP_CTL;
@@ -629,7 +629,7 @@ static bool gcp_default_phase_possible(int pipe_bpp,
 
 static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
        i915_reg_t reg;
        u32 val = 0;
@@ -661,7 +661,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
@@ -713,7 +713,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -755,7 +755,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -807,7 +807,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
@@ -855,7 +855,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
 static void intel_hdmi_prepare(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -894,7 +894,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        enum intel_display_power_domain power_domain;
        u32 tmp;
@@ -931,7 +931,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp, flags = 0;
        int dotclock;
 
@@ -988,7 +988,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
 static void g4x_enable_hdmi(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        u32 temp;
@@ -1009,7 +1009,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
 static void ibx_enable_hdmi(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        u32 temp;
@@ -1058,7 +1058,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
 static void cpt_enable_hdmi(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        enum pipe pipe = crtc->pipe;
@@ -1115,7 +1115,7 @@ static void vlv_enable_hdmi(struct intel_encoder *encoder)
 static void intel_disable_hdmi(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        u32 temp;
@@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
                I915_WRITE(intel_hdmi->hdmi_reg, temp);
                POSTING_READ(intel_hdmi->hdmi_reg);
 
-               intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+               intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
                intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
        }
@@ -1273,33 +1273,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc_state->base.crtc->dev;
-       struct drm_atomic_state *state;
-       struct intel_encoder *encoder;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int count = 0, count_hdmi = 0;
-       int i;
 
        if (HAS_GMCH_DISPLAY(dev))
                return false;
 
-       state = crtc_state->base.state;
-
-       for_each_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != crtc_state->base.crtc)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-
-               count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
-               count++;
-       }
-
        /*
         * HDMI 12bpc affects the clocks, so it's only possible
         * when not cloning with other encoder types.
         */
-       return count_hdmi > 0 && count_hdmi == count;
+       return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
 }
 
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -1575,7 +1557,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
        struct intel_digital_port *intel_dig_port =
                hdmi_to_dig_port(intel_hdmi);
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        int ret;
 
        ret = drm_object_property_set_value(&connector->base, property, val);
@@ -1674,7 +1656,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dport->hdmi;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1722,7 +1704,7 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 static void chv_hdmi_post_disable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        mutex_lock(&dev_priv->sb_lock);
 
@@ -1737,7 +1719,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dport->hdmi;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1774,6 +1756,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_hdmi_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
+       .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_hdmi_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -1806,7 +1789,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
        uint8_t alternate_ddc_pin;
 
@@ -1914,7 +1897,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        intel_hdmi_add_properties(intel_hdmi, connector);
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
-       drm_connector_register(connector);
        intel_hdmi->attached_connector = intel_connector;
 
        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
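
Two things change across these HDMI hunks. The 12bpc check no longer walks the atomic state's connectors: with crtc_state->output_types kept as a bitmask of INTEL_OUTPUT_* bits, "HDMI and nothing else on this CRTC" becomes a single comparison. And the explicit drm_connector_register() call is dropped in favour of the connector's .late_register hook, paired with the existing .early_unregister. A sketch of the bitmask test, assuming output_types carries one bit per active output type:

    /* The equality holds only when the HDMI bit is set and every
     * other bit is clear, so any cloned non-HDMI encoder (two bits
     * set) fails the test. */
    bool hdmi_only = crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
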
index 38eeca7a6e72fda425f2f88261e2d823100eec5a..51434ec871f217b637de8bb197190cee7aee6af1 100644
@@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
 
 static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
@@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug.reenable_work.work);
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_mode_config *mode_config = &dev->mode_config;
        int i;
 
@@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.hotplug_work);
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
@@ -455,7 +455,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
  */
 void intel_hpd_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
        int i;
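
These hotplug hunks are the mirror image of the to_i915() conversion: with struct drm_device embedded in the private structure, dev_priv->dev pointer chasing becomes taking the member's address, e.g.:

    /* No separate back-pointer to keep in sync; the drm_device is
     * the dev_priv->drm member itself. */
    struct drm_device *dev = &dev_priv->drm;    /* was dev_priv->dev */
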
index 81de23098be726adf14515720e40e0eee6ecb973..1f266d7df2ec076afdffefef40ec870f9115291e 100644
@@ -113,7 +113,7 @@ to_intel_gmbus(struct i2c_adapter *i2c)
 void
 intel_i2c_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(GMBUS0, 0);
        I915_WRITE(GMBUS4, 0);
@@ -138,7 +138,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
 static u32 get_reserved(struct intel_gmbus *bus)
 {
        struct drm_i915_private *dev_priv = bus->dev_priv;
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        u32 reserved = 0;
 
        /* On most chips, these bits must be preserved in software. */
@@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
                                               adapter);
        struct drm_i915_private *dev_priv = bus->dev_priv;
 
-       intel_i2c_reset(dev_priv->dev);
+       intel_i2c_reset(&dev_priv->drm);
        intel_i2c_quirk_set(dev_priv, true);
        set_data(bus, 1);
        set_clock(bus, 1);
@@ -298,15 +298,16 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
 {
        int ret;
 
-#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
-
        if (!HAS_GMBUS_IRQ(dev_priv))
-               return wait_for(C, 10);
+               return intel_wait_for_register(dev_priv,
+                                              GMBUS2, GMBUS_ACTIVE, 0,
+                                              10);
 
        /* Important: The hw handles only the first bit, so set only one! */
        I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
 
-       ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+       ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
+                                (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
                                 msecs_to_jiffies_timeout(10));
 
        I915_WRITE(GMBUS4, 0);
@@ -315,7 +316,6 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
                return 0;
        else
                return -ETIMEDOUT;
-#undef C
 }
 
 static int
@@ -632,7 +632,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
  */
 int intel_setup_gmbus(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_gmbus *bus;
        unsigned int pin;
        int ret;
@@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev)
                        goto err;
        }
 
-       intel_i2c_reset(dev_priv->dev);
+       intel_i2c_reset(&dev_priv->drm);
 
        return 0;
 
@@ -736,7 +736,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
 
 void intel_teardown_gmbus(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_gmbus *bus;
        unsigned int pin;
 
index debed011a9587f119a76cd3b05ed91540189ad72..70c699043d0ef3029903f0aa158b12885e33ed7e 100644
@@ -789,9 +789,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance(ringbuf);
 
-       if (intel_engine_stopped(engine))
-               return 0;
-
        /* We keep the previous context alive until we retire the following
         * request. This ensures that the context object is still pinned
         * for any residual writes the HW makes into it on the context switch
@@ -826,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 {
        struct drm_device       *dev = params->dev;
        struct intel_engine_cs *engine = params->engine;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
        u64 exec_start;
        int instp_mode;
@@ -902,7 +899,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
        struct drm_i915_gem_request *req, *tmp;
        LIST_HEAD(cancel_list);
 
-       WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
 
        spin_lock_bh(&engine->execlist_lock);
        list_replace_init(&engine->execlist_queue, &cancel_list);
@@ -929,7 +926,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
 
        /* TODO: Is this correct with Execlists enabled? */
        I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-       if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+       if (intel_wait_for_register(dev_priv,
+                                   RING_MI_MODE(engine->mmio_base),
+                                   MODE_IDLE, MODE_IDLE,
+                                   1000)) {
                DRM_ERROR("%s: timed out trying to stop ring\n", engine->name);
                return;
        }
@@ -961,7 +961,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
        u32 *lrc_reg_state;
        int ret;
 
-       lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 
        if (ce->pin_count++)
                return 0;
@@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 {
        struct intel_context *ce = &ctx->engine[engine->id];
 
-       lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        GEM_BUG_ON(ce->pin_count == 0);
 
        if (--ce->pin_count)
@@ -1296,6 +1296,31 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
                wa_ctx_emit(batch, index, 0);
                wa_ctx_emit(batch, index, 0);
        }
+
+       /* WaMediaPoolStateCmdInWABB:bxt */
+       if (HAS_POOLED_EU(engine->i915)) {
+               /*
+                * EU pool configuration is set up along with the golden
+                * context during context initialization. The value depends
+                * on the device type (2x6 or 3x6) and needs to be updated
+                * according to which subslice is disabled, especially on
+                * 2x6 devices. However, it is safe to load the default 3x6
+                * configuration instead of masking off the corresponding
+                * bits, because the HW ignores the bits of a disabled
+                * subslice and drops down to the appropriate config. See
+                * render_state_setup() in i915_gem_render_state.c for the
+                * possible configurations; to avoid duplication they are
+                * not repeated here.
+                */
+               u32 eu_pool_config = 0x00777000;
+               wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
+               wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
+               wa_ctx_emit(batch, index, eu_pool_config);
+               wa_ctx_emit(batch, index, 0);
+               wa_ctx_emit(batch, index, 0);
+               wa_ctx_emit(batch, index, 0);
+       }
+
        /* Pad to end of cacheline */
        while (index % CACHELINE_DWORDS)
                wa_ctx_emit(batch, index, MI_NOOP);
@@ -1353,8 +1378,8 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 {
        int ret;
 
-       engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
-                                                  PAGE_ALIGN(size));
+       engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
+                                                   PAGE_ALIGN(size));
        if (IS_ERR(engine->wa_ctx.obj)) {
                DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
                ret = PTR_ERR(engine->wa_ctx.obj);
@@ -1614,36 +1639,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
        return 0;
 }
 
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               I915_WRITE_IMR(engine,
-                              ~(engine->irq_enable_mask | engine->irq_keep_mask));
-               POSTING_READ(RING_IMR(engine->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       I915_WRITE_IMR(engine,
+                      ~(engine->irq_enable_mask | engine->irq_keep_mask));
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
-               POSTING_READ(RING_IMR(engine->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1780,16 +1787,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        return 0;
 }
 
-static u32 gen8_get_seqno(struct intel_engine_cs *engine)
-{
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 {
        /*
@@ -1805,14 +1802,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
        intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
-static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-
-       /* See bxt_a_get_seqno() explaining the reason for the clflush. */
-       intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
 /*
  * Reserve space for 2 NOOPs at the end of each request to be
  * used as a workaround for not being allowed to do lite
@@ -1838,7 +1827,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
                                intel_hws_seqno_address(request->engine) |
                                MI_FLUSH_DW_USE_GTT);
        intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       intel_logical_ring_emit(ringbuf, request->seqno);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
@@ -1958,6 +1947,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
        i915_cmd_parser_fini_ring(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);
 
+       intel_engine_fini_breadcrumbs(engine);
+
        if (engine->status_page.obj) {
                i915_gem_object_unpin_map(engine->status_page.obj);
                engine->status_page.obj = NULL;
@@ -1979,15 +1970,11 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
        engine->init_hw = gen8_init_common_ring;
        engine->emit_request = gen8_emit_request;
        engine->emit_flush = gen8_emit_flush;
-       engine->irq_get = gen8_logical_ring_get_irq;
-       engine->irq_put = gen8_logical_ring_put_irq;
+       engine->irq_enable = gen8_logical_ring_enable_irq;
+       engine->irq_disable = gen8_logical_ring_disable_irq;
        engine->emit_bb_start = gen8_emit_bb_start;
-       engine->get_seqno = gen8_get_seqno;
-       engine->set_seqno = gen8_set_seqno;
-       if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
                engine->irq_seqno_barrier = bxt_a_seqno_barrier;
-               engine->set_seqno = bxt_a_set_seqno;
-       }
 }
 
 static inline void
@@ -1995,7 +1982,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
 {
        engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
        engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
-       init_waitqueue_head(&engine->irq_queue);
 }
 
 static int
@@ -2016,12 +2002,94 @@ lrc_setup_hws(struct intel_engine_cs *engine,
        return 0;
 }
 
+static int
+logical_ring_init(struct intel_engine_cs *engine)
+{
+       struct i915_gem_context *dctx = engine->i915->kernel_context;
+       int ret;
+
+       ret = intel_engine_init_breadcrumbs(engine);
+       if (ret)
+               goto error;
+
+       ret = i915_cmd_parser_init_ring(engine);
+       if (ret)
+               goto error;
+
+       ret = execlists_context_deferred_alloc(dctx, engine);
+       if (ret)
+               goto error;
+
+       /* As this is the default context, always pin it */
+       ret = intel_lr_context_pin(dctx, engine);
+       if (ret) {
+               DRM_ERROR("Failed to pin context for %s: %d\n",
+                         engine->name, ret);
+               goto error;
+       }
+
+       /* And setup the hardware status page. */
+       ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+       if (ret) {
+               DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
+               goto error;
+       }
+
+       return 0;
+
+error:
+       intel_logical_ring_cleanup(engine);
+       return ret;
+}
+
+static int logical_render_ring_init(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       if (HAS_L3_DPF(dev_priv))
+               engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+       /* Override some for render ring. */
+       if (INTEL_GEN(dev_priv) >= 9)
+               engine->init_hw = gen9_init_render_ring;
+       else
+               engine->init_hw = gen8_init_render_ring;
+       engine->init_context = gen8_init_rcs_context;
+       engine->cleanup = intel_fini_pipe_control;
+       engine->emit_flush = gen8_emit_flush_render;
+       engine->emit_request = gen8_emit_request_render;
+
+       ret = intel_init_pipe_control(engine, 4096);
+       if (ret)
+               return ret;
+
+       ret = intel_init_workaround_bb(engine);
+       if (ret) {
+               /*
+                * We continue even if the WA batch buffer fails to
+                * initialize, because we expect only rare glitches and
+                * nothing critical enough to keep us from using the GPU.
+                */
+               DRM_ERROR("WA batch buffer initialization failed: %d\n",
+                         ret);
+       }
+
+       ret = logical_ring_init(engine);
+       if (ret) {
+               lrc_destroy_wa_ctx_obj(engine);
+       }
+
+       return ret;
+}
+
 static const struct logical_ring_info {
        const char *name;
        unsigned exec_id;
        unsigned guc_id;
        u32 mmio_base;
        unsigned irq_shift;
+       int (*init)(struct intel_engine_cs *engine);
 } logical_rings[] = {
        [RCS] = {
                .name = "render ring",
@@ -2029,6 +2097,7 @@ static const struct logical_ring_info {
                .guc_id = GUC_RENDER_ENGINE,
                .mmio_base = RENDER_RING_BASE,
                .irq_shift = GEN8_RCS_IRQ_SHIFT,
+               .init = logical_render_ring_init,
        },
        [BCS] = {
                .name = "blitter ring",
@@ -2036,6 +2105,7 @@ static const struct logical_ring_info {
                .guc_id = GUC_BLITTER_ENGINE,
                .mmio_base = BLT_RING_BASE,
                .irq_shift = GEN8_BCS_IRQ_SHIFT,
+               .init = logical_ring_init,
        },
        [VCS] = {
                .name = "bsd ring",
@@ -2043,6 +2113,7 @@ static const struct logical_ring_info {
                .guc_id = GUC_VIDEO_ENGINE,
                .mmio_base = GEN6_BSD_RING_BASE,
                .irq_shift = GEN8_VCS1_IRQ_SHIFT,
+               .init = logical_ring_init,
        },
        [VCS2] = {
                .name = "bsd2 ring",
@@ -2050,6 +2121,7 @@ static const struct logical_ring_info {
                .guc_id = GUC_VIDEO_ENGINE2,
                .mmio_base = GEN8_BSD2_RING_BASE,
                .irq_shift = GEN8_VCS2_IRQ_SHIFT,
+               .init = logical_ring_init,
        },
        [VECS] = {
                .name = "video enhancement ring",
@@ -2057,14 +2129,14 @@ static const struct logical_ring_info {
                .guc_id = GUC_VIDEOENHANCE_ENGINE,
                .mmio_base = VEBOX_RING_BASE,
                .irq_shift = GEN8_VECS_IRQ_SHIFT,
+               .init = logical_ring_init,
        },
 };
 
 static struct intel_engine_cs *
-logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
+logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
 {
        const struct logical_ring_info *info = &logical_rings[id];
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[id];
        enum forcewake_domains fw_domains;
 
@@ -2107,169 +2179,62 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
        logical_ring_default_irqs(engine, info->irq_shift);
 
        intel_engine_init_hangcheck(engine);
-       i915_gem_batch_pool_init(dev, &engine->batch_pool);
+       i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
 
        return engine;
 }
 
-static int
-logical_ring_init(struct intel_engine_cs *engine)
-{
-       struct i915_gem_context *dctx = engine->i915->kernel_context;
-       int ret;
-
-       ret = i915_cmd_parser_init_ring(engine);
-       if (ret)
-               goto error;
-
-       ret = execlists_context_deferred_alloc(dctx, engine);
-       if (ret)
-               goto error;
-
-       /* As this is the default context, always pin it */
-       ret = intel_lr_context_pin(dctx, engine);
-       if (ret) {
-               DRM_ERROR("Failed to pin context for %s: %d\n",
-                         engine->name, ret);
-               goto error;
-       }
-
-       /* And setup the hardware status page. */
-       ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
-       if (ret) {
-               DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
-               goto error;
-       }
-
-       return 0;
-
-error:
-       intel_logical_ring_cleanup(engine);
-       return ret;
-}
-
-static int logical_render_ring_init(struct drm_device *dev)
-{
-       struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
-       int ret;
-
-       if (HAS_L3_DPF(dev))
-               engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
-       /* Override some for render ring. */
-       if (INTEL_INFO(dev)->gen >= 9)
-               engine->init_hw = gen9_init_render_ring;
-       else
-               engine->init_hw = gen8_init_render_ring;
-       engine->init_context = gen8_init_rcs_context;
-       engine->cleanup = intel_fini_pipe_control;
-       engine->emit_flush = gen8_emit_flush_render;
-       engine->emit_request = gen8_emit_request_render;
-
-       ret = intel_init_pipe_control(engine);
-       if (ret)
-               return ret;
-
-       ret = intel_init_workaround_bb(engine);
-       if (ret) {
-               /*
-                * We continue even if we fail to initialize WA batch
-                * because we only expect rare glitches but nothing
-                * critical to prevent us from using GPU
-                */
-               DRM_ERROR("WA batch buffer initialization failed: %d\n",
-                         ret);
-       }
-
-       ret = logical_ring_init(engine);
-       if (ret) {
-               lrc_destroy_wa_ctx_obj(engine);
-       }
-
-       return ret;
-}
-
-static int logical_bsd_ring_init(struct drm_device *dev)
-{
-       struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);
-
-       return logical_ring_init(engine);
-}
-
-static int logical_bsd2_ring_init(struct drm_device *dev)
-{
-       struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);
-
-       return logical_ring_init(engine);
-}
-
-static int logical_blt_ring_init(struct drm_device *dev)
-{
-       struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);
-
-       return logical_ring_init(engine);
-}
-
-static int logical_vebox_ring_init(struct drm_device *dev)
-{
-       struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);
-
-       return logical_ring_init(engine);
-}
-
 /**
  * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
  * @dev: DRM device.
  *
- * This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
- * those engines that are present in the hardware.
+ * This function inits the engines for Execlists-style submission (the
+ * equivalent in the legacy ringbuffer submission world would be
+ * i915_gem_init_engines). It does so only for those engines that are
+ * present in the hardware.
  *
  * Return: non-zero if the initialization failed.
  */
 int intel_logical_rings_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       unsigned int mask = 0;
+       unsigned int i;
        int ret;
 
-       ret = logical_render_ring_init(dev);
-       if (ret)
-               return ret;
+       WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+               GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
 
-       if (HAS_BSD(dev)) {
-               ret = logical_bsd_ring_init(dev);
-               if (ret)
-                       goto cleanup_render_ring;
-       }
+       for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
+               if (!HAS_ENGINE(dev_priv, i))
+                       continue;
 
-       if (HAS_BLT(dev)) {
-               ret = logical_blt_ring_init(dev);
-               if (ret)
-                       goto cleanup_bsd_ring;
-       }
+               if (!logical_rings[i].init)
+                       continue;
 
-       if (HAS_VEBOX(dev)) {
-               ret = logical_vebox_ring_init(dev);
+               ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
                if (ret)
-                       goto cleanup_blt_ring;
+                       goto cleanup;
+
+               mask |= ENGINE_MASK(i);
        }
 
-       if (HAS_BSD2(dev)) {
-               ret = logical_bsd2_ring_init(dev);
-               if (ret)
-                       goto cleanup_vebox_ring;
+       /*
+        * Catch failures to update the logical_rings table when new
+        * engines are added to the driver: warn and disable the
+        * forgotten engines.
+        */
+       if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
+               struct intel_device_info *info =
+                       (struct intel_device_info *)&dev_priv->info;
+               info->ring_mask = mask;
        }
 
        return 0;
 
-cleanup_vebox_ring:
-       intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
-cleanup_blt_ring:
-       intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
-cleanup_bsd_ring:
-       intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
-cleanup_render_ring:
-       intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
+cleanup:
+       for (i = 0; i < I915_NUM_ENGINES; i++)
+               intel_logical_ring_cleanup(&dev_priv->engine[i]);
 
        return ret;
 }
@@ -2546,7 +2511,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
        /* One extra page as the sharing data between driver and GuC */
        context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
-       ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
+       ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
        if (IS_ERR(ctx_obj)) {
                DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
                return PTR_ERR(ctx_obj);
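
The per-engine logical_*_ring_init() wrappers collapse into an .init hook in the logical_rings table, and intel_logical_rings_init() becomes one loop over the engines the hardware actually has. The GENMASK WARN_ON guards the loop's bookkeeping; as a worked example, if mask is a 32-bit unsigned int and I915_NUM_ENGINES is 5 (an assumption for illustration), then:

    /* GENMASK(31, 5) == 0xffffffe0: any ring_mask bit at or above
     * I915_NUM_ENGINES names an engine the table cannot describe,
     * so the WARN_ON fires. */
    u32 unrepresentable = GENMASK(31, 5);

The second WARN_ON after the loop catches the opposite case, a hardware engine the table forgot, and masks it off so the missing engine is cleanly disabled instead of half-initialized.
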
index e9082185a375ff0a0627529b49b75c29ff5e9a0f..49550470483ea6aaacfae338052fe1536ae09d1d 100644
@@ -72,7 +72,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        enum intel_display_power_domain power_domain;
        u32 tmp;
@@ -106,7 +106,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
                                  struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        u32 tmp, flags = 0;
 
@@ -140,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        int pipe = crtc->pipe;
@@ -184,8 +184,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
         * panels behave in the two modes. For now, let's just maintain the
         * value we got from the BIOS.
         */
-        temp &= ~LVDS_A3_POWER_MASK;
-        temp |= lvds_encoder->a3_power;
+       temp &= ~LVDS_A3_POWER_MASK;
+       temp |= lvds_encoder->a3_power;
 
        /* Set the dithering flag on LVDS as needed, note that there is no
         * special lvds dither control bit on pch-split platforms, dithering is
@@ -216,7 +216,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct intel_connector *intel_connector =
                &lvds_encoder->attached_connector->base;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ctl_reg, stat_reg;
 
        if (HAS_PCH_SPLIT(dev)) {
@@ -231,7 +231,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 
        I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
        POSTING_READ(lvds_encoder->reg);
-       if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+       if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
                DRM_ERROR("timed out waiting for panel to power on\n");
 
        intel_panel_enable_backlight(intel_connector);
@@ -241,7 +241,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ctl_reg, stat_reg;
 
        if (HAS_PCH_SPLIT(dev)) {
@@ -253,7 +253,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
        }
 
        I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
-       if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+       if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
                DRM_ERROR("timed out waiting for panel to power off\n");
 
        I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -442,7 +442,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
                container_of(nb, struct intel_lvds_connector, lid_notifier);
        struct drm_connector *connector = &lvds_connector->base.base;
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
                return NOTIFY_OK;
@@ -555,6 +555,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_lvds_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
+       .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_lvds_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -810,27 +811,29 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
        { }     /* terminating entry */
 };
 
-bool intel_is_dual_link_lvds(struct drm_device *dev)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
 {
-       struct intel_encoder *encoder;
-       struct intel_lvds_encoder *lvds_encoder;
+       struct intel_encoder *intel_encoder;
 
-       for_each_intel_encoder(dev, encoder) {
-               if (encoder->type == INTEL_OUTPUT_LVDS) {
-                       lvds_encoder = to_lvds_encoder(&encoder->base);
+       for_each_intel_encoder(dev, intel_encoder)
+               if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+                       return intel_encoder;
 
-                       return lvds_encoder->is_dual_link;
-               }
-       }
+       return NULL;
+}
 
-       return false;
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+       struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
+
+       return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
 }
 
 static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
 {
        struct drm_device *dev = lvds_encoder->base.base.dev;
        unsigned int val;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* use the module option value if specified */
        if (i915.lvds_channel_mode > 0)
@@ -880,7 +883,7 @@ static bool intel_lvds_supported(struct drm_device *dev)
  */
 void intel_lvds_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_lvds_encoder *lvds_encoder;
        struct intel_encoder *intel_encoder;
        struct intel_lvds_connector *lvds_connector;
@@ -1118,6 +1121,7 @@ void intel_lvds_init(struct drm_device *dev)
        mutex_unlock(&dev->mode_config.mutex);
 
        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+       intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
        DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
@@ -1130,9 +1134,6 @@ void intel_lvds_init(struct drm_device *dev)
                DRM_DEBUG_KMS("lid notifier registration failed\n");
                lvds_connector->lid_notifier.notifier_call = NULL;
        }
-       drm_connector_register(connector);
-
-       intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        return;
 
index 38a4c8ce7e63a5da4506094d682ea06310cc9de7..f2584d0a01ab61cf09bc5561071e0c7d22130ffb 100644
@@ -82,7 +82,7 @@ void
 intel_attach_force_audio_property(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_property *prop;
 
        prop = dev_priv->force_audio_property;
@@ -109,7 +109,7 @@ void
 intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_property *prop;
 
        prop = dev_priv->broadcast_rgb_property;
index f6d8a21d2c491c3ae029c09a136594d2826d15a8..c27d5eb063d0b21f8774cd786c7c40151b40471b 100644
@@ -232,11 +232,28 @@ struct opregion_asle_ext {
 #define SWSCI_SBCB_POST_VBE_PM         SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
 #define SWSCI_SBCB_ENABLE_DISABLE_AUDIO        SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
 
-#define ACPI_OTHER_OUTPUT (0<<8)
-#define ACPI_VGA_OUTPUT (1<<8)
-#define ACPI_TV_OUTPUT (2<<8)
-#define ACPI_DIGITAL_OUTPUT (3<<8)
-#define ACPI_LVDS_OUTPUT (4<<8)
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT               0
+#define ACPI_DISPLAY_INDEX_MASK                        (0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT     4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK      (0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT                        8
+#define ACPI_DISPLAY_TYPE_MASK                 (0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER                        (0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA                  (1 << 8)
+#define ACPI_DISPLAY_TYPE_TV                   (2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL     (3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL     (4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT             12
+#define ACPI_VENDOR_SPECIFIC_MASK              (0xf << 12)
+#define ACPI_BIOS_CAN_DETECT                   (1 << 16)
+#define ACPI_DEPENDS_ON_VGA                    (1 << 17)
+#define ACPI_PIPE_ID_SHIFT                     18
+#define ACPI_PIPE_ID_MASK                      (7 << 18)
+#define ACPI_DEVICE_ID_SCHEME                  (1 << 31)
 
 #define MAX_DSLP       1500
 
@@ -244,7 +261,7 @@ static int swsci(struct drm_i915_private *dev_priv,
                 u32 function, u32 parm, u32 *parm_out)
 {
        struct opregion_swsci *swsci = dev_priv->opregion.swsci;
-       struct pci_dev *pdev = dev_priv->dev->pdev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        u32 main_function, sub_function, scic;
        u16 swsci_val;
        u32 dslp;
@@ -366,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                type = DISPLAY_TYPE_CRT;
                break;
        case INTEL_OUTPUT_UNKNOWN:
-       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_HDMI:
        case INTEL_OUTPUT_DP_MST:
                type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
@@ -418,7 +435,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
 {
        struct intel_connector *connector;
        struct opregion_asle *asle = dev_priv->opregion.asle;
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
@@ -657,10 +674,51 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
        }
 }
 
+static u32 acpi_display_type(struct drm_connector *connector)
+{
+       u32 display_type;
+
+       switch (connector->connector_type) {
+       case DRM_MODE_CONNECTOR_VGA:
+       case DRM_MODE_CONNECTOR_DVIA:
+               display_type = ACPI_DISPLAY_TYPE_VGA;
+               break;
+       case DRM_MODE_CONNECTOR_Composite:
+       case DRM_MODE_CONNECTOR_SVIDEO:
+       case DRM_MODE_CONNECTOR_Component:
+       case DRM_MODE_CONNECTOR_9PinDIN:
+       case DRM_MODE_CONNECTOR_TV:
+               display_type = ACPI_DISPLAY_TYPE_TV;
+               break;
+       case DRM_MODE_CONNECTOR_DVII:
+       case DRM_MODE_CONNECTOR_DVID:
+       case DRM_MODE_CONNECTOR_DisplayPort:
+       case DRM_MODE_CONNECTOR_HDMIA:
+       case DRM_MODE_CONNECTOR_HDMIB:
+               display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+               break;
+       case DRM_MODE_CONNECTOR_LVDS:
+       case DRM_MODE_CONNECTOR_eDP:
+       case DRM_MODE_CONNECTOR_DSI:
+               display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+               break;
+       case DRM_MODE_CONNECTOR_Unknown:
+       case DRM_MODE_CONNECTOR_VIRTUAL:
+               display_type = ACPI_DISPLAY_TYPE_OTHER;
+               break;
+       default:
+               MISSING_CASE(connector->connector_type);
+               display_type = ACPI_DISPLAY_TYPE_OTHER;
+               break;
+       }
+
+       return display_type;
+}
+
 static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 {
        struct intel_opregion *opregion = &dev_priv->opregion;
-       struct pci_dev *pdev = dev_priv->dev->pdev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_connector *connector;
        acpi_handle handle;
        struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -724,37 +782,18 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 
 blind_set:
        i = 0;
-       list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) {
-               int output_type = ACPI_OTHER_OUTPUT;
+       list_for_each_entry(connector,
+                           &dev_priv->drm.mode_config.connector_list, head) {
+               int display_type = acpi_display_type(connector);
+
                if (i >= max_outputs) {
                        DRM_DEBUG_KMS("More than %u outputs in connector list\n",
                                      max_outputs);
                        return;
                }
-               switch (connector->connector_type) {
-               case DRM_MODE_CONNECTOR_VGA:
-               case DRM_MODE_CONNECTOR_DVIA:
-                       output_type = ACPI_VGA_OUTPUT;
-                       break;
-               case DRM_MODE_CONNECTOR_Composite:
-               case DRM_MODE_CONNECTOR_SVIDEO:
-               case DRM_MODE_CONNECTOR_Component:
-               case DRM_MODE_CONNECTOR_9PinDIN:
-                       output_type = ACPI_TV_OUTPUT;
-                       break;
-               case DRM_MODE_CONNECTOR_DVII:
-               case DRM_MODE_CONNECTOR_DVID:
-               case DRM_MODE_CONNECTOR_DisplayPort:
-               case DRM_MODE_CONNECTOR_HDMIA:
-               case DRM_MODE_CONNECTOR_HDMIB:
-                       output_type = ACPI_DIGITAL_OUTPUT;
-                       break;
-               case DRM_MODE_CONNECTOR_LVDS:
-                       output_type = ACPI_LVDS_OUTPUT;
-                       break;
-               }
+
                temp = get_did(opregion, i);
-               set_did(opregion, i, temp | (1 << 31) | output_type | i);
+               set_did(opregion, i, temp | (1 << 31) | display_type | i);
                i++;
        }
        goto end;
@@ -916,7 +955,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
 int intel_opregion_setup(struct drm_i915_private *dev_priv)
 {
        struct intel_opregion *opregion = &dev_priv->opregion;
-       struct pci_dev *pdev = dev_priv->dev->pdev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        u32 asls, mboxes;
        char buf[sizeof(OPREGION_SIGNATURE)];
        int err = 0;
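
The five old ACPI_*_OUTPUT values are replaced by the full _DOD field layout from the ACPI 5.0 specification (Appendix B.3.2), and the connector-type switch moves into acpi_display_type(). As a worked example with a value chosen purely for illustration, a _DOD entry of 0x80010410 decodes with the new masks as:

    u32 dod = 0x80010410;   /* hypothetical _DOD entry */
    /* bit 31 set      -> ACPI_DEVICE_ID_SCHEME (uses this bit layout) */
    /* bit 16 set      -> ACPI_BIOS_CAN_DETECT                         */
    /* bits 11:8 == 4  -> ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL           */
    /* bits 7:4  == 1  -> port attachment 1                            */
    /* bits 3:0  == 0  -> display index 0                              */

The blind_set path above builds exactly such entries: it ORs (1 << 31), i.e. ACPI_DEVICE_ID_SCHEME, with the display type and the index.
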
index eb93f90bb74d86391f39c32533d64f474ebf5c15..3212d8806b5ad13b0f88cc1c1f56bc4ef2d953b8 100644
@@ -409,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        int ret;
 
-       lockdep_assert_held(&dev_priv->dev->struct_mutex);
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
        /* Only wait if there is actually an old frame to release to
         * guarantee forward progress.
@@ -741,8 +741,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        u32 swidth, swidthsw, sheight, ostride;
        enum pipe pipe = overlay->crtc->pipe;
 
-       lockdep_assert_held(&dev_priv->dev->struct_mutex);
-       WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
        ret = intel_overlay_release_old_vid(overlay);
        if (ret != 0)
@@ -836,7 +836,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        overlay->old_vid_bo = overlay->vid_bo;
        overlay->vid_bo = new_bo;
 
-       intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
+       intel_frontbuffer_flip(&dev_priv->drm,
+                              INTEL_FRONTBUFFER_OVERLAY(pipe));
 
        return 0;
 
@@ -851,8 +852,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
        struct overlay_registers __iomem *regs;
        int ret;
 
-       lockdep_assert_held(&dev_priv->dev->struct_mutex);
-       WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
        ret = intel_overlay_recover_from_interrupt(overlay);
        if (ret != 0)
@@ -1084,7 +1085,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
 {
        struct drm_intel_overlay_put_image *put_image_rec = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_overlay *overlay;
        struct drm_crtc *drmmode_crtc;
        struct intel_crtc *crtc;
@@ -1282,7 +1283,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
 {
        struct drm_intel_overlay_attrs *attrs = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_overlay *overlay;
        struct overlay_registers __iomem *regs;
        int ret;
@@ -1379,7 +1380,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
        if (!overlay)
                return;
 
-       mutex_lock(&dev_priv->dev->struct_mutex);
+       mutex_lock(&dev_priv->drm.struct_mutex);
        if (WARN_ON(dev_priv->overlay))
                goto out_free;
 
@@ -1387,9 +1388,10 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 
        reg_bo = NULL;
        if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE);
+               reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
+                                                      PAGE_SIZE);
        if (reg_bo == NULL)
-               reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE);
+               reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
        if (IS_ERR(reg_bo))
                goto out_free;
        overlay->reg_bo = reg_bo;
@@ -1434,7 +1436,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
        intel_overlay_unmap_regs(overlay, regs);
 
        dev_priv->overlay = overlay;
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
        DRM_INFO("initialized overlay support\n");
        return;
 
@@ -1444,7 +1446,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 out_free_bo:
        drm_gem_object_unreference(&reg_bo->base);
 out_free:
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
        kfree(overlay);
        return;
 }
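
The intel_setup_overlay() hunks above keep the existing allocation strategy: try a stolen-memory object first and fall back to a normal GEM object, with the quirk that the stolen path reports failure as NULL while the fallback reports it as ERR_PTR(). A hedged sketch of that fallback shape; alloc_stolen(), alloc_shmem() and alloc_reg_buffer() are hypothetical stand-ins, not i915 APIs.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdlib.h>

    /* Minimal stand-in for the kernel's IS_ERR() convention. */
    #define IS_ERR(ptr) ((unsigned long)(ptr) >= (unsigned long)-4095)

    /* Hypothetical allocators mirroring the two i915 calls above. */
    static void *alloc_stolen(size_t size) { (void)size; return NULL; } /* NULL on failure */
    static void *alloc_shmem(size_t size) { return malloc(size); }      /* ERR_PTR on failure */

    static void *alloc_reg_buffer(size_t size, bool needs_physical)
    {
            void *bo = NULL;

            if (!needs_physical)             /* prefer stolen memory... */
                    bo = alloc_stolen(size);
            if (bo == NULL)                  /* ...fall back to shmem */
                    bo = alloc_shmem(size);

            return bo;                       /* caller checks IS_ERR() */
    }
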
index bf721781c259d0e1ba716335680b46f92a28b7c1..96c65d77e8866b0c25beadb46e342ecc8e37e9d6 100644 (file)
@@ -377,7 +377,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 enum drm_connector_status
 intel_panel_detect(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Assume that the BIOS does not lie through the OpRegion... */
        if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
        if (panel->backlight.combination_mode) {
                u8 lbpc;
 
-               pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
+               pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
                val *= lbpc;
        }
 
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
 
                lbpc = level * 0xfe / panel->backlight.max + 1;
                level /= lbpc;
-               pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
+               pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
        }
 
        if (IS_GEN4(dev_priv)) {
@@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
         * backlight. This will leave the backlight on unnecessarily when
         * another client is not activated.
         */
-       if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+       if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
                DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
                return;
        }
@@ -1142,7 +1142,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
 {
        struct intel_connector *connector = bl_get_data(bd);
        struct drm_device *dev = connector->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 hw_level;
        int ret;
 
@@ -1163,7 +1163,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
        .get_brightness = intel_backlight_device_get_brightness,
 };
 
-static int intel_backlight_device_register(struct intel_connector *connector)
+int intel_backlight_device_register(struct intel_connector *connector)
 {
        struct intel_panel *panel = &connector->panel;
        struct backlight_properties props;
@@ -1225,11 +1225,6 @@ void intel_backlight_device_unregister(struct intel_connector *connector)
                panel->backlight.device = NULL;
        }
 }
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
-{
-       return 0;
-}
 #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
 /*
@@ -1321,7 +1316,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
        struct drm_device *dev = connector->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int clock;
 
        if (IS_G4X(dev_priv))
@@ -1736,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
                panel->backlight.set = bxt_set_backlight;
                panel->backlight.get = bxt_get_backlight;
                panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
-       } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+       } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+                  HAS_PCH_KBP(dev_priv)) {
                panel->backlight.setup = lpt_setup_backlight;
                panel->backlight.enable = lpt_enable_backlight;
                panel->backlight.disable = lpt_disable_backlight;
@@ -1809,11 +1805,3 @@ void intel_panel_fini(struct intel_panel *panel)
                drm_mode_destroy(intel_connector->base.dev,
                                panel->downclock_mode);
 }
-
-void intel_backlight_register(struct drm_device *dev)
-{
-       struct intel_connector *connector;
-
-       for_each_intel_connector(dev, connector)
-               intel_backlight_device_register(connector);
-}
index 658a756596574ffc176a8d151c04bd0ed4bc6228..5a8ee0c76593993345e1951581c9304d0cba54a6 100644 (file)
@@ -57,7 +57,7 @@
 
 static void gen9_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
        I915_WRITE(CHICKEN_PAR1_1,
@@ -83,7 +83,7 @@ static void gen9_init_clock_gating(struct drm_device *dev)
 
 static void bxt_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        gen9_init_clock_gating(dev);
 
@@ -109,7 +109,7 @@ static void bxt_init_clock_gating(struct drm_device *dev)
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp;
 
        tmp = I915_READ(CLKCFG);
@@ -148,7 +148,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
 
 static void i915_ironlake_get_mem_freq(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u16 ddrpll, csipll;
 
        ddrpll = I915_READ16(DDRMPLL1);
@@ -319,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
 
 void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        u32 val;
 
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -375,7 +375,7 @@ static const int pessimal_latency_ns = 5000;
 static int vlv_get_fifo_size(struct drm_device *dev,
                              enum pipe pipe, int plane)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int sprite0_start, sprite1_start, size;
 
        switch (pipe) {
@@ -426,7 +426,7 @@ static int vlv_get_fifo_size(struct drm_device *dev,
 
 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dsparb = I915_READ(DSPARB);
        int size;
 
@@ -442,7 +442,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 
 static int i830_get_fifo_size(struct drm_device *dev, int plane)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dsparb = I915_READ(DSPARB);
        int size;
 
@@ -459,7 +459,7 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
 
 static int i845_get_fifo_size(struct drm_device *dev, int plane)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dsparb = I915_READ(DSPARB);
        int size;
 
@@ -637,7 +637,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
        struct drm_device *dev = unused_crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        const struct cxsr_latency *latency;
        u32 reg;
@@ -934,7 +934,7 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
 
 static void vlv_setup_wm_latency(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* all latencies in usec */
        dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1325,7 +1325,7 @@ static void vlv_merge_wm(struct drm_device *dev,
 static void vlv_update_wm(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        struct vlv_wm_values wm = {};
@@ -1381,7 +1381,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        static const int sr_latency_ns = 12000;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
@@ -1438,7 +1438,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
        struct drm_device *dev = unused_crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        int srwm = 1;
        int cursor_sr = 16;
@@ -1512,7 +1512,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
        struct drm_device *dev = unused_crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct intel_watermark_params *wm_info;
        uint32_t fwater_lo;
        uint32_t fwater_hi;
@@ -1642,7 +1642,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 static void i845_update_wm(struct drm_crtc *unused_crtc)
 {
        struct drm_device *dev = unused_crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
        uint32_t fwater_lo;
@@ -2070,7 +2070,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 
 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (IS_GEN9(dev)) {
                uint32_t val;
@@ -2236,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
                                    uint16_t wm[5], uint16_t min)
 {
-       int level, max_level = ilk_wm_max_level(dev_priv->dev);
+       int level, max_level = ilk_wm_max_level(&dev_priv->drm);
 
        if (wm[0] >= min)
                return false;
@@ -2250,7 +2250,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 
 static void snb_wm_latency_quirk(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool changed;
 
        /*
@@ -2272,7 +2272,7 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
 
 static void ilk_setup_wm_latency(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
 
@@ -2294,7 +2294,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
 
 static void skl_setup_wm_latency(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
        intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
@@ -2330,7 +2330,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
        struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
        struct intel_pipe_wm *pipe_wm;
        struct drm_device *dev = state->dev;
-       const struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane;
        struct intel_plane_state *pristate = NULL;
        struct intel_plane_state *sprstate = NULL;
@@ -2505,7 +2505,7 @@ static void ilk_wm_merge(struct drm_device *dev,
                         const struct ilk_wm_maximums *max,
                         struct intel_pipe_wm *merged)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int level, max_level = ilk_wm_max_level(dev);
        int last_enabled_level = max_level;
 
@@ -2565,7 +2565,7 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
 /* The value we need to program into the WM_LPx latency field */
 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                return 2 * level;
@@ -2765,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
                                struct ilk_wm_values *results)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct ilk_wm_values *previous = &dev_priv->wm.hw;
        unsigned int dirty;
        uint32_t val;
@@ -2840,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 
 bool ilk_disable_lp_wm(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
@@ -3498,7 +3498,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
                     int level,
                     struct skl_wm_level *result)
 {
-       struct drm_device *dev = dev_priv->dev;
        struct drm_atomic_state *state = cstate->base.state;
        struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
        struct drm_plane *plane;
@@ -3514,7 +3513,9 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
         */
        memset(result, 0, sizeof(*result));
 
-       for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) {
+       for_each_intel_plane_mask(&dev_priv->drm,
+                                 intel_plane,
+                                 cstate->base.plane_mask) {
                int i = skl_wm_plane_id(intel_plane);
 
                plane = &intel_plane->base;
@@ -3595,7 +3596,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
                             struct skl_pipe_wm *pipe_wm)
 {
        struct drm_device *dev = cstate->base.crtc->dev;
-       const struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct drm_i915_private *dev_priv = to_i915(dev);
        int level, max_level = ilk_wm_max_level(dev);
        int ret;
 
@@ -3682,7 +3683,7 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
                                const struct skl_wm_values *new)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
 
        for_each_intel_crtc(dev, crtc) {
@@ -3779,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
                                struct skl_wm_values *new_values)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct skl_ddb_allocation *cur_ddb, *new_ddb;
        bool reallocated[I915_MAX_PIPES] = {};
        struct intel_crtc *crtc;
@@ -3879,6 +3880,19 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
        return 0;
 }
 
+static uint32_t
+pipes_modified(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *cstate;
+       uint32_t i, ret = 0;
+
+       for_each_crtc_in_state(state, crtc, cstate, i)
+               ret |= drm_crtc_mask(crtc);
+
+       return ret;
+}
+
 static int
 skl_compute_ddb(struct drm_atomic_state *state)
 {
@@ -3887,7 +3901,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct intel_crtc *intel_crtc;
        struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
-       unsigned realloc_pipes = dev_priv->active_crtcs;
+       uint32_t realloc_pipes = pipes_modified(state);
        int ret;
 
        /*
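
The new pipes_modified() helper above replaces dev_priv->active_crtcs as the set of pipes whose DDB allocation may need recomputing: only CRTCs present in the atomic state are considered. A small self-contained sketch of building and using such a mask; struct crtc and crtc_mask() are simplified stand-ins for the DRM types.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins: each CRTC gets one bit in a pipe mask. */
    struct crtc { unsigned int index; };

    static uint32_t crtc_mask(const struct crtc *c)
    {
            return 1u << c->index;
    }

    int main(void)
    {
            struct crtc in_state[] = { { .index = 0 }, { .index = 2 } };
            uint32_t realloc_pipes = 0;

            /* Accumulate a mask over the CRTCs in the atomic state,
             * mirroring the for_each_crtc_in_state() loop above. */
            for (size_t i = 0; i < sizeof(in_state) / sizeof(in_state[0]); i++)
                    realloc_pipes |= crtc_mask(&in_state[i]);

            printf("realloc_pipes = 0x%x\n", realloc_pipes); /* 0x5 */
            return 0;
    }
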
@@ -4002,7 +4016,7 @@ static void skl_update_wm(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_wm_values *results = &dev_priv->wm.skl_results;
        struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
        struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
@@ -4043,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
 
 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
        struct ilk_wm_maximums max;
        struct intel_wm_config config = {};
@@ -4145,7 +4159,7 @@ static void skl_pipe_wm_active_state(uint32_t val,
 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
@@ -4199,7 +4213,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 
 void skl_wm_get_hw_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
        struct drm_crtc *crtc;
 
@@ -4219,7 +4233,7 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct ilk_wm_values *hw = &dev_priv->wm.hw;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
@@ -4423,7 +4437,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
 
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct ilk_wm_values *hw = &dev_priv->wm.hw;
        struct drm_crtc *crtc;
 
@@ -4485,7 +4499,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
  */
 void intel_update_watermarks(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 
        if (dev_priv->display.update_wm)
                dev_priv->display.update_wm(crtc);
@@ -4654,19 +4668,23 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
        new_power = dev_priv->rps.power;
        switch (dev_priv->rps.power) {
        case LOW_POWER:
-               if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
+               if (val > dev_priv->rps.efficient_freq + 1 &&
+                   val > dev_priv->rps.cur_freq)
                        new_power = BETWEEN;
                break;
 
        case BETWEEN:
-               if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
+               if (val <= dev_priv->rps.efficient_freq &&
+                   val < dev_priv->rps.cur_freq)
                        new_power = LOW_POWER;
-               else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
+               else if (val >= dev_priv->rps.rp0_freq &&
+                        val > dev_priv->rps.cur_freq)
                        new_power = HIGH_POWER;
                break;
 
        case HIGH_POWER:
-               if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
+               if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
+                   val < dev_priv->rps.cur_freq)
                        new_power = BETWEEN;
                break;
        }
@@ -4712,22 +4730,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
        }
 
        I915_WRITE(GEN6_RP_UP_EI,
-               GT_INTERVAL_FROM_US(dev_priv, ei_up));
+                  GT_INTERVAL_FROM_US(dev_priv, ei_up));
        I915_WRITE(GEN6_RP_UP_THRESHOLD,
-               GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+                  GT_INTERVAL_FROM_US(dev_priv,
+                                      ei_up * threshold_up / 100));
 
        I915_WRITE(GEN6_RP_DOWN_EI,
-               GT_INTERVAL_FROM_US(dev_priv, ei_down));
+                  GT_INTERVAL_FROM_US(dev_priv, ei_down));
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
-               GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+                  GT_INTERVAL_FROM_US(dev_priv,
+                                      ei_down * threshold_down / 100));
 
-        I915_WRITE(GEN6_RP_CONTROL,
-                   GEN6_RP_MEDIA_TURBO |
-                   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                   GEN6_RP_MEDIA_IS_GFX |
-                   GEN6_RP_ENABLE |
-                   GEN6_RP_UP_BUSY_AVG |
-                   GEN6_RP_DOWN_IDLE_AVG);
+       I915_WRITE(GEN6_RP_CONTROL,
+                  GEN6_RP_MEDIA_TURBO |
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                  GEN6_RP_MEDIA_IS_GFX |
+                  GEN6_RP_ENABLE |
+                  GEN6_RP_UP_BUSY_AVG |
+                  GEN6_RP_DOWN_IDLE_AVG);
 
        dev_priv->rps.power = new_power;
        dev_priv->rps.up_threshold = threshold_up;
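
The reflowed writes above program the RPS up/down evaluation intervals and thresholds, where each threshold is the interval scaled by a percentage before GT_INTERVAL_FROM_US() converts it to GT clock units. A worked example of just the percentage arithmetic; the interval and percentage values below are illustrative, not the driver's actual tuning.

    #include <stdio.h>

    int main(void)
    {
            unsigned int ei_up = 10000, threshold_up = 85;     /* us, % */
            unsigned int ei_down = 32000, threshold_down = 60; /* us, % */

            /* GEN6_RP_UP_THRESHOLD takes the up interval scaled by the
             * up percentage; likewise for the down direction. */
            printf("up threshold:   %u us\n", ei_up * threshold_up / 100);     /* 8500 */
            printf("down threshold: %u us\n", ei_down * threshold_down / 100); /* 19200 */
            return 0;
    }
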
@@ -4844,12 +4864,27 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
                        gen6_rps_reset_ei(dev_priv);
                I915_WRITE(GEN6_PMINTRMSK,
                           gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+
+               gen6_enable_rps_interrupts(dev_priv);
+
+               /* Ensure we start at the user's desired frequency */
+               intel_set_rps(dev_priv,
+                             clamp(dev_priv->rps.cur_freq,
+                                   dev_priv->rps.min_freq_softlimit,
+                                   dev_priv->rps.max_freq_softlimit));
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
+       /* Flush our bottom-half so that it does not race with us
+        * setting the idle frequency and so that it is bounded by
+        * our rpm wakeref. Then disable the interrupts to stop any
+        * further RPS reclocking whilst we are asleep.
+        */
+       gen6_disable_rps_interrupts(dev_priv);
+
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -4874,7 +4909,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
        /* This is intentionally racy! We peek at the state here, then
         * validate inside the RPS worker.
         */
-       if (!(dev_priv->mm.busy &&
+       if (!(dev_priv->gt.awake &&
              dev_priv->rps.enabled &&
              dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
                return;
@@ -4890,7 +4925,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
                spin_lock_irq(&dev_priv->irq_lock);
                if (dev_priv->rps.interrupts_enabled) {
                        dev_priv->rps.client_boost = true;
-                       queue_work(dev_priv->wq, &dev_priv->rps.work);
+                       schedule_work(&dev_priv->rps.work);
                }
                spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -4954,14 +4989,15 @@ static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
                        mode = 0;
        }
        if (HAS_RC6p(dev_priv))
-               DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
-                             onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
-                             onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
-                             onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
+               DRM_DEBUG_DRIVER("Enabling RC6 states: "
+                                "RC6 %s RC6p %s RC6pp %s\n",
+                                onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+                                onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+                                onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
 
        else
-               DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
-                             onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
+               DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
+                                onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
 }
 
 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
@@ -4969,9 +5005,20 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool enable_rc6 = true;
        unsigned long rc6_ctx_base;
+       u32 rc_ctl;
+       int rc_sw_target;
+
+       rc_ctl = I915_READ(GEN6_RC_CONTROL);
+       rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
+                      RC_SW_TARGET_STATE_SHIFT;
+       DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+                        "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+                        onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+                        onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+                        rc_sw_target);
 
        if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
-               DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+               DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
                enable_rc6 = false;
        }
 
@@ -4983,7 +5030,7 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
        if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
              (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
                                        ggtt->stolen_reserved_size))) {
-               DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+               DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
                enable_rc6 = false;
        }
 
@@ -4991,15 +5038,24 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
              ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
              ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
              ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
-               DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+               DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
                enable_rc6 = false;
        }
 
-       if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
-                                           GEN6_RC_CTL_HW_ENABLE)) &&
-           ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
-            !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
-               DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+       if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
+           !I915_READ(GEN8_PUSHBUS_ENABLE) ||
+           !I915_READ(GEN8_PUSHBUS_SHIFT)) {
+               DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+               enable_rc6 = false;
+       }
+
+       if (!I915_READ(GEN6_GFXPAUSE)) {
+               DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+               enable_rc6 = false;
+       }
+
+       if (!I915_READ(GEN8_MISC_CTRL0)) {
+               DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
                enable_rc6 = false;
        }
 
@@ -5031,8 +5087,9 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
                        mask = INTEL_RC6_ENABLE;
 
                if ((enable_rc6 & mask) != enable_rc6)
-                       DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
-                                     enable_rc6 & mask, enable_rc6, mask);
+                       DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
+                                        "(requested %d, valid %d)\n",
+                                        enable_rc6 & mask, enable_rc6, mask);
 
                return enable_rc6 & mask;
        }
@@ -5643,7 +5700,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
        u32 pcbr;
        int pctx_size = 24*1024;
 
-       mutex_lock(&dev_priv->dev->struct_mutex);
+       mutex_lock(&dev_priv->drm.struct_mutex);
 
        pcbr = I915_READ(VLV_PCBR);
        if (pcbr) {
@@ -5651,7 +5708,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
                int pcbr_offset;
 
                pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
-               pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+               pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
                                                                      pcbr_offset,
                                                                      I915_GTT_OFFSET_NONE,
                                                                      pctx_size);
@@ -5668,7 +5725,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
         * overlap with other ranges, such as the frame buffer, protected
         * memory, or any other relevant ranges.
         */
-       pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size);
+       pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
        if (!pctx) {
                DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
                goto out;
@@ -5680,7 +5737,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 out:
        DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
        dev_priv->vlv_pctx = pctx;
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
@@ -6624,9 +6681,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 
        if (IS_IRONLAKE_M(dev_priv)) {
                ironlake_enable_drps(dev_priv);
-               mutex_lock(&dev_priv->dev->struct_mutex);
+               mutex_lock(&dev_priv->drm.struct_mutex);
                intel_init_emon(dev_priv);
-               mutex_unlock(&dev_priv->dev->struct_mutex);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
        } else if (INTEL_INFO(dev_priv)->gen >= 6) {
                /*
                 * PCU communication is slow and this doesn't need to be
@@ -6657,7 +6714,7 @@ void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
 
 static void ibx_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /*
         * On Ibex Peak and Cougar Point, we need to disable clock
@@ -6669,7 +6726,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
 
 static void g4x_disable_trickle_feed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
 
        for_each_pipe(dev_priv, pipe) {
@@ -6684,7 +6741,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
 
 static void ilk_init_lp_watermarks(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
        I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6698,7 +6755,7 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
 
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
        /*
@@ -6772,7 +6829,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 
 static void cpt_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
        uint32_t val;
 
@@ -6809,7 +6866,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
 
 static void gen6_check_mch_setup(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp;
 
        tmp = I915_READ(MCH_SSKPD);
@@ -6820,7 +6877,7 @@ static void gen6_check_mch_setup(struct drm_device *dev)
 
 static void gen6_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
        I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -6935,7 +6992,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 
 static void lpt_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /*
         * TODO: this bit should only be enabled when really needed, then
@@ -6954,7 +7011,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
 
 static void lpt_suspend_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (HAS_PCH_LPT_LP(dev)) {
                uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
@@ -6989,7 +7046,7 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
 
 static void kabylake_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        gen9_init_clock_gating(dev);
 
@@ -7010,7 +7067,7 @@ static void kabylake_init_clock_gating(struct drm_device *dev)
 
 static void skylake_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        gen9_init_clock_gating(dev);
 
@@ -7025,7 +7082,7 @@ static void skylake_init_clock_gating(struct drm_device *dev)
 
 static void broadwell_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
 
        ilk_init_lp_watermarks(dev);
@@ -7076,7 +7133,7 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
 
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        ilk_init_lp_watermarks(dev);
 
@@ -7132,7 +7189,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 
 static void ivybridge_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t snpcr;
 
        ilk_init_lp_watermarks(dev);
@@ -7230,7 +7287,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* WaDisableEarlyCull:vlv */
        I915_WRITE(_3D_CHICKEN3,
@@ -7312,7 +7369,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 
 static void cherryview_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* WaVSRefCountFullforceMissDisable:chv */
        /* WaDSRefCountFullforceMissDisable:chv */
@@ -7348,7 +7405,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
 
 static void g4x_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dspclk_gate;
 
        I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7375,7 +7432,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
 
 static void crestline_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
        I915_WRITE(RENCLK_GATE_D2, 0);
@@ -7391,7 +7448,7 @@ static void crestline_init_clock_gating(struct drm_device *dev)
 
 static void broadwater_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
                   I965_RCC_CLOCK_GATE_DISABLE |
@@ -7408,7 +7465,7 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
 
 static void gen3_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dstate = I915_READ(D_STATE);
 
        dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -7433,7 +7490,7 @@ static void gen3_init_clock_gating(struct drm_device *dev)
 
 static void i85x_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
 
@@ -7447,7 +7504,7 @@ static void i85x_init_clock_gating(struct drm_device *dev)
 
 static void i830_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 
@@ -7458,7 +7515,7 @@ static void i830_init_clock_gating(struct drm_device *dev)
 
 void intel_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->display.init_clock_gating(dev);
 }
@@ -7526,7 +7583,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_fbc_init(dev_priv);
 
@@ -7604,46 +7661,59 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 {
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+       /* GEN6_PCODE_* are outside of the forcewake domain, so we can
+        * use the I915_READ_FW variants to reduce the amount of work
+        * required when reading/writing.
+        */
+
+       if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
                DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
                return -EAGAIN;
        }
 
-       I915_WRITE(GEN6_PCODE_DATA, *val);
-       I915_WRITE(GEN6_PCODE_DATA1, 0);
-       I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+       I915_WRITE_FW(GEN6_PCODE_DATA, *val);
+       I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
+       I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
-       if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-                    500)) {
+       if (intel_wait_for_register_fw(dev_priv,
+                                      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+                                      500)) {
                DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
                return -ETIMEDOUT;
        }
 
-       *val = I915_READ(GEN6_PCODE_DATA);
-       I915_WRITE(GEN6_PCODE_DATA, 0);
+       *val = I915_READ_FW(GEN6_PCODE_DATA);
+       I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
        return 0;
 }
 
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
+                              u32 mbox, u32 val)
 {
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+       /* GEN6_PCODE_* are outside of the forcewake domain, so we can
+        * use the I915_READ_FW variants to reduce the amount of work
+        * required when reading/writing.
+        */
+
+       if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
                DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
                return -EAGAIN;
        }
 
-       I915_WRITE(GEN6_PCODE_DATA, val);
-       I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+       I915_WRITE_FW(GEN6_PCODE_DATA, val);
+       I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
-       if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-                    500)) {
+       if (intel_wait_for_register_fw(dev_priv,
+                                      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+                                      500)) {
                DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
                return -ETIMEDOUT;
        }
 
-       I915_WRITE(GEN6_PCODE_DATA, 0);
+       I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
        return 0;
 }
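
Both pcode routines above follow the same mailbox handshake, now via the raw _FW accessors: bail out if the READY bit is already set, write the payload, write the command with READY set, then poll until READY clears. A hedged standalone sketch of that protocol against a fake register file; read_reg(), write_reg() and pcode_service() are stand-ins, not i915 functions.

    #include <stdint.h>

    #define PCODE_READY (1u << 31)

    /* Fake registers standing in for I915_READ_FW()/I915_WRITE_FW(). */
    static uint32_t mbox_reg, data_reg;

    static uint32_t read_reg(uint32_t *reg) { return *reg; }
    static void write_reg(uint32_t *reg, uint32_t v) { *reg = v; }

    /* Simulated firmware side: consume the request immediately. */
    static void pcode_service(void) { mbox_reg &= ~PCODE_READY; }

    static int pcode_write(uint32_t mbox, uint32_t val)
    {
            if (read_reg(&mbox_reg) & PCODE_READY)
                    return -1;                      /* mailbox busy: -EAGAIN */

            write_reg(&data_reg, val);
            write_reg(&mbox_reg, PCODE_READY | mbox);

            for (int tries = 0; tries < 500; tries++) { /* bounded poll */
                    pcode_service();                    /* firmware acts */
                    if (!(read_reg(&mbox_reg) & PCODE_READY))
                            return 0;
            }
            return -2;                              /* -ETIMEDOUT */
    }
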
@@ -7713,7 +7783,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
        struct request_boost *boost = container_of(work, struct request_boost, work);
        struct drm_i915_gem_request *req = boost->req;
 
-       if (!i915_gem_request_completed(req, true))
+       if (!i915_gem_request_completed(req))
                gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
        i915_gem_request_unreference(req);
@@ -7727,7 +7797,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
        if (req == NULL || INTEL_GEN(req->i915) < 6)
                return;
 
-       if (i915_gem_request_completed(req, true))
+       if (i915_gem_request_completed(req))
                return;
 
        boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
@@ -7743,7 +7813,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
 
 void intel_pm_setup(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        mutex_init(&dev_priv->rps.hw_lock);
        spin_lock_init(&dev_priv->rps.client_lock);
index 29a09bf6bd18666a3225d6e77d93d2bc5504ebde..68bd0bb34817fa17e3ad6da0ab19d90fe5e4a19a 100644 (file)
@@ -63,7 +63,7 @@ static bool is_edp_psr(struct intel_dp *intel_dp)
 
 static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t val;
 
        val = I915_READ(VLV_PSRSTAT(pipe)) &
@@ -77,7 +77,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -107,7 +107,7 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        uint32_t val;
@@ -173,7 +173,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t aux_clock_divider;
        i915_reg_t aux_ctl_reg;
        static const uint8_t aux_msg[] = {
@@ -220,7 +220,7 @@ static void vlv_psr_enable_source(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
@@ -235,7 +235,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
@@ -252,7 +252,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        uint32_t max_sleep_time = 0x1f;
        /* Lately it was identified that depending on panel idle frame count
@@ -324,7 +324,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
@@ -378,7 +378,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        WARN_ON(dev_priv->psr.active);
@@ -407,7 +407,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
 
        if (!HAS_PSR(dev)) {
@@ -494,15 +494,18 @@ static void vlv_psr_disable(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(intel_dig_port->base.base.crtc);
        uint32_t val;
 
        if (dev_priv->psr.active) {
                /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
-               if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
-                             VLV_EDP_PSR_IN_TRANS) == 0, 1))
+               if (intel_wait_for_register(dev_priv,
+                                           VLV_PSRSTAT(intel_crtc->pipe),
+                                           VLV_EDP_PSR_IN_TRANS,
+                                           0,
+                                           1))
                        WARN(1, "PSR transition took longer than expected\n");
 
                val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
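
Several hunks in this section (here and in intel_ringbuffer.c below) replace open-coded wait_for((I915_READ(reg) & mask) == value, timeout) loops with intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms). A simplified sketch of the equivalent polling semantics; it omits the sleeping and forcewake handling the real helper provides, and read_reg()/now_ms() are illustrative stand-ins.

    #include <stdint.h>
    #include <time.h>

    static uint32_t fake_reg;  /* stands in for an MMIO register */

    /* Hypothetical read standing in for I915_READ(). */
    static uint32_t read_reg(uint32_t reg) { (void)reg; return fake_reg; }

    static long now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* Poll until (reg & mask) == value or timeout_ms elapses; returns 0
     * on success, -1 on timeout. */
    static int wait_for_register(uint32_t reg, uint32_t mask,
                                 uint32_t value, unsigned int timeout_ms)
    {
            const long deadline = now_ms() + timeout_ms;

            do {
                    if ((read_reg(reg) & mask) == value)
                            return 0;
            } while (now_ms() < deadline);

            return -1;  /* callers report -ETIMEDOUT, as in the hunks above */
    }
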
@@ -521,16 +524,18 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (dev_priv->psr.active) {
                I915_WRITE(EDP_PSR_CTL,
                           I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 
                /* Wait till PSR is idle */
-               if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
-                              EDP_PSR_STATUS_STATE_MASK) == 0,
-                              2 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
+               if (intel_wait_for_register(dev_priv,
+                                           EDP_PSR_STATUS_CTL,
+                                           EDP_PSR_STATUS_STATE_MASK,
+                                           0,
+                                           2000))
                        DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
                dev_priv->psr.active = false;
@@ -549,7 +554,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
@@ -586,14 +591,20 @@ static void intel_psr_work(struct work_struct *work)
         * and be ready for re-enable.
         */
        if (HAS_DDI(dev_priv)) {
-               if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
-                             EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+               if (intel_wait_for_register(dev_priv,
+                                           EDP_PSR_STATUS_CTL,
+                                           EDP_PSR_STATUS_STATE_MASK,
+                                           0,
+                                           50)) {
                        DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                        return;
                }
        } else {
-               if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
-                             VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
+               if (intel_wait_for_register(dev_priv,
+                                           VLV_PSRSTAT(pipe),
+                                           VLV_EDP_PSR_IN_TRANS,
+                                           0,
+                                           1)) {
                        DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                        return;
                }
@@ -619,7 +630,7 @@ static void intel_psr_work(struct work_struct *work)
 
 static void intel_psr_exit(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
        struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -674,7 +685,7 @@ static void intel_psr_exit(struct drm_device *dev)
 void intel_psr_single_frame_update(struct drm_device *dev,
                                   unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
        u32 val;
@@ -722,7 +733,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
 void intel_psr_invalidate(struct drm_device *dev,
                          unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -760,7 +771,7 @@ void intel_psr_invalidate(struct drm_device *dev,
 void intel_psr_flush(struct drm_device *dev,
                     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -796,7 +807,7 @@ void intel_psr_flush(struct drm_device *dev,
  */
 void intel_psr_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
                HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
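The dev->dev_private to to_i915(dev) conversions here and below lean on struct drm_device now being embedded inside struct drm_i915_private (visible in this diff as the &dev_priv->drm usages), so the lookup becomes a typed container_of() rather than an opaque-pointer dereference. A sketch of the accessor this assumes:

	static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
	{
		return container_of(dev, struct drm_i915_private, drm);
	}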
index fedd270498140945d6b4167c8b2844833b46e2a1..61e00bf9e87f928f672ebe9c35243887725d82f4 100644 (file)
@@ -58,18 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
                                            ringbuf->tail, ringbuf->size);
 }
 
-bool intel_engine_stopped(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
-}
-
 static void __intel_ring_advance(struct intel_engine_cs *engine)
 {
        struct intel_ringbuffer *ringbuf = engine->buffer;
        ringbuf->tail &= ringbuf->size - 1;
-       if (intel_engine_stopped(engine))
-               return;
        engine->write_tail(engine, ringbuf->tail);
 }
 
@@ -515,8 +507,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
                I915_WRITE(reg,
                           _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
                                              INSTPM_SYNC_FLUSH));
-               if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
-                            1000))
+               if (intel_wait_for_register(dev_priv,
+                                           reg, INSTPM_SYNC_FLUSH, 0,
+                                           1000))
                        DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
                                  engine->name);
        }
@@ -528,7 +521,11 @@ static bool stop_ring(struct intel_engine_cs *engine)
 
        if (!IS_GEN2(dev_priv)) {
                I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-               if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+               if (intel_wait_for_register(dev_priv,
+                                           RING_MI_MODE(engine->mmio_base),
+                                           MODE_IDLE,
+                                           MODE_IDLE,
+                                           1000)) {
                        DRM_ERROR("%s : timed out trying to stop ring\n",
                                  engine->name);
                        /* Sometimes we observe that the idle flag is not
@@ -643,58 +640,42 @@ static int init_ring_common(struct intel_engine_cs *engine)
        return ret;
 }
 
-void
-intel_fini_pipe_control(struct intel_engine_cs *engine)
+void intel_fini_pipe_control(struct intel_engine_cs *engine)
 {
        if (engine->scratch.obj == NULL)
                return;
 
-       if (INTEL_GEN(engine->i915) >= 5) {
-               kunmap(sg_page(engine->scratch.obj->pages->sgl));
-               i915_gem_object_ggtt_unpin(engine->scratch.obj);
-       }
-
+       i915_gem_object_ggtt_unpin(engine->scratch.obj);
        drm_gem_object_unreference(&engine->scratch.obj->base);
        engine->scratch.obj = NULL;
 }
 
-int
-intel_init_pipe_control(struct intel_engine_cs *engine)
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
 {
+       struct drm_i915_gem_object *obj;
        int ret;
 
        WARN_ON(engine->scratch.obj);
 
-       engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096);
-       if (IS_ERR(engine->scratch.obj)) {
-               DRM_ERROR("Failed to allocate seqno page\n");
-               ret = PTR_ERR(engine->scratch.obj);
-               engine->scratch.obj = NULL;
+       obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+       if (!obj)
+               obj = i915_gem_object_create(&engine->i915->drm, size);
+       if (IS_ERR(obj)) {
+               DRM_ERROR("Failed to allocate scratch page\n");
+               ret = PTR_ERR(obj);
                goto err;
        }
 
-       ret = i915_gem_object_set_cache_level(engine->scratch.obj,
-                                             I915_CACHE_LLC);
+       ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
        if (ret)
                goto err_unref;
 
-       ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
-       if (ret)
-               goto err_unref;
-
-       engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
-       engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
-       if (engine->scratch.cpu_page == NULL) {
-               ret = -ENOMEM;
-               goto err_unpin;
-       }
-
+       engine->scratch.obj = obj;
+       engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
        DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
                         engine->name, engine->scratch.gtt_offset);
        return 0;
 
-err_unpin:
-       i915_gem_object_ggtt_unpin(engine->scratch.obj);
 err_unref:
-       drm_gem_object_unreference(&engine->scratch.obj->base);
+       drm_gem_object_unreference(&obj->base);
 err:
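After this refactor intel_init_pipe_control() takes its scratch size from the caller, prefers a stolen-memory object and falls back to a regular shmem-backed one, and no longer kmap()s the page, so only the GGTT offset survives in engine->scratch (note that on the error path the local obj, not the still-NULL engine->scratch.obj, must be unreferenced). The call sites later in this diff choose the size per platform, roughly:

	if (INTEL_GEN(dev_priv) >= 6)		/* one page for PIPE_CONTROL scratch writes */
		ret = intel_init_pipe_control(engine, 4096);
	else if (HAS_BROKEN_CS_TLB(dev_priv))	/* i830/845 CS TLB workaround batch */
		ret = intel_init_pipe_control(engine, I830_WA_SIZE);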
@@ -1324,8 +1305,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
        if (IS_GEN(dev_priv, 6, 7))
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-       if (HAS_L3_DPF(dev_priv))
-               I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
+       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 
        return init_workarounds_ring(engine);
 }
@@ -1362,19 +1342,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_engine_id(waiter, dev_priv, id) {
-               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
                intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
                                           PIPE_CONTROL_QW_WRITE |
                                           PIPE_CONTROL_CS_STALL);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset));
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, 0);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->hw_id));
@@ -1403,18 +1381,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_engine_id(waiter, dev_priv, id) {
-               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
                                           MI_FLUSH_DW_OP_STOREDW);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
                                           MI_FLUSH_DW_USE_GTT);
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->hw_id));
                intel_ring_emit(signaller, 0);
@@ -1445,11 +1421,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
 
                if (i915_mmio_reg_valid(mbox_reg)) {
-                       u32 seqno = i915_gem_request_get_seqno(signaller_req);
-
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit_reg(signaller, mbox_reg);
-                       intel_ring_emit(signaller, seqno);
+                       intel_ring_emit(signaller, signaller_req->seqno);
                }
        }
 
@@ -1485,7 +1459,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
        intel_ring_emit(engine,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       intel_ring_emit(engine, req->seqno);
        intel_ring_emit(engine, MI_USER_INTERRUPT);
        __intel_ring_advance(engine);
 
@@ -1542,6 +1516,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 {
        struct intel_engine_cs *waiter = waiter_req->engine;
        struct drm_i915_private *dev_priv = waiter_req->i915;
+       u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
        struct i915_hw_ppgtt *ppgtt;
        int ret;
 
@@ -1553,10 +1528,8 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
                                MI_SEMAPHORE_GLOBAL_GTT |
                                MI_SEMAPHORE_SAD_GTE_SDD);
        intel_ring_emit(waiter, seqno);
-       intel_ring_emit(waiter,
-                       lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
-       intel_ring_emit(waiter,
-                       upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+       intel_ring_emit(waiter, lower_32_bits(offset));
+       intel_ring_emit(waiter, upper_32_bits(offset));
        intel_ring_advance(waiter);
 
        /* When the !RCS engines idle waiting upon a semaphore, they lose their
@@ -1611,66 +1584,22 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
        return 0;
 }
 
-#define PIPE_CONTROL_FLUSH(ring__, addr__)                                     \
-do {                                                                   \
-       intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
-                PIPE_CONTROL_DEPTH_STALL);                             \
-       intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
-       intel_ring_emit(ring__, 0);                                                     \
-       intel_ring_emit(ring__, 0);                                                     \
-} while (0)
-
-static int
-pc_render_add_request(struct drm_i915_gem_request *req)
+static void
+gen5_seqno_barrier(struct intel_engine_cs *ring)
 {
-       struct intel_engine_cs *engine = req->engine;
-       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
-       int ret;
-
-       /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
-        * incoherent with writes to memory, i.e. completely fubar,
-        * so we need to use PIPE_NOTIFY instead.
+       /* MI_STORE are internally buffered by the GPU and not flushed
+        * either by MI_FLUSH or SyncFlush or any other combination of
+        * MI commands.
         *
-        * However, we also need to workaround the qword write
-        * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
-        * memory before requesting an interrupt.
+        * "Only the submission of the store operation is guaranteed.
+        * The write result will be complete (coherent) some time later
+        * (this is practically a finite period but there is no guaranteed
+        * latency)."
+        *
+        * Empirically, we observe that we need a delay of at least 75us to
+        * be sure that the seqno write is visible by the CPU.
         */
-       ret = intel_ring_begin(req, 32);
-       if (ret)
-               return ret;
-
-       intel_ring_emit(engine,
-                       GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
-                       PIPE_CONTROL_WRITE_FLUSH |
-                       PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-       intel_ring_emit(engine,
-                       engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
-       intel_ring_emit(engine, 0);
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-
-       intel_ring_emit(engine,
-                       GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
-                       PIPE_CONTROL_WRITE_FLUSH |
-                       PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
-                       PIPE_CONTROL_NOTIFY);
-       intel_ring_emit(engine,
-                       engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
-       intel_ring_emit(engine, 0);
-       __intel_ring_advance(engine);
-
-       return 0;
+       usleep_range(125, 250);
 }
 
 static void
@@ -1698,127 +1627,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
        spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
-static u32
-ring_get_seqno(struct intel_engine_cs *engine)
-{
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
 static void
-ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
-static u32
-pc_render_get_seqno(struct intel_engine_cs *engine)
+gen5_irq_enable(struct intel_engine_cs *engine)
 {
-       return engine->scratch.cpu_page[0];
+       gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
-pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
+gen5_irq_disable(struct intel_engine_cs *engine)
 {
-       engine->scratch.cpu_page[0] = seqno;
-}
-
-static bool
-gen5_ring_get_irq(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0)
-               gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
-gen5_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_irq_enable(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0)
-               gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-}
-
-static bool
-i9xx_ring_get_irq(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
-
-       if (!intel_irqs_enabled(dev_priv))
-               return false;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               dev_priv->irq_mask &= ~engine->irq_enable_mask;
-               I915_WRITE(IMR, dev_priv->irq_mask);
-               POSTING_READ(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       dev_priv->irq_mask &= ~engine->irq_enable_mask;
+       I915_WRITE(IMR, dev_priv->irq_mask);
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-i9xx_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_irq_disable(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               dev_priv->irq_mask |= engine->irq_enable_mask;
-               I915_WRITE(IMR, dev_priv->irq_mask);
-               POSTING_READ(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       dev_priv->irq_mask |= engine->irq_enable_mask;
+       I915_WRITE(IMR, dev_priv->irq_mask);
 }
 
-static bool
-i8xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i8xx_irq_enable(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
-
-       if (!intel_irqs_enabled(dev_priv))
-               return false;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               dev_priv->irq_mask &= ~engine->irq_enable_mask;
-               I915_WRITE16(IMR, dev_priv->irq_mask);
-               POSTING_READ16(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       dev_priv->irq_mask &= ~engine->irq_enable_mask;
+       I915_WRITE16(IMR, dev_priv->irq_mask);
+       POSTING_READ16(RING_IMR(engine->mmio_base));
 }
 
 static void
-i8xx_ring_put_irq(struct intel_engine_cs *engine)
+i8xx_irq_disable(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               dev_priv->irq_mask |= engine->irq_enable_mask;
-               I915_WRITE16(IMR, dev_priv->irq_mask);
-               POSTING_READ16(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       dev_priv->irq_mask |= engine->irq_enable_mask;
+       I915_WRITE16(IMR, dev_priv->irq_mask);
 }
 
 static int
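The get/put pairs above shrink to bare irq_enable/irq_disable hooks: the per-engine reference count and the dev_priv->irq_lock serialization they used to carry now belong to their single caller, the breadcrumbs waiter code introduced alongside this series (intel_engine_init_breadcrumbs() appears later in this diff). A hypothetical caller sketch under that assumption; the counter name and placement here are illustrative, not the driver's:

	spin_lock_irq(&dev_priv->irq_lock);
	if (waiters->irq_count++ == 0)		/* first waiter arms the interrupt */
		engine->irq_enable(engine);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* ...and symmetrically, the last waiter disarms it: */
	spin_lock_irq(&dev_priv->irq_lock);
	if (--waiters->irq_count == 0)
		engine->irq_disable(engine);
	spin_unlock_irq(&dev_priv->irq_lock);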
@@ -1852,129 +1708,68 @@ i9xx_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
        intel_ring_emit(engine,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       intel_ring_emit(engine, req->seqno);
        intel_ring_emit(engine, MI_USER_INTERRUPT);
        __intel_ring_advance(engine);
 
        return 0;
 }
 
-static bool
-gen6_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen6_irq_enable(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
-                       I915_WRITE_IMR(engine,
-                                      ~(engine->irq_enable_mask |
-                                        GT_PARITY_ERROR(dev_priv)));
-               else
-                       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-               gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
-       return true;
+       I915_WRITE_IMR(engine,
+                      ~(engine->irq_enable_mask |
+                        engine->irq_keep_mask));
+       gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-gen6_ring_put_irq(struct intel_engine_cs *engine)
+gen6_irq_disable(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
-                       I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
-               else
-                       I915_WRITE_IMR(engine, ~0);
-               gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+       gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-hsw_vebox_get_irq(struct intel_engine_cs *engine)
+static void
+hsw_vebox_irq_enable(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
 
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-               gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+       gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-hsw_vebox_put_irq(struct intel_engine_cs *engine)
+hsw_vebox_irq_disable(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               I915_WRITE_IMR(engine, ~0);
-               gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~0);
+       gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-gen8_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen8_irq_enable(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
 
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
-                       I915_WRITE_IMR(engine,
-                                      ~(engine->irq_enable_mask |
-                                        GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
-               } else {
-                       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-               }
-               POSTING_READ(RING_IMR(engine->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       I915_WRITE_IMR(engine,
+                      ~(engine->irq_enable_mask |
+                        engine->irq_keep_mask));
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-gen8_ring_put_irq(struct intel_engine_cs *engine)
+gen8_irq_disable(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
-                       I915_WRITE_IMR(engine,
-                                      ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-               } else {
-                       I915_WRITE_IMR(engine, ~0);
-               }
-               POSTING_READ(RING_IMR(engine->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
 static int
@@ -2093,7 +1888,7 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
        if (!dev_priv->status_page_dmah)
                return;
 
-       drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
+       drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
        engine->status_page.page_addr = NULL;
 }
 
@@ -2119,7 +1914,7 @@ static int init_status_page(struct intel_engine_cs *engine)
                unsigned flags;
                int ret;
 
-               obj = i915_gem_object_create(engine->i915->dev, 4096);
+               obj = i915_gem_object_create(&engine->i915->drm, 4096);
                if (IS_ERR(obj)) {
                        DRM_ERROR("Failed to allocate status page\n");
                        return PTR_ERR(obj);
@@ -2168,7 +1963,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 
        if (!dev_priv->status_page_dmah) {
                dev_priv->status_page_dmah =
-                       drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
+                       drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
                if (!dev_priv->status_page_dmah)
                        return -ENOMEM;
        }
@@ -2301,7 +2096,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
        ring->last_retired_head = -1;
        intel_ring_update_space(ring);
 
-       ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
+       ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
                                 engine->name, ret);
@@ -2321,6 +2116,57 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
        kfree(ring);
 }
 
+static int intel_ring_context_pin(struct i915_gem_context *ctx,
+                                 struct intel_engine_cs *engine)
+{
+       struct intel_context *ce = &ctx->engine[engine->id];
+       int ret;
+
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+       if (ce->pin_count++)
+               return 0;
+
+       if (ce->state) {
+               ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+               if (ret)
+                       goto error;
+       }
+
+       /* The kernel context is only used as a placeholder for flushing the
+        * active context. It is never used for submitting user rendering and
+        * as such never requires the golden render context, and so we can skip
+        * emitting it when we switch to the kernel context. This is required
+        * as during eviction we cannot allocate and pin the renderstate in
+        * order to initialise the context.
+        */
+       if (ctx == ctx->i915->kernel_context)
+               ce->initialised = true;
+
+       i915_gem_context_reference(ctx);
+       return 0;
+
+error:
+       ce->pin_count = 0;
+       return ret;
+}
+
+static void intel_ring_context_unpin(struct i915_gem_context *ctx,
+                                    struct intel_engine_cs *engine)
+{
+       struct intel_context *ce = &ctx->engine[engine->id];
+
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+       if (--ce->pin_count)
+               return;
+
+       if (ce->state)
+               i915_gem_object_ggtt_unpin(ce->state);
+
+       i915_gem_context_unreference(ctx);
+}
+
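intel_ring_context_pin()/intel_ring_context_unpin() form a struct_mutex-guarded nesting pair: only the 0 -> 1 transition pins ce->state into the GGTT and takes the context reference, and only the matching 1 -> 0 transition releases them. A usage sketch under that assumption:

	mutex_lock(&dev_priv->drm.struct_mutex);
	ret = intel_ring_context_pin(ctx, engine);	/* nested; only the first pin does work */
	if (ret == 0) {
		/* ... build and submit requests that run ctx on this engine ... */
		intel_ring_context_unpin(ctx, engine);	/* last unpin unpins and unreferences */
	}
	mutex_unlock(&dev_priv->drm.struct_mutex);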
 static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_engine_cs *engine)
 {
@@ -2339,7 +2185,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        memset(engine->semaphore.sync_seqno, 0,
               sizeof(engine->semaphore.sync_seqno));
 
-       init_waitqueue_head(&engine->irq_queue);
+       ret = intel_engine_init_breadcrumbs(engine);
+       if (ret)
+               goto error;
+
+       /* We may need to do things with the shrinker which
+        * require us to immediately switch back to the default
+        * context. This can cause a problem as pinning the
+        * default context also requires GTT space which may not
+        * be available. To avoid this we always pin the default
+        * context.
+        */
+       ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
+       if (ret)
+               goto error;
 
        ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
        if (IS_ERR(ringbuf)) {
@@ -2408,6 +2267,10 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 
        i915_cmd_parser_fini_ring(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);
+       intel_engine_fini_breadcrumbs(engine);
+
+       intel_ring_context_unpin(dev_priv->kernel_context, engine);
+
        engine->i915 = NULL;
 }
 
@@ -2603,10 +2466,19 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
        memset(engine->semaphore.sync_seqno, 0,
               sizeof(engine->semaphore.sync_seqno));
 
-       engine->set_seqno(engine, seqno);
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
        engine->last_submitted_seqno = seqno;
 
        engine->hangcheck.seqno = seqno;
+
+       /* After manually advancing the seqno, fake the interrupt in case
+        * there are any waiters for that seqno.
+        */
+       rcu_read_lock();
+       intel_engine_wakeup(engine);
+       rcu_read_unlock();
 }
 
 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
@@ -2614,32 +2486,38 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
        /* Every tail move must follow the sequence below */
 
        /* Disable notification that the ring is IDLE. The GT
         * will then assume that it is busy and bring it out of rc6.
         */
-       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+       I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+                     _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 
        /* Clear the context id. Here be magic! */
-       I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+       I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
 
        /* Wait for the ring not to be idle, i.e. for it to wake up. */
-       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
-                     GEN6_BSD_SLEEP_INDICATOR) == 0,
-                    50))
+       if (intel_wait_for_register_fw(dev_priv,
+                                      GEN6_BSD_SLEEP_PSMI_CONTROL,
+                                      GEN6_BSD_SLEEP_INDICATOR,
+                                      0,
+                                      50))
                DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
        /* Now that the ring is fully powered up, update the tail */
-       I915_WRITE_TAIL(engine, value);
-       POSTING_READ(RING_TAIL(engine->mmio_base));
+       I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
+       POSTING_READ_FW(RING_TAIL(engine->mmio_base));
 
        /* Let the ring send IDLE messages to the GT again,
         * and so let it sleep to conserve power when idle.
         */
-       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+       I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+                     _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
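The rewritten tail-write sequence above takes forcewake once for the whole critical section and then uses the _FW register accessors, which skip the per-access forcewake handling that plain I915_READ/I915_WRITE perform; the explicit bracketing is what keeps the raw accesses safe:

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	/* a run of I915_WRITE_FW()/I915_READ_FW() accesses, each assuming
	 * the hardware has already been forced awake */
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);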
@@ -2808,11 +2686,159 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
        return 0;
 }
 
+static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
+                                      struct intel_engine_cs *engine)
+{
+       struct drm_i915_gem_object *obj;
+       int ret, i;
+
+       if (!i915_semaphore_is_enabled(dev_priv))
+               return;
+
+       if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
+               obj = i915_gem_object_create(&dev_priv->drm, 4096);
+               if (IS_ERR(obj)) {
+                       DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+                       i915.semaphores = 0;
+               } else {
+                       i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+                       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+                       if (ret != 0) {
+                               drm_gem_object_unreference(&obj->base);
+                               DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+                               i915.semaphores = 0;
+                       } else {
+                               dev_priv->semaphore_obj = obj;
+                       }
+               }
+       }
+
+       if (!i915_semaphore_is_enabled(dev_priv))
+               return;
+
+       if (INTEL_GEN(dev_priv) >= 8) {
+               u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
+
+               engine->semaphore.sync_to = gen8_ring_sync;
+               engine->semaphore.signal = gen8_xcs_signal;
+
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
+                       u64 ring_offset;
+
+                       if (i != engine->id)
+                               ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
+                       else
+                               ring_offset = MI_SEMAPHORE_SYNC_INVALID;
+
+                       engine->semaphore.signal_ggtt[i] = ring_offset;
+               }
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               engine->semaphore.sync_to = gen6_ring_sync;
+               engine->semaphore.signal = gen6_signal;
+
+               /*
+                * The current semaphore is only applied on pre-gen8
+                * platform.  And there is no VCS2 ring on the pre-gen8
+                * platform. So the semaphore between RCS and VCS2 is
+                * initialized as INVALID.  Gen8 will initialize the
+                * sema between VCS2 and RCS later.
+                */
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
+                       static const struct {
+                               u32 wait_mbox;
+                               i915_reg_t mbox_reg;
+                       } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
+                               [RCS] = {
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+                               },
+                               [VCS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+                               },
+                               [BCS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+                               },
+                               [VECS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+                               },
+                       };
+                       u32 wait_mbox;
+                       i915_reg_t mbox_reg;
+
+                       if (i == engine->id || i == VCS2) {
+                               wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
+                               mbox_reg = GEN6_NOSYNC;
+                       } else {
+                               wait_mbox = sem_data[engine->id][i].wait_mbox;
+                               mbox_reg = sem_data[engine->id][i].mbox_reg;
+                       }
+
+                       engine->semaphore.mbox.wait[i] = wait_mbox;
+                       engine->semaphore.mbox.signal[i] = mbox_reg;
+               }
+       }
+}
+
+static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
+                               struct intel_engine_cs *engine)
+{
+       if (INTEL_GEN(dev_priv) >= 8) {
+               engine->irq_enable = gen8_irq_enable;
+               engine->irq_disable = gen8_irq_disable;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               engine->irq_enable = gen6_irq_enable;
+               engine->irq_disable = gen6_irq_disable;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 5) {
+               engine->irq_enable = gen5_irq_enable;
+               engine->irq_disable = gen5_irq_disable;
+               engine->irq_seqno_barrier = gen5_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 3) {
+               engine->irq_enable = i9xx_irq_enable;
+               engine->irq_disable = i9xx_irq_disable;
+       } else {
+               engine->irq_enable = i8xx_irq_enable;
+               engine->irq_disable = i8xx_irq_disable;
+       }
+}
+
+static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
+                                     struct intel_engine_cs *engine)
+{
+       engine->init_hw = init_ring_common;
+       engine->write_tail = ring_write_tail;
+
+       engine->add_request = i9xx_add_request;
+       if (INTEL_GEN(dev_priv) >= 6)
+               engine->add_request = gen6_add_request;
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+       else if (INTEL_GEN(dev_priv) >= 6)
+               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+       else if (INTEL_GEN(dev_priv) >= 4)
+               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
+       else if (IS_I830(dev_priv) || IS_845G(dev_priv))
+               engine->dispatch_execbuffer = i830_dispatch_execbuffer;
+       else
+               engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+
+       intel_ring_init_irq(dev_priv, engine);
+       intel_ring_init_semaphores(dev_priv, engine);
+}
+
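With intel_ring_default_vfuncs() (plus intel_ring_init_irq() and intel_ring_init_semaphores() inside it) centralising the generation-dependent defaults, each engine constructor below reduces to its identity fields, one defaults call, and a handful of genuine overrides; the BSD ring later in this diff ends up roughly:

	engine->name = "bsd ring";
	engine->id = VCS;
	engine->mmio_base = GEN6_BSD_RING_BASE;

	intel_ring_default_vfuncs(dev_priv, engine);	/* common vfuncs, irqs, semaphores */

	engine->flush = gen6_bsd_ring_flush;		/* engine-specific deltas only */
	if (IS_GEN6(dev_priv))
		engine->write_tail = gen6_bsd_ring_write_tail;	/* gen6 tail w/a */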
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
-       struct drm_i915_gem_object *obj;
        int ret;
 
        engine->name = "render ring";
@@ -2821,139 +2847,49 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        engine->hw_id = 0;
        engine->mmio_base = RENDER_RING_BASE;
 
-       if (INTEL_GEN(dev_priv) >= 8) {
-               if (i915_semaphore_is_enabled(dev_priv)) {
-                       obj = i915_gem_object_create(dev, 4096);
-                       if (IS_ERR(obj)) {
-                               DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
-                               i915.semaphores = 0;
-                       } else {
-                               i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-                               ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
-                               if (ret != 0) {
-                                       drm_gem_object_unreference(&obj->base);
-                                       DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
-                                       i915.semaphores = 0;
-                               } else
-                                       dev_priv->semaphore_obj = obj;
-                       }
-               }
+       intel_ring_default_vfuncs(dev_priv, engine);
+
+       engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+       if (HAS_L3_DPF(dev_priv))
+               engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
+       if (INTEL_GEN(dev_priv) >= 8) {
                engine->init_context = intel_rcs_ctx_init;
                engine->add_request = gen8_render_add_request;
                engine->flush = gen8_render_ring_flush;
-               engine->irq_get = gen8_ring_get_irq;
-               engine->irq_put = gen8_ring_put_irq;
-               engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (i915_semaphore_is_enabled(dev_priv)) {
-                       WARN_ON(!dev_priv->semaphore_obj);
-                       engine->semaphore.sync_to = gen8_ring_sync;
+               if (i915_semaphore_is_enabled(dev_priv))
                        engine->semaphore.signal = gen8_rcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT(engine);
-               }
        } else if (INTEL_GEN(dev_priv) >= 6) {
                engine->init_context = intel_rcs_ctx_init;
-               engine->add_request = gen6_add_request;
                engine->flush = gen7_render_ring_flush;
                if (IS_GEN6(dev_priv))
                        engine->flush = gen6_render_ring_flush;
-               engine->irq_get = gen6_ring_get_irq;
-               engine->irq_put = gen6_ring_put_irq;
-               engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-               engine->irq_seqno_barrier = gen6_seqno_barrier;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (i915_semaphore_is_enabled(dev_priv)) {
-                       engine->semaphore.sync_to = gen6_ring_sync;
-                       engine->semaphore.signal = gen6_signal;
-                       /*
-                        * The current semaphore is only applied on pre-gen8
-                        * platform.  And there is no VCS2 ring on the pre-gen8
-                        * platform. So the semaphore between RCS and VCS2 is
-                        * initialized as INVALID.  Gen8 will initialize the
-                        * sema between VCS2 and RCS later.
-                        */
-                       engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
-                       engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
-                       engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
-                       engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-                       engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
-                       engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
-                       engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
-                       engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
        } else if (IS_GEN5(dev_priv)) {
-               engine->add_request = pc_render_add_request;
                engine->flush = gen4_render_ring_flush;
-               engine->get_seqno = pc_render_get_seqno;
-               engine->set_seqno = pc_render_set_seqno;
-               engine->irq_get = gen5_ring_get_irq;
-               engine->irq_put = gen5_ring_put_irq;
-               engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
-                                       GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
        } else {
-               engine->add_request = i9xx_add_request;
                if (INTEL_GEN(dev_priv) < 4)
                        engine->flush = gen2_render_ring_flush;
                else
                        engine->flush = gen4_render_ring_flush;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (IS_GEN2(dev_priv)) {
-                       engine->irq_get = i8xx_ring_get_irq;
-                       engine->irq_put = i8xx_ring_put_irq;
-               } else {
-                       engine->irq_get = i9xx_ring_get_irq;
-                       engine->irq_put = i9xx_ring_put_irq;
-               }
                engine->irq_enable_mask = I915_USER_INTERRUPT;
        }
-       engine->write_tail = ring_write_tail;
 
        if (IS_HASWELL(dev_priv))
                engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
-       else if (IS_GEN8(dev_priv))
-               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-       else if (INTEL_GEN(dev_priv) >= 6)
-               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-       else if (INTEL_GEN(dev_priv) >= 4)
-               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
-       else if (IS_I830(dev_priv) || IS_845G(dev_priv))
-               engine->dispatch_execbuffer = i830_dispatch_execbuffer;
-       else
-               engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+
        engine->init_hw = init_render_ring;
        engine->cleanup = render_ring_cleanup;
 
-       /* Workaround batchbuffer to combat CS tlb bug. */
-       if (HAS_BROKEN_CS_TLB(dev_priv)) {
-               obj = i915_gem_object_create(dev, I830_WA_SIZE);
-               if (IS_ERR(obj)) {
-                       DRM_ERROR("Failed to allocate batch bo\n");
-                       return PTR_ERR(obj);
-               }
-
-               ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
-               if (ret != 0) {
-                       drm_gem_object_unreference(&obj->base);
-                       DRM_ERROR("Failed to ping batch bo\n");
-                       return ret;
-               }
-
-               engine->scratch.obj = obj;
-               engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
-       }
-
        ret = intel_init_ring_buffer(dev, engine);
        if (ret)
                return ret;
 
-       if (INTEL_GEN(dev_priv) >= 5) {
-               ret = intel_init_pipe_control(engine);
+       if (INTEL_GEN(dev_priv) >= 6) {
+               ret = intel_init_pipe_control(engine, 4096);
+               if (ret)
+                       return ret;
+       } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
+               ret = intel_init_pipe_control(engine, I830_WA_SIZE);
                if (ret)
                        return ret;
        }
@@ -2963,7 +2899,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[VCS];
 
        engine->name = "bsd ring";
@@ -2971,68 +2907,27 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
        engine->exec_id = I915_EXEC_BSD;
        engine->hw_id = 1;
 
-       engine->write_tail = ring_write_tail;
+       intel_ring_default_vfuncs(dev_priv, engine);
+
        if (INTEL_GEN(dev_priv) >= 6) {
                engine->mmio_base = GEN6_BSD_RING_BASE;
                /* gen6 bsd needs a special wa for tail updates */
                if (IS_GEN6(dev_priv))
                        engine->write_tail = gen6_bsd_ring_write_tail;
                engine->flush = gen6_bsd_ring_flush;
-               engine->add_request = gen6_add_request;
-               engine->irq_seqno_barrier = gen6_seqno_barrier;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (INTEL_GEN(dev_priv) >= 8) {
+               if (INTEL_GEN(dev_priv) >= 8)
                        engine->irq_enable_mask =
                                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-                       engine->irq_get = gen8_ring_get_irq;
-                       engine->irq_put = gen8_ring_put_irq;
-                       engine->dispatch_execbuffer =
-                               gen8_ring_dispatch_execbuffer;
-                       if (i915_semaphore_is_enabled(dev_priv)) {
-                               engine->semaphore.sync_to = gen8_ring_sync;
-                               engine->semaphore.signal = gen8_xcs_signal;
-                               GEN8_RING_SEMAPHORE_INIT(engine);
-                       }
-               } else {
+               else
                        engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-                       engine->irq_get = gen6_ring_get_irq;
-                       engine->irq_put = gen6_ring_put_irq;
-                       engine->dispatch_execbuffer =
-                               gen6_ring_dispatch_execbuffer;
-                       if (i915_semaphore_is_enabled(dev_priv)) {
-                               engine->semaphore.sync_to = gen6_ring_sync;
-                               engine->semaphore.signal = gen6_signal;
-                               engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
-                               engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-                               engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
-                               engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
-                               engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                               engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
-                               engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-                               engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
-                               engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
-                               engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-                       }
-               }
        } else {
                engine->mmio_base = BSD_RING_BASE;
                engine->flush = bsd_ring_flush;
-               engine->add_request = i9xx_add_request;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (IS_GEN5(dev_priv)) {
+               if (IS_GEN5(dev_priv))
                        engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
-                       engine->irq_get = gen5_ring_get_irq;
-                       engine->irq_put = gen5_ring_put_irq;
-               } else {
+               else
                        engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
-                       engine->irq_get = i9xx_ring_get_irq;
-                       engine->irq_put = i9xx_ring_put_irq;
-               }
-               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
        }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
@@ -3042,147 +2937,70 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
  */
 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
 
        engine->name = "bsd2 ring";
        engine->id = VCS2;
        engine->exec_id = I915_EXEC_BSD;
        engine->hw_id = 4;
-
-       engine->write_tail = ring_write_tail;
        engine->mmio_base = GEN8_BSD2_RING_BASE;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
        engine->flush = gen6_bsd_ring_flush;
-       engine->add_request = gen6_add_request;
-       engine->irq_seqno_barrier = gen6_seqno_barrier;
-       engine->get_seqno = ring_get_seqno;
-       engine->set_seqno = ring_set_seqno;
        engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-       engine->irq_get = gen8_ring_get_irq;
-       engine->irq_put = gen8_ring_put_irq;
-       engine->dispatch_execbuffer =
-                       gen8_ring_dispatch_execbuffer;
-       if (i915_semaphore_is_enabled(dev_priv)) {
-               engine->semaphore.sync_to = gen8_ring_sync;
-               engine->semaphore.signal = gen8_xcs_signal;
-               GEN8_RING_SEMAPHORE_INIT(engine);
-       }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
 
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[BCS];
 
        engine->name = "blitter ring";
        engine->id = BCS;
        engine->exec_id = I915_EXEC_BLT;
        engine->hw_id = 2;
-
        engine->mmio_base = BLT_RING_BASE;
-       engine->write_tail = ring_write_tail;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
        engine->flush = gen6_ring_flush;
-       engine->add_request = gen6_add_request;
-       engine->irq_seqno_barrier = gen6_seqno_barrier;
-       engine->get_seqno = ring_get_seqno;
-       engine->set_seqno = ring_set_seqno;
-       if (INTEL_GEN(dev_priv) >= 8) {
+       if (INTEL_GEN(dev_priv) >= 8)
                engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-               engine->irq_get = gen8_ring_get_irq;
-               engine->irq_put = gen8_ring_put_irq;
-               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev_priv)) {
-                       engine->semaphore.sync_to = gen8_ring_sync;
-                       engine->semaphore.signal = gen8_xcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT(engine);
-               }
-       } else {
+       else
                engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-               engine->irq_get = gen6_ring_get_irq;
-               engine->irq_put = gen6_ring_put_irq;
-               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev_priv)) {
-                       engine->semaphore.signal = gen6_signal;
-                       engine->semaphore.sync_to = gen6_ring_sync;
-                       /*
-                        * The current semaphore is only applied on pre-gen8
-                        * platform.  And there is no VCS2 ring on the pre-gen8
-                        * platform. So the semaphore between BCS and VCS2 is
-                        * initialized as INVALID.  Gen8 will initialize the
-                        * sema between BCS and VCS2 later.
-                        */
-                       engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
-                       engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
-                       engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
-                       engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
-                       engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
-                       engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-                       engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
-                       engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
-       }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
 
 int intel_init_vebox_ring_buffer(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[VECS];
 
        engine->name = "video enhancement ring";
        engine->id = VECS;
        engine->exec_id = I915_EXEC_VEBOX;
        engine->hw_id = 3;
-
        engine->mmio_base = VEBOX_RING_BASE;
-       engine->write_tail = ring_write_tail;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
        engine->flush = gen6_ring_flush;
-       engine->add_request = gen6_add_request;
-       engine->irq_seqno_barrier = gen6_seqno_barrier;
-       engine->get_seqno = ring_get_seqno;
-       engine->set_seqno = ring_set_seqno;
 
        if (INTEL_GEN(dev_priv) >= 8) {
                engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-               engine->irq_get = gen8_ring_get_irq;
-               engine->irq_put = gen8_ring_put_irq;
-               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev_priv)) {
-                       engine->semaphore.sync_to = gen8_ring_sync;
-                       engine->semaphore.signal = gen8_xcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT(engine);
-               }
        } else {
                engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-               engine->irq_get = hsw_vebox_get_irq;
-               engine->irq_put = hsw_vebox_put_irq;
-               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev_priv)) {
-                       engine->semaphore.sync_to = gen6_ring_sync;
-                       engine->semaphore.signal = gen6_signal;
-                       engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
-                       engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
-                       engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
-                       engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
-                       engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
-                       engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
-                       engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-                       engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
+               engine->irq_enable = hsw_vebox_irq_enable;
+               engine->irq_disable = hsw_vebox_irq_disable;
        }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
index b33c876fed20d7f0fd29cc05b8ddc2ebff1e4bbd..12cb7ed90014bd7a1b974a026b73487d0d3e2d1a 100644 (file)
@@ -62,18 +62,6 @@ struct  intel_hw_status_page {
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
         GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
 
-#define GEN8_RING_SEMAPHORE_INIT(e) do { \
-       if (!dev_priv->semaphore_obj) { \
-               break; \
-       } \
-       (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
-       (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
-       (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
-       (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
-       (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
-       (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
-       } while(0)
-
 enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
@@ -86,8 +74,8 @@ enum intel_ring_hangcheck_action {
 
 struct intel_ring_hangcheck {
        u64 acthd;
+       unsigned long user_interrupts;
        u32 seqno;
-       unsigned user_interrupts;
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
@@ -141,6 +129,8 @@ struct  i915_ctx_workarounds {
        struct drm_i915_gem_object *obj;
 };
 
+struct drm_i915_gem_request;
+
 struct intel_engine_cs {
        struct drm_i915_private *i915;
        const char      *name;
@@ -160,6 +150,39 @@ struct intel_engine_cs {
        struct intel_ringbuffer *buffer;
        struct list_head buffers;
 
+       /* Rather than have every client wait upon all user interrupts,
+        * with the herd waking after every interrupt and each doing the
+        * heavyweight seqno dance, we delegate the task (of being the
+        * bottom-half of the user interrupt) to the first client. After
+        * every interrupt, we wake up one client, who does the heavyweight
+        * coherent seqno read and either goes back to sleep (if incomplete),
+        * or wakes up all the completed clients in parallel, before then
+        * transferring the bottom-half status to the next client in the queue.
+        *
+        * Compared to walking the entire list of waiters in a single dedicated
+        * bottom-half, we reduce the latency of the first waiter by avoiding
+        * a context switch, but incur additional coherent seqno reads when
+        * following the chain of request breadcrumbs. Since it is most likely
+        * that we have a single client waiting on each seqno, reducing the
+        * overhead of waking that client is much preferred.
+        */
+       struct intel_breadcrumbs {
+               struct task_struct *irq_seqno_bh; /* bh for user interrupts */
+               unsigned long irq_wakeups;
+               bool irq_posted;
+
+               spinlock_t lock; /* protects the lists of requests */
+               struct rb_root waiters; /* sorted by retirement, priority */
+               struct rb_root signals; /* sorted by retirement */
+               struct intel_wait *first_wait; /* oldest waiter by retirement */
+               struct task_struct *signaler; /* used for fence signalling */
+               struct drm_i915_gem_request *first_signal;
+               struct timer_list fake_irq; /* used after a missed interrupt */
+
+               bool irq_enabled : 1;
+               bool rpm_wakelock : 1;
+       } breadcrumbs;
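
As a rough illustration of the delegation scheme described in the comment above, a waiter built on the helpers declared at the bottom of this header (intel_wait_init(), intel_engine_add_wait(), intel_engine_remove_wait() and intel_engine_get_seqno()) could look like the sketch below. This is an approximation only, not the driver's actual wait loop; the handling of intel_engine_add_wait()'s return value and the use of i915_seqno_passed() are assumptions.

static void example_wait_for_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct intel_wait wait;

	/* Record the task and target seqno, then join the rbtree of
	 * waiters; whichever waiter currently acts as the interrupt
	 * bottom-half wakes us when our breadcrumb may have landed.
	 */
	intel_wait_init(&wait, seqno);
	intel_engine_add_wait(engine, &wait);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* Coherent seqno read from the status page */
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* Leave the tree, handing bottom-half duty to the next waiter */
	intel_engine_remove_wait(engine, &wait);
}
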
+
        /*
         * A pool of objects to use as shadow copies of client batch buffers
         * when the command parser is enabled. Prevents the client from
@@ -170,11 +193,10 @@ struct intel_engine_cs {
        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;
 
-       unsigned irq_refcount; /* protected by dev_priv->irq_lock */
-       u32             irq_enable_mask;        /* bitmask to enable ring interrupt */
-       struct drm_i915_gem_request *trace_irq_req;
-       bool __must_check (*irq_get)(struct intel_engine_cs *ring);
-       void            (*irq_put)(struct intel_engine_cs *ring);
+       u32             irq_keep_mask; /* always keep these interrupts */
+       u32             irq_enable_mask; /* bitmask to enable ring interrupt */
+       void            (*irq_enable)(struct intel_engine_cs *ring);
+       void            (*irq_disable)(struct intel_engine_cs *ring);
 
        int             (*init_hw)(struct intel_engine_cs *ring);
 
@@ -193,9 +215,6 @@ struct intel_engine_cs {
         * monotonic, even if not coherent.
         */
        void            (*irq_seqno_barrier)(struct intel_engine_cs *ring);
-       u32             (*get_seqno)(struct intel_engine_cs *ring);
-       void            (*set_seqno)(struct intel_engine_cs *ring,
-                                    u32 seqno);
        int             (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
                                               u64 offset, u32 length,
                                               unsigned dispatch_flags);
@@ -272,7 +291,6 @@ struct intel_engine_cs {
        unsigned int idle_lite_restore_wa;
        bool disable_lite_restore_wa;
        u32 ctx_desc_template;
-       u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int             (*emit_request)(struct drm_i915_gem_request *request);
        int             (*emit_flush)(struct drm_i915_gem_request *request,
                                      u32 invalidate_domains,
@@ -304,12 +322,9 @@ struct intel_engine_cs {
         * inspecting request list.
         */
        u32 last_submitted_seqno;
-       unsigned user_interrupts;
 
        bool gpu_caches_dirty;
 
-       wait_queue_head_t irq_queue;
-
        struct i915_gem_context *last_context;
 
        struct intel_ring_hangcheck hangcheck;
@@ -317,7 +332,6 @@ struct intel_engine_cs {
        struct {
                struct drm_i915_gem_object *obj;
                u32 gtt_offset;
-               volatile u32 *cpu_page;
        } scratch;
 
        bool needs_cmd_parser;
@@ -348,13 +362,13 @@ struct intel_engine_cs {
 };
 
 static inline bool
-intel_engine_initialized(struct intel_engine_cs *engine)
+intel_engine_initialized(const struct intel_engine_cs *engine)
 {
        return engine->i915 != NULL;
 }
 
 static inline unsigned
-intel_engine_flag(struct intel_engine_cs *engine)
+intel_engine_flag(const struct intel_engine_cs *engine)
 {
        return 1 << engine->id;
 }
@@ -456,15 +470,14 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
 }
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-bool intel_engine_stopped(struct intel_engine_cs *engine);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
 int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
 int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
-int intel_init_pipe_control(struct intel_engine_cs *engine);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -473,6 +486,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
@@ -495,4 +512,62 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
        return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
 }
 
+/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
+struct intel_wait {
+       struct rb_node node;
+       struct task_struct *tsk;
+       u32 seqno;
+};
+
+struct intel_signal_node {
+       struct rb_node node;
+       struct intel_wait wait;
+};
+
+int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
+
+static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
+{
+       wait->tsk = current;
+       wait->seqno = seqno;
+}
+
+static inline bool intel_wait_complete(const struct intel_wait *wait)
+{
+       return RB_EMPTY_NODE(&wait->node);
+}
+
+bool intel_engine_add_wait(struct intel_engine_cs *engine,
+                          struct intel_wait *wait);
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+                             struct intel_wait *wait);
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
+
+static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
+{
+       return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+}
+
+static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
+{
+       bool wakeup = false;
+       struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+       /* Note that for this not to dangerously chase a dangling pointer,
+        * the caller is responsible for ensuring that the task remains valid
+        * for wake_up_process(), i.e. that the RCU grace period cannot expire.
+        *
+        * Also note that tsk is likely to be in !TASK_RUNNING state so an
+        * early test for tsk->state != TASK_RUNNING before wake_up_process()
+        * is unlikely to be beneficial.
+        */
+       if (tsk)
+               wakeup = wake_up_process(tsk);
+       return wakeup;
+}
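
On the interrupt side, the handler only has to publish that a breadcrumb may have landed and kick the current bottom-half. A hedged sketch follows; example_notify_ring() is a hypothetical name and the real notify path lives in the interrupt code, but irq_posted, intel_engine_has_waiter() and intel_engine_wakeup() are the pieces introduced here.

static void example_notify_ring(struct intel_engine_cs *engine)
{
	/* Nobody is waiting; nothing to do for this interrupt. */
	if (!intel_engine_has_waiter(engine))
		return;

	/* Publish the interrupt (store + memory barrier) before waking
	 * the bottom-half, so the woken waiter re-reads the seqno.
	 */
	smp_store_mb(engine->breadcrumbs.irq_posted, true);
	intel_engine_wakeup(engine);
}
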
+
+void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+unsigned int intel_kick_waiters(struct drm_i915_private *i915);
+unsigned int intel_kick_signalers(struct drm_i915_private *i915);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
index e856d49d6dc3304a0c3f5feeb9796c37f575ea09..6b78295f53dbf4e290ee16d6154c2a848b7d7518 100644 (file)
@@ -287,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
  */
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        /*
         * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -318,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        /*
         * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -365,8 +365,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 
                if (!is_enabled) {
                        DRM_DEBUG_KMS("Enabling power well\n");
-                       if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
-                                     HSW_PWR_WELL_STATE_ENABLED), 20))
+                       if (intel_wait_for_register(dev_priv,
+                                                   HSW_PWR_WELL_DRIVER,
+                                                   HSW_PWR_WELL_STATE_ENABLED,
+                                                   HSW_PWR_WELL_STATE_ENABLED,
+                                                   20))
                                DRM_ERROR("Timeout enabling power well\n");
                        hsw_power_well_post_enable(dev_priv);
                }
@@ -578,6 +581,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 
        DRM_DEBUG_KMS("Enabling DC9\n");
 
+       intel_power_sequencer_reset(dev_priv);
        gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 }
 
@@ -699,8 +703,11 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 
        switch (power_well->data) {
        case SKL_DISP_PW_1:
-               if (wait_for((I915_READ(SKL_FUSE_STATUS) &
-                       SKL_FUSE_PG0_DIST_STATUS), 1)) {
+               if (intel_wait_for_register(dev_priv,
+                                           SKL_FUSE_STATUS,
+                                           SKL_FUSE_PG0_DIST_STATUS,
+                                           SKL_FUSE_PG0_DIST_STATUS,
+                                           1)) {
                        DRM_ERROR("PG0 not enabled\n");
                        return;
                }
@@ -761,12 +768,18 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 
        if (check_fuse_status) {
                if (power_well->data == SKL_DISP_PW_1) {
-                       if (wait_for((I915_READ(SKL_FUSE_STATUS) &
-                               SKL_FUSE_PG1_DIST_STATUS), 1))
+                       if (intel_wait_for_register(dev_priv,
+                                                   SKL_FUSE_STATUS,
+                                                   SKL_FUSE_PG1_DIST_STATUS,
+                                                   SKL_FUSE_PG1_DIST_STATUS,
+                                                   1))
                                DRM_ERROR("PG1 distributing status timeout\n");
                } else if (power_well->data == SKL_DISP_PW_2) {
-                       if (wait_for((I915_READ(SKL_FUSE_STATUS) &
-                               SKL_FUSE_PG2_DIST_STATUS), 1))
+                       if (intel_wait_for_register(dev_priv,
+                                                   SKL_FUSE_STATUS,
+                                                   SKL_FUSE_PG2_DIST_STATUS,
+                                                   SKL_FUSE_PG2_DIST_STATUS,
+                                                   1))
                                DRM_ERROR("PG2 distributing status timeout\n");
                }
        }
@@ -917,7 +930,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
        WARN_ON(dev_priv->cdclk_freq !=
-               dev_priv->display.get_display_clock_speed(dev_priv->dev));
+               dev_priv->display.get_display_clock_speed(&dev_priv->drm));
 
        gen9_assert_dbuf_enabled(dev_priv);
 
@@ -1075,7 +1088,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
         *
         * CHV DPLL B/C have some issues if VGA mode is enabled.
         */
-       for_each_pipe(dev_priv->dev, pipe) {
+       for_each_pipe(&dev_priv->drm, pipe) {
                u32 val = I915_READ(DPLL(pipe));
 
                val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1100,7 +1113,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 
        intel_hpd_init(dev_priv);
 
-       i915_redisable_vga_power_on(dev_priv->dev);
+       i915_redisable_vga_power_on(&dev_priv->drm);
 }
 
 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -1110,9 +1123,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
        spin_unlock_irq(&dev_priv->irq_lock);
 
        /* make sure we're done processing display irqs */
-       synchronize_irq(dev_priv->dev->irq);
+       synchronize_irq(dev_priv->drm.irq);
 
-       vlv_power_sequencer_reset(dev_priv);
+       intel_power_sequencer_reset(dev_priv);
 }
 
 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1205,7 +1218,6 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
        u32 phy_control = dev_priv->chv_phy_control;
        u32 phy_status = 0;
        u32 phy_status_mask = 0xffffffff;
-       u32 tmp;
 
        /*
         * The BIOS can leave the PHY in some weird state
@@ -1293,10 +1305,14 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
         * The PHY may be busy with some initial calibration and whatnot,
         * so the power state can take a while to actually change.
         */
-       if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
-               WARN(phy_status != tmp,
-                    "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
-                    tmp, phy_status, dev_priv->chv_phy_control);
+       if (intel_wait_for_register(dev_priv,
+                                   DISPLAY_PHY_STATUS,
+                                   phy_status_mask,
+                                   phy_status,
+                                   10))
+               DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+                         I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
+                         phy_status, dev_priv->chv_phy_control);
 }
 
 #undef BITS_SET
@@ -1324,7 +1340,11 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
        vlv_set_power_well(dev_priv, power_well, true);
 
        /* Poll for phypwrgood signal */
-       if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+       if (intel_wait_for_register(dev_priv,
+                                   DISPLAY_PHY_STATUS,
+                                   PHY_POWERGOOD(phy),
+                                   PHY_POWERGOOD(phy),
+                                   1))
                DRM_ERROR("Display PHY %d is not power up\n", phy);
 
        mutex_lock(&dev_priv->sb_lock);
@@ -2255,7 +2275,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 {
-       struct device *device = &dev_priv->dev->pdev->dev;
+       struct device *device = &dev_priv->drm.pdev->dev;
 
        /*
         * The i915.ko module is still not prepared to be loaded when
@@ -2556,7 +2576,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
        power_domains->initializing = true;
@@ -2618,7 +2638,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct device *device = &dev->pdev->dev;
 
        pm_runtime_get_sync(device);
@@ -2639,7 +2659,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  */
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct device *device = &dev->pdev->dev;
 
        if (IS_ENABLED(CONFIG_PM)) {
@@ -2681,7 +2701,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct device *device = &dev->pdev->dev;
 
        assert_rpm_wakelock_held(dev_priv);
@@ -2700,7 +2720,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct device *device = &dev->pdev->dev;
 
        assert_rpm_wakelock_held(dev_priv);
@@ -2723,7 +2743,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct device *device = &dev->pdev->dev;
 
        pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
index 02b4a669552887fbfd2f36505113f07e2e712e30..e378f35365a2e5b36dd3c193d0971e0db6f7e1f5 100644 (file)
@@ -240,7 +240,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 {
        struct drm_device *dev = intel_sdvo->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 bval = val, cval = val;
        int i;
 
@@ -1195,7 +1195,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 {
        struct drm_device *dev = intel_encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        struct drm_display_mode *mode = &crtc->config->base.mode;
@@ -1330,7 +1330,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        u16 active_outputs = 0;
        u32 tmp;
@@ -1353,7 +1353,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
                                  struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_sdvo_dtd dtd;
        int encoder_pixel_multiplier = 0;
@@ -1436,7 +1436,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 
 static void intel_disable_sdvo(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        u32 temp;
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
                temp &= ~SDVO_ENABLE;
                intel_sdvo_write_sdvox(intel_sdvo, temp);
 
-               intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+               intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
                intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
        }
@@ -1489,7 +1489,7 @@ static void pch_post_disable_sdvo(struct intel_encoder *encoder)
 static void intel_enable_sdvo(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        u32 temp;
@@ -1633,7 +1633,7 @@ intel_sdvo_get_edid(struct drm_connector *connector)
 static struct edid *
 intel_sdvo_get_analog_edid(struct drm_connector *connector)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
 
        return drm_get_edid(connector,
                            intel_gmbus_get_adapter(dev_priv,
@@ -1916,7 +1916,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct drm_display_mode *newmode;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2001,7 +2001,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 {
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        uint16_t temp_value;
        uint8_t cmd;
        int ret;
@@ -2177,6 +2177,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
 #undef CHECK_PROPERTY
 }
 
+static int
+intel_sdvo_connector_register(struct drm_connector *connector)
+{
+       struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+       int ret;
+
+       ret = intel_connector_register(connector);
+       if (ret)
+               return ret;
+
+       return sysfs_create_link(&connector->kdev->kobj,
+                                &sdvo->ddc.dev.kobj,
+                                sdvo->ddc.dev.kobj.name);
+}
+
 static void
 intel_sdvo_connector_unregister(struct drm_connector *connector)
 {
@@ -2193,6 +2208,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_sdvo_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
+       .late_register = intel_sdvo_connector_register,
        .early_unregister = intel_sdvo_connector_unregister,
        .destroy = intel_sdvo_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -2322,7 +2338,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 static u8
 intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct sdvo_device_mapping *my_mapping, *other_mapping;
 
        if (sdvo->port == PORT_B) {
@@ -2380,24 +2396,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
        connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
 
        intel_connector_attach_encoder(&connector->base, &encoder->base);
-       ret = drm_connector_register(drm_connector);
-       if (ret < 0)
-               goto err1;
-
-       ret = sysfs_create_link(&drm_connector->kdev->kobj,
-                               &encoder->ddc.dev.kobj,
-                               encoder->ddc.dev.kobj.name);
-       if (ret < 0)
-               goto err2;
 
        return 0;
-
-err2:
-       drm_connector_unregister(drm_connector);
-err1:
-       drm_connector_cleanup(drm_connector);
-
-       return ret;
 }
 
 static void
@@ -2524,7 +2524,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
        return true;
 
 err:
-       drm_connector_unregister(connector);
        intel_sdvo_destroy(connector);
        return false;
 }
@@ -2603,7 +2602,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
        return true;
 
 err:
-       drm_connector_unregister(connector);
        intel_sdvo_destroy(connector);
        return false;
 }
@@ -2954,7 +2952,7 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
 bool intel_sdvo_init(struct drm_device *dev,
                     i915_reg_t sdvo_reg, enum port port)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *intel_encoder;
        struct intel_sdvo *intel_sdvo;
        int i;
index c3998188cf35de5111c735282106da8f59c9c5c9..1a840bf92eeac888f2baadae0ef257b8d3f8bdb0 100644 (file)
@@ -51,7 +51,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
 
        WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-       if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+       if (intel_wait_for_register(dev_priv,
+                                   VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+                                   5)) {
                DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
                                 is_read ? "read" : "write");
                return -EAGAIN;
@@ -62,7 +64,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
                I915_WRITE(VLV_IOSF_DATA, *val);
        I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
 
-       if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+       if (intel_wait_for_register(dev_priv,
+                                   VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+                                   5)) {
                DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
                                 is_read ? "read" : "write");
                return -ETIMEDOUT;
@@ -202,8 +206,9 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
        u32 value = 0;
        WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-                               100)) {
+       if (intel_wait_for_register(dev_priv,
+                                   SBI_CTL_STAT, SBI_BUSY, 0,
+                                   100)) {
                DRM_ERROR("timeout waiting for SBI to become ready\n");
                return 0;
        }
@@ -216,8 +221,11 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
                value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
        I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
-       if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
-                               100)) {
+       if (intel_wait_for_register(dev_priv,
+                                   SBI_CTL_STAT,
+                                   SBI_BUSY | SBI_RESPONSE_FAIL,
+                                   0,
+                                   100)) {
                DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
                return 0;
        }
@@ -232,8 +240,9 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 
        WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-                               100)) {
+       if (intel_wait_for_register(dev_priv,
+                                   SBI_CTL_STAT, SBI_BUSY, 0,
+                                   100)) {
                DRM_ERROR("timeout waiting for SBI to become ready\n");
                return;
        }
@@ -247,8 +256,11 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
                tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
        I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
-       if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
-                               100)) {
+       if (intel_wait_for_register(dev_priv,
+                                   SBI_CTL_STAT,
+                                   SBI_BUSY | SBI_RESPONSE_FAIL,
+                                   0,
+                                   100)) {
                DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
                return;
        }
index fc654173c491220f05330cfa827dfe24f844fcb1..0de935ad01c2ae78f4e6c0bbfc77860e191b876d 100644 (file)
@@ -199,7 +199,7 @@ skl_update_plane(struct drm_plane *drm_plane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = drm_plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(drm_plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -303,7 +303,7 @@ static void
 skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = dplane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(dplane);
        const int pipe = intel_plane->pipe;
        const int plane = intel_plane->plane + 1;
@@ -317,7 +317,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 static void
 chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
 {
-       struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
        int plane = intel_plane->plane;
 
        /* Seems RGB data bypasses the CSC always */
@@ -359,7 +359,7 @@ vlv_update_plane(struct drm_plane *dplane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = dplane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(dplane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -485,7 +485,7 @@ static void
 vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = dplane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(dplane);
        int pipe = intel_plane->pipe;
        int plane = intel_plane->plane;
@@ -502,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -624,7 +624,7 @@ static void
 ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        int pipe = intel_plane->pipe;
 
@@ -643,7 +643,7 @@ ilk_update_plane(struct drm_plane *plane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -753,7 +753,7 @@ static void
 ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        int pipe = intel_plane->pipe;
 
index 4ce70a9f9df2daa92e2704c13eef4e91b94426ef..49136ad5473ede52222ac7ec383d766a377bab5c 100644 (file)
@@ -826,7 +826,7 @@ static bool
 intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp = I915_READ(TV_CTL);
 
        if (!(tmp & TV_ENC_ENABLE))
@@ -841,7 +841,7 @@ static void
 intel_enable_tv(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Prevents vblank waits from timing out in intel_tv_detect_type() */
        intel_wait_for_vblank(encoder->base.dev,
@@ -854,7 +854,7 @@ static void
 intel_disable_tv(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
 }
@@ -1013,7 +1013,7 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
 static void intel_tv_pre_enable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_tv *intel_tv = enc_to_tv(encoder);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1173,7 +1173,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
        struct drm_crtc *crtc = connector->state->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tv_ctl, save_tv_ctl;
        u32 tv_dac, save_tv_dac;
        int type;
@@ -1501,6 +1501,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_tv_detect,
+       .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_tv_destroy,
        .set_property = intel_tv_set_property,
@@ -1522,7 +1523,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
 void
 intel_tv_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_connector *connector;
        struct intel_tv *intel_tv;
        struct intel_encoder *intel_encoder;
@@ -1641,5 +1642,4 @@ intel_tv_init(struct drm_device *dev)
        drm_object_attach_property(&connector->base,
                                   dev->mode_config.tv_bottom_margin_property,
                                   intel_tv->margin[TV_MARGIN_BOTTOM]);
-       drm_connector_register(connector);
 }
index c1ca458d688e508d5413b6807d3e1756930369f4..ff80a81b1a84f929cf9a7aacda59608a23e0dc8a 100644 (file)
@@ -1299,9 +1299,11 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
+               spin_lock_irq(&dev_priv->uncore.lock);
                fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
+               spin_unlock_irq(&dev_priv->uncore.lock);
 
                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1407,7 +1409,7 @@ static const struct register_whitelist {
 int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        unsigned size;
@@ -1469,7 +1471,7 @@ static int i915_reset_complete(struct pci_dev *pdev)
 
 static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-       struct pci_dev *pdev = dev_priv->dev->pdev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
        /* assert reset for at least 20 usec */
        pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
@@ -1488,14 +1490,14 @@ static int g4x_reset_complete(struct pci_dev *pdev)
 
 static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-       struct pci_dev *pdev = dev_priv->dev->pdev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
        return wait_for(g4x_reset_complete(pdev), 500);
 }
 
 static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-       struct pci_dev *pdev = dev_priv->dev->pdev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;
 
        pci_write_config_byte(pdev, I915_GDRST,
@@ -1530,15 +1532,17 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 
        I915_WRITE(ILK_GDSR,
                   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
-       ret = wait_for((I915_READ(ILK_GDSR) &
-                       ILK_GRDOM_RESET_ENABLE) == 0, 500);
+       ret = intel_wait_for_register(dev_priv,
+                                     ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+                                     500);
        if (ret)
                return ret;
 
        I915_WRITE(ILK_GDSR,
                   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
-       ret = wait_for((I915_READ(ILK_GDSR) &
-                       ILK_GRDOM_RESET_ENABLE) == 0, 500);
+       ret = intel_wait_for_register(dev_priv,
+                                     ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+                                     500);
        if (ret)
                return ret;
 
@@ -1551,20 +1555,16 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
                                u32 hw_domain_mask)
 {
-       int ret;
-
        /* GEN6_GDRST is not in the gt power well, no need to check
         * for fifo space for the write or forcewake the chip for
         * the read
         */
        __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
 
-#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
        /* Spin waiting for the device to ack the reset requests */
-       ret = wait_for(ACKED, 500);
-#undef ACKED
-
-       return ret;
+       return intel_wait_for_register_fw(dev_priv,
+                                         GEN6_GDRST, hw_domain_mask, 0,
+                                         500);
 }
 
 /**
@@ -1609,13 +1609,74 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
        return ret;
 }
 
-static int wait_for_register_fw(struct drm_i915_private *dev_priv,
-                               i915_reg_t reg,
-                               const u32 mask,
-                               const u32 value,
-                               const unsigned long timeout_ms)
+/**
+ * intel_wait_for_register_fw - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ *   (I915_READ_FW(@reg) & @mask) == @value
+ * Otherwise, the wait will time out after @timeout_ms milliseconds.
+ *
+ * Note that this routine assumes the caller holds forcewake asserted; it is
+ * not suitable for very long waits. See intel_wait_for_register() if you
+ * wish to wait without holding forcewake for the duration (i.e. you expect
+ * the wait to be slow).
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
+ */
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+                              i915_reg_t reg,
+                              const u32 mask,
+                              const u32 value,
+                              const unsigned long timeout_ms)
+{
+#define done ((I915_READ_FW(reg) & mask) == value)
+       int ret = wait_for_us(done, 2);
+       if (ret)
+               ret = wait_for(done, timeout_ms);
+       return ret;
+#undef done
+}
+
+/**
+ * intel_wait_for_register - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ *   (I915_READ(@reg) & @mask) == @value
+ * Otherwise, the wait will time out after @timeout_ms milliseconds.
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
+ */
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+                           i915_reg_t reg,
+                           const u32 mask,
+                           const u32 value,
+                           const unsigned long timeout_ms)
 {
-       return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
+       unsigned fw =
+               intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+       int ret;
+
+       intel_uncore_forcewake_get(dev_priv, fw);
+       ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
+       intel_uncore_forcewake_put(dev_priv, fw);
+       if (ret)
+               ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
+                              timeout_ms);
+
+       return ret;
 }
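
For callers, the two variants differ only in who manages forcewake. A sketch with placeholder names (EXAMPLE_STATUS_REG and EXAMPLE_READY_BIT are not real i915 definitions):

/* Poll a hypothetical status register for up to 50ms, letting
 * intel_wait_for_register() take and release the forcewake reference;
 * a caller already inside a forcewake section (e.g. the engine-reset
 * paths) would use intel_wait_for_register_fw() instead.
 */
static int example_wait_ready(struct drm_i915_private *dev_priv)
{
	return intel_wait_for_register(dev_priv,
				       EXAMPLE_STATUS_REG,
				       EXAMPLE_READY_BIT,
				       EXAMPLE_READY_BIT,
				       50);
}
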
 
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
@@ -1626,11 +1687,11 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
                      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
 
-       ret = wait_for_register_fw(dev_priv,
-                                  RING_RESET_CTL(engine->mmio_base),
-                                  RESET_CTL_READY_TO_RESET,
-                                  RESET_CTL_READY_TO_RESET,
-                                  700);
+       ret = intel_wait_for_register_fw(dev_priv,
+                                        RING_RESET_CTL(engine->mmio_base),
+                                        RESET_CTL_READY_TO_RESET,
+                                        RESET_CTL_READY_TO_RESET,
+                                        700);
        if (ret)
                DRM_ERROR("%s: reset request timeout\n", engine->name);
 
index a24631fdf4add310de0eeadc7001c6a12a711866..359cd276555264fcc5346a1dfc1e54da85192f61 100644 (file)
@@ -28,6 +28,11 @@ struct imx_hdmi {
        struct regmap *regmap;
 };
 
+static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
+{
+       return container_of(e, struct imx_hdmi, encoder);
+}
+
 static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
        {
                45250000, {
@@ -109,15 +114,9 @@ static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
 {
 }
 
-static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder,
-                                        struct drm_display_mode *mode,
-                                        struct drm_display_mode *adj_mode)
+static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
 {
-}
-
-static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
-{
-       struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+       struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
        int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder);
 
        regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
@@ -125,16 +124,23 @@ static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
                           mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
 }
 
-static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
+static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,
+                                   struct drm_crtc_state *crtc_state,
+                                   struct drm_connector_state *conn_state)
 {
-       imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24);
+       struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+
+       imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+       imx_crtc_state->di_hsync_pin = 2;
+       imx_crtc_state->di_vsync_pin = 3;
+
+       return 0;
 }
 
 static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
-       .mode_set   = dw_hdmi_imx_encoder_mode_set,
-       .prepare    = dw_hdmi_imx_encoder_prepare,
-       .commit     = dw_hdmi_imx_encoder_commit,
+       .enable     = dw_hdmi_imx_encoder_enable,
        .disable    = dw_hdmi_imx_encoder_disable,
+       .atomic_check = dw_hdmi_imx_atomic_check,
 };
 
 static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
index 7746418a4c08597a219e611500bb87200bd32135..9f7dafce3a4cf8aa94e5ac8c64ca210124609fbd 100644 (file)
  */
 #include <linux/component.h>
 #include <linux/device.h>
+#include <linux/dma-buf.h>
 #include <linux/fb.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/reservation.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -41,6 +45,7 @@ struct imx_drm_device {
        struct imx_drm_crtc                     *crtc[MAX_CRTC];
        unsigned int                            pipes;
        struct drm_fbdev_cma                    *fbhelper;
+       struct drm_atomic_state                 *state;
 };
 
 struct imx_drm_crtc {
@@ -85,45 +90,6 @@ static int imx_drm_driver_unload(struct drm_device *drm)
        return 0;
 }
 
-static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
-{
-       struct imx_drm_device *imxdrm = crtc->dev->dev_private;
-       unsigned i;
-
-       for (i = 0; i < MAX_CRTC; i++)
-               if (imxdrm->crtc[i] && imxdrm->crtc[i]->crtc == crtc)
-                       return imxdrm->crtc[i];
-
-       return NULL;
-}
-
-int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
-               int hsync_pin, int vsync_pin, u32 bus_flags)
-{
-       struct imx_drm_crtc_helper_funcs *helper;
-       struct imx_drm_crtc *imx_crtc;
-
-       imx_crtc = imx_drm_find_crtc(encoder->crtc);
-       if (!imx_crtc)
-               return -EINVAL;
-
-       helper = &imx_crtc->imx_drm_helper_funcs;
-       if (helper->set_interface_pix_fmt)
-               return helper->set_interface_pix_fmt(encoder->crtc,
-                                       bus_format, hsync_pin, vsync_pin,
-                                       bus_flags);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
-
-int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
-{
-       return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
-                                     DRM_BUS_FLAG_DE_HIGH |
-                                     DRM_BUS_FLAG_PIXDATA_NEGEDGE);
-}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
-
 int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
 {
        return drm_crtc_vblank_get(imx_drm_crtc->crtc);
@@ -208,6 +174,63 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
        .fb_create = drm_fb_cma_create,
        .output_poll_changed = imx_drm_output_poll_changed,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_plane_state *plane_state;
+       struct drm_gem_cma_object *cma_obj;
+       struct fence *excl;
+       unsigned shared_count;
+       struct fence **shared;
+       unsigned int i, j;
+       int ret;
+
+       /* Wait for fences. */
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               plane_state = crtc->primary->state;
+               if (plane_state->fb) {
+                       cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
+                       if (cma_obj->base.dma_buf) {
+                               ret = reservation_object_get_fences_rcu(
+                                       cma_obj->base.dma_buf->resv, &excl,
+                                       &shared_count, &shared);
+                               if (unlikely(ret)) {
+                                       DRM_ERROR("failed to get fences for buffer\n");
+                                       continue;
+                               }
+
+                               if (excl) {
+                                       fence_wait(excl, false);
+                                       fence_put(excl);
+                               }
+                               for (j = 0; j < shared_count; j++) {
+                                       fence_wait(shared[j], false);
+                                       fence_put(shared[j]);
+                               }
+                               kfree(shared);
+                       }
+               }
+       }
+
+       drm_atomic_helper_commit_modeset_disables(dev, state);
+
+       drm_atomic_helper_commit_planes(dev, state, true);
+
+       drm_atomic_helper_commit_modeset_enables(dev, state);
+
+       drm_atomic_helper_commit_hw_done(state);
+
+       drm_atomic_helper_wait_for_vblanks(dev, state);
+
+       drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
+       .atomic_commit_tail = imx_drm_atomic_commit_tail,
 };
 
 /*
@@ -249,6 +272,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
        drm->mode_config.max_width = 4096;
        drm->mode_config.max_height = 4096;
        drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+       drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
 
        drm_mode_config_init(drm);
 
@@ -279,6 +303,8 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
                }
        }
 
+       drm_mode_config_reset(drm);
+
        /*
         * All components are now initialised, so setup the fb helper.
         * The fb helper takes copies of key hardware information, so the
@@ -289,7 +315,6 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
                dev_warn(drm->dev, "Invalid legacyfb_depth.  Defaulting to 16bpp\n");
                legacyfb_depth = 16;
        }
-       drm_helper_disable_unused_functions(drm);
        imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
                                drm->mode_config.num_crtc, MAX_CRTC);
        if (IS_ERR(imxdrm->fbhelper)) {
@@ -403,7 +428,8 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
 };
 
 static struct drm_driver imx_drm_driver = {
-       .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+       .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+                                 DRIVER_ATOMIC,
        .load                   = imx_drm_driver_load,
        .unload                 = imx_drm_driver_unload,
        .lastclose              = imx_drm_driver_lastclose,
@@ -491,6 +517,7 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
 static int imx_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct imx_drm_device *imxdrm;
 
        /* The drm_dev is NULL before .load hook is called */
        if (drm_dev == NULL)
@@ -498,17 +525,26 @@ static int imx_drm_suspend(struct device *dev)
 
        drm_kms_helper_poll_disable(drm_dev);
 
+       imxdrm = drm_dev->dev_private;
+       imxdrm->state = drm_atomic_helper_suspend(drm_dev);
+       if (IS_ERR(imxdrm->state)) {
+               drm_kms_helper_poll_enable(drm_dev);
+               return PTR_ERR(imxdrm->state);
+       }
+
        return 0;
 }
 
 static int imx_drm_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct imx_drm_device *imxdrm;
 
        if (drm_dev == NULL)
                return 0;
 
-       drm_helper_resume_force_mode(drm_dev);
+       imxdrm = drm_dev->dev_private;
+       drm_atomic_helper_resume(drm_dev, imxdrm->state);
        drm_kms_helper_poll_enable(drm_dev);
 
        return 0;
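
These two callbacks are assumed to be wired into the driver's dev_pm_ops; a minimal sketch, with the imx_drm_pm_ops name chosen for illustration since the actual table is outside this hunk:

	static SIMPLE_DEV_PM_OPS(imx_drm_pm_ops, imx_drm_suspend, imx_drm_resume);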
index 74320a1723b70122bfd85d34cd5a013cbbe57105..07d33e45f90f5538982697839685f8e0a1adca78 100644 (file)
@@ -15,12 +15,22 @@ struct platform_device;
 
 unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
 
+struct imx_crtc_state {
+       struct drm_crtc_state                   base;
+       u32                                     bus_format;
+       u32                                     bus_flags;
+       int                                     di_hsync_pin;
+       int                                     di_vsync_pin;
+};
+
+static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s)
+{
+       return container_of(s, struct imx_crtc_state, base);
+}
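
Encoders are expected to fill this subclassed state from their atomic_check hooks, and the CRTC consumes it in mode_set_nofb; a condensed producer-side sketch (the real implementations are imx_ldb_encoder_atomic_check and imx_tve_atomic_check below):

	static int example_encoder_atomic_check(struct drm_encoder *encoder,
						struct drm_crtc_state *crtc_state,
						struct drm_connector_state *conn_state)
	{
		struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);

		imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24; /* example value */
		imx_crtc_state->di_hsync_pin = 2;
		imx_crtc_state->di_vsync_pin = 3;

		return 0;
	}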
+
 struct imx_drm_crtc_helper_funcs {
        int (*enable_vblank)(struct drm_crtc *crtc);
        void (*disable_vblank)(struct drm_crtc *crtc);
-       int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
-                       u32 bus_format, int hsync_pin, int vsync_pin,
-                       u32 bus_flags);
        const struct drm_crtc_helper_funcs *crtc_helper_funcs;
        const struct drm_crtc_funcs *crtc_funcs;
 };
@@ -42,11 +52,6 @@ void imx_drm_mode_config_init(struct drm_device *drm);
 
 struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
 
-int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
-               int hsync_pin, int vsync_pin, u32 bus_flags);
-int imx_drm_set_bus_format(struct drm_encoder *encoder,
-               u32 bus_format);
-
 int imx_drm_encoder_parse_of(struct drm_device *drm,
        struct drm_encoder *encoder, struct device_node *np);
 
index beff793bb717592daf045b45e171ca72dab737bd..5d2831dfb8b9842acff6dd194337fee8bacee1f7 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
@@ -49,9 +51,6 @@
 #define LDB_DI1_VS_POL_ACT_LOW         (1 << 10)
 #define LDB_BGREF_RMODE_INT            (1 << 15)
 
-#define con_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, connector)
-#define enc_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, encoder)
-
 struct imx_ldb;
 
 struct imx_ldb_channel {
@@ -66,9 +65,19 @@ struct imx_ldb_channel {
        int edid_len;
        struct drm_display_mode mode;
        int mode_valid;
-       int bus_format;
+       u32 bus_format;
 };
 
+static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
+{
+       return container_of(c, struct imx_ldb_channel, connector);
+}
+
+static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
+{
+       return container_of(e, struct imx_ldb_channel, encoder);
+}
+
 struct bus_mux {
        int reg;
        int shift;
@@ -93,6 +102,32 @@ static enum drm_connector_status imx_ldb_connector_detect(
        return connector_status_connected;
 }
 
+static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
+                                     u32 bus_format)
+{
+       struct imx_ldb *ldb = imx_ldb_ch->ldb;
+       int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+
+       switch (bus_format) {
+       case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+               break;
+       case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+               if (imx_ldb_ch->chno == 0 || dual)
+                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+               if (imx_ldb_ch->chno == 1 || dual)
+                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+               break;
+       case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+               if (imx_ldb_ch->chno == 0 || dual)
+                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+                                        LDB_BIT_MAP_CH0_JEIDA;
+               if (imx_ldb_ch->chno == 1 || dual)
+                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+                                        LDB_BIT_MAP_CH1_JEIDA;
+               break;
+       }
+}
+
 static int imx_ldb_connector_get_modes(struct drm_connector *connector)
 {
        struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
@@ -100,11 +135,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
 
        if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
            imx_ldb_ch->panel->funcs->get_modes) {
-               struct drm_display_info *di = &connector->display_info;
-
                num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
-               if (!imx_ldb_ch->bus_format && di->num_bus_formats)
-                       imx_ldb_ch->bus_format = di->bus_formats[0];
                if (num_modes > 0)
                        return num_modes;
        }
@@ -141,10 +172,6 @@ static struct drm_encoder *imx_ldb_connector_best_encoder(
        return &imx_ldb_ch->encoder;
 }
 
-static void imx_ldb_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
 static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
                unsigned long serial_clk, unsigned long di_clk)
 {
@@ -173,43 +200,7 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
                        chno);
 }
 
-static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
-{
-       struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
-       struct imx_ldb *ldb = imx_ldb_ch->ldb;
-       int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
-       u32 bus_format;
-
-       switch (imx_ldb_ch->bus_format) {
-       default:
-               dev_warn(ldb->dev,
-                        "could not determine data mapping, default to 18-bit \"spwg\"\n");
-               /* fallthrough */
-       case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
-               bus_format = MEDIA_BUS_FMT_RGB666_1X18;
-               break;
-       case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
-               bus_format = MEDIA_BUS_FMT_RGB888_1X24;
-               if (imx_ldb_ch->chno == 0 || dual)
-                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
-               if (imx_ldb_ch->chno == 1 || dual)
-                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
-               break;
-       case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
-               bus_format = MEDIA_BUS_FMT_RGB888_1X24;
-               if (imx_ldb_ch->chno == 0 || dual)
-                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
-                                        LDB_BIT_MAP_CH0_JEIDA;
-               if (imx_ldb_ch->chno == 1 || dual)
-                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
-                                        LDB_BIT_MAP_CH1_JEIDA;
-               break;
-       }
-
-       imx_drm_set_bus_format(encoder, bus_format);
-}
-
-static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
+static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
 {
        struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
        struct imx_ldb *ldb = imx_ldb_ch->ldb;
@@ -219,8 +210,13 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
        drm_panel_prepare(imx_ldb_ch->panel);
 
        if (dual) {
+               clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]);
+               clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]);
+
                clk_prepare_enable(ldb->clk[0]);
                clk_prepare_enable(ldb->clk[1]);
+       } else {
+               clk_set_parent(ldb->clk_sel[mux], ldb->clk[imx_ldb_ch->chno]);
        }
 
        if (imx_ldb_ch == &ldb->channel[0] || dual) {
@@ -265,6 +261,7 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
        unsigned long serial_clk;
        unsigned long di_clk = mode->clock * 1000;
        int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+       u32 bus_format = imx_ldb_ch->bus_format;
 
        if (mode->clock > 170000) {
                dev_warn(ldb->dev,
@@ -286,18 +283,36 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
        }
 
        /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
-       if (imx_ldb_ch == &ldb->channel[0]) {
+       if (imx_ldb_ch == &ldb->channel[0] || dual) {
                if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                        ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
                else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                        ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW;
        }
-       if (imx_ldb_ch == &ldb->channel[1]) {
+       if (imx_ldb_ch == &ldb->channel[1] || dual) {
                if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                        ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;
                else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                        ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW;
        }
+
+       if (!bus_format) {
+               struct drm_connector_state *conn_state;
+               struct drm_connector *connector;
+               int i;
+
+               for_each_connector_in_state(encoder->crtc->state->state,
+                                           connector, conn_state, i) {
+                       struct drm_display_info *di = &connector->display_info;
+
+                       if (conn_state->crtc == encoder->crtc &&
+                           di->num_bus_formats) {
+                               bus_format = di->bus_formats[0];
+                               break;
+                       }
+               }
+       }
+       imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format);
 }
 
 static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
@@ -357,11 +372,45 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
        drm_panel_unprepare(imx_ldb_ch->panel);
 }
 
+static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
+                                       struct drm_crtc_state *crtc_state,
+                                       struct drm_connector_state *conn_state)
+{
+       struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+       struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+       struct drm_display_info *di = &conn_state->connector->display_info;
+       u32 bus_format = imx_ldb_ch->bus_format;
+
+       /* Bus format description in DT overrides connector display info. */
+       if (!bus_format && di->num_bus_formats)
+               bus_format = di->bus_formats[0];
+       switch (bus_format) {
+       case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+               imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+               break;
+       case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+       case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+               imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       imx_crtc_state->di_hsync_pin = 2;
+       imx_crtc_state->di_vsync_pin = 3;
+
+       return 0;
+}
+
 static const struct drm_connector_funcs imx_ldb_connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = imx_ldb_connector_detect,
        .destroy = imx_drm_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
@@ -374,11 +423,10 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
 };
 
 static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
-       .dpms = imx_ldb_encoder_dpms,
-       .prepare = imx_ldb_encoder_prepare,
-       .commit = imx_ldb_encoder_commit,
        .mode_set = imx_ldb_encoder_mode_set,
+       .enable = imx_ldb_encoder_enable,
        .disable = imx_ldb_encoder_disable,
+       .atomic_check = imx_ldb_encoder_atomic_check,
 };
 
 static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
@@ -400,10 +448,10 @@ static int imx_ldb_register(struct drm_device *drm,
        struct imx_ldb_channel *imx_ldb_ch)
 {
        struct imx_ldb *ldb = imx_ldb_ch->ldb;
+       struct drm_encoder *encoder = &imx_ldb_ch->encoder;
        int ret;
 
-       ret = imx_drm_encoder_parse_of(drm, &imx_ldb_ch->encoder,
-                                      imx_ldb_ch->child);
+       ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
        if (ret)
                return ret;
 
@@ -417,9 +465,8 @@ static int imx_ldb_register(struct drm_device *drm,
                        return ret;
        }
 
-       drm_encoder_helper_add(&imx_ldb_ch->encoder,
-                       &imx_ldb_encoder_helper_funcs);
-       drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
+       drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
+       drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
                         DRM_MODE_ENCODER_LVDS, NULL);
 
        drm_connector_helper_add(&imx_ldb_ch->connector,
@@ -427,11 +474,14 @@ static int imx_ldb_register(struct drm_device *drm,
        drm_connector_init(drm, &imx_ldb_ch->connector,
                           &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
 
-       if (imx_ldb_ch->panel)
-               drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector);
+       if (imx_ldb_ch->panel) {
+               ret = drm_panel_attach(imx_ldb_ch->panel,
+                                      &imx_ldb_ch->connector);
+               if (ret)
+                       return ret;
+       }
 
-       drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
-                       &imx_ldb_ch->encoder);
+       drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
 
        return 0;
 }
@@ -560,6 +610,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                struct imx_ldb_channel *channel;
                struct device_node *ddc_node;
                struct device_node *ep;
+               int bus_format;
 
                ret = of_property_read_u32(child, "reg", &i);
                if (ret || i < 0 || i > 1)
@@ -632,21 +683,22 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                        }
                }
 
-               channel->bus_format = of_get_bus_format(dev, child);
-               if (channel->bus_format == -EINVAL) {
+               bus_format = of_get_bus_format(dev, child);
+               if (bus_format == -EINVAL) {
                        /*
                         * If no bus format was specified in the device tree,
                         * we can still get it from the connected panel later.
                         */
                        if (channel->panel && channel->panel->funcs &&
                            channel->panel->funcs->get_modes)
-                               channel->bus_format = 0;
+                               bus_format = 0;
                }
-               if (channel->bus_format < 0) {
+               if (bus_format < 0) {
                        dev_err(dev, "could not determine data mapping: %d\n",
-                               channel->bus_format);
-                       return channel->bus_format;
+                               bus_format);
+                       return bus_format;
                }
+               channel->bus_format = bus_format;
 
                ret = imx_ldb_register(drm, channel);
                if (ret)
index baf788121287ec8af7147d86d497872d742007c1..5e875944ffa214f48f06d7ed8b687aabd1705ffb 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/spinlock.h>
 #include <linux/videodev2.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <video/imx-ipu-v3.h>
@@ -97,9 +98,6 @@
 /* TVE_TST_MODE_REG */
 #define TVE_TVDAC_TEST_MODE_MASK       (0x7 << 0)
 
-#define con_to_tve(x) container_of(x, struct imx_tve, connector)
-#define enc_to_tve(x) container_of(x, struct imx_tve, encoder)
-
 enum {
        TVE_MODE_TVOUT,
        TVE_MODE_VGA,
@@ -112,6 +110,8 @@ struct imx_tve {
        spinlock_t lock;        /* register lock */
        bool enabled;
        int mode;
+       int di_hsync_pin;
+       int di_vsync_pin;
 
        struct regmap *regmap;
        struct regulator *dac_reg;
@@ -120,10 +120,18 @@ struct imx_tve {
        struct clk *di_sel_clk;
        struct clk_hw clk_hw_di;
        struct clk *di_clk;
-       int vsync_pin;
-       int hsync_pin;
 };
 
+static inline struct imx_tve *con_to_tve(struct drm_connector *c)
+{
+       return container_of(c, struct imx_tve, connector);
+}
+
+static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
+{
+       return container_of(e, struct imx_tve, encoder);
+}
+
 static void tve_lock(void *__tve)
 __acquires(&tve->lock)
 {
@@ -148,8 +156,7 @@ static void tve_enable(struct imx_tve *tve)
                tve->enabled = true;
                clk_prepare_enable(tve->clk);
                ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
-                                        TVE_IPU_CLK_EN | TVE_EN,
-                                        TVE_IPU_CLK_EN | TVE_EN);
+                                        TVE_EN, TVE_EN);
        }
 
        /* clear interrupt status register */
@@ -172,7 +179,7 @@ static void tve_disable(struct imx_tve *tve)
        if (tve->enabled) {
                tve->enabled = false;
                ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
-                                        TVE_IPU_CLK_EN | TVE_EN, 0);
+                                        TVE_EN, 0);
                clk_disable_unprepare(tve->clk);
        }
 }
@@ -275,36 +282,6 @@ static struct drm_encoder *imx_tve_connector_best_encoder(
        return &tve->encoder;
 }
 
-static void imx_tve_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct imx_tve *tve = enc_to_tve(encoder);
-       int ret;
-
-       ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
-                                TVE_TV_OUT_MODE_MASK, TVE_TV_OUT_DISABLE);
-       if (ret < 0)
-               dev_err(tve->dev, "failed to disable TVOUT: %d\n", ret);
-}
-
-static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
-{
-       struct imx_tve *tve = enc_to_tve(encoder);
-
-       tve_disable(tve);
-
-       switch (tve->mode) {
-       case TVE_MODE_VGA:
-               imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
-                                      tve->hsync_pin, tve->vsync_pin,
-                                      DRM_BUS_FLAG_DE_HIGH |
-                                      DRM_BUS_FLAG_PIXDATA_NEGEDGE);
-               break;
-       case TVE_MODE_TVOUT:
-               imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
-               break;
-       }
-}
-
 static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
                                     struct drm_display_mode *orig_mode,
                                     struct drm_display_mode *mode)
@@ -333,6 +310,9 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
                        ret);
        }
 
+       regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
+                          TVE_IPU_CLK_EN, TVE_IPU_CLK_EN);
+
        if (tve->mode == TVE_MODE_VGA)
                ret = tve_setup_vga(tve);
        else
@@ -341,7 +321,7 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
                dev_err(tve->dev, "failed to set configuration: %d\n", ret);
 }
 
-static void imx_tve_encoder_commit(struct drm_encoder *encoder)
+static void imx_tve_encoder_enable(struct drm_encoder *encoder)
 {
        struct imx_tve *tve = enc_to_tve(encoder);
 
@@ -355,11 +335,28 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder)
        tve_disable(tve);
 }
 
+static int imx_tve_atomic_check(struct drm_encoder *encoder,
+                               struct drm_crtc_state *crtc_state,
+                               struct drm_connector_state *conn_state)
+{
+       struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+       struct imx_tve *tve = enc_to_tve(encoder);
+
+       imx_crtc_state->bus_format = MEDIA_BUS_FMT_GBR888_1X24;
+       imx_crtc_state->di_hsync_pin = tve->di_hsync_pin;
+       imx_crtc_state->di_vsync_pin = tve->di_vsync_pin;
+
+       return 0;
+}
+
 static const struct drm_connector_funcs imx_tve_connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = imx_tve_connector_detect,
        .destroy = imx_drm_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
@@ -373,11 +370,10 @@ static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
 };
 
 static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
-       .dpms = imx_tve_encoder_dpms,
-       .prepare = imx_tve_encoder_prepare,
        .mode_set = imx_tve_encoder_mode_set,
-       .commit = imx_tve_encoder_commit,
+       .enable = imx_tve_encoder_enable,
        .disable = imx_tve_encoder_disable,
+       .atomic_check = imx_tve_atomic_check,
 };
 
 static irqreturn_t imx_tve_irq_handler(int irq, void *data)
@@ -495,8 +491,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
        encoder_type = tve->mode == TVE_MODE_VGA ?
                                DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
 
-       ret = imx_drm_encoder_parse_of(drm, &tve->encoder,
-                                      tve->dev->of_node);
+       ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
        if (ret)
                return ret;
 
@@ -587,15 +582,15 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 
        if (tve->mode == TVE_MODE_VGA) {
                ret = of_property_read_u32(np, "fsl,hsync-pin",
-                                          &tve->hsync_pin);
+                                          &tve->di_hsync_pin);
 
                if (ret < 0) {
-                       dev_err(dev, "failed to get vsync pin\n");
+                       dev_err(dev, "failed to get hsync pin\n");
                        return ret;
                }
 
-               ret |= of_property_read_u32(np, "fsl,vsync-pin",
-                                           &tve->vsync_pin);
+               ret = of_property_read_u32(np, "fsl,vsync-pin",
+                                          &tve->di_vsync_pin);
 
                if (ret < 0) {
                        dev_err(dev, "failed to get vsync pin\n");
@@ -633,7 +628,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 
        tve->dac_reg = devm_regulator_get(dev, "dac");
        if (!IS_ERR(tve->dac_reg)) {
-               regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+               ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+               if (ret)
+                       return ret;
                ret = regulator_enable(tve->dac_reg);
                if (ret)
                        return ret;
index fc040417e1e8366cddf116cad95b058df80489ab..08e188bc10fc6200501d4f13fccc999970a35566 100644 (file)
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <linux/fb.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
-#include <linux/reservation.h>
-#include <linux/dma-buf.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 
 
 #define DRIVER_DESC            "i.MX IPUv3 Graphics"
 
-enum ipu_flip_status {
-       IPU_FLIP_NONE,
-       IPU_FLIP_PENDING,
-       IPU_FLIP_SUBMITTED,
-};
-
-struct ipu_flip_work {
-       struct work_struct              unref_work;
-       struct drm_gem_object           *bo;
-       struct drm_pending_vblank_event *page_flip_event;
-       struct work_struct              fence_work;
-       struct ipu_crtc                 *crtc;
-       struct fence                    *excl;
-       unsigned                        shared_count;
-       struct fence                    **shared;
-};
-
 struct ipu_crtc {
        struct device           *dev;
        struct drm_crtc         base;
@@ -60,201 +43,166 @@ struct ipu_crtc {
 
        struct ipu_dc           *dc;
        struct ipu_di           *di;
-       int                     enabled;
-       enum ipu_flip_status    flip_state;
-       struct workqueue_struct *flip_queue;
-       struct ipu_flip_work    *flip_work;
        int                     irq;
-       u32                     bus_format;
-       u32                     bus_flags;
-       int                     di_hsync_pin;
-       int                     di_vsync_pin;
 };
 
-#define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base)
+static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
+{
+       return container_of(crtc, struct ipu_crtc, base);
+}
 
-static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_enable(struct drm_crtc *crtc)
 {
+       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 
-       if (ipu_crtc->enabled)
-               return;
-
        ipu_dc_enable(ipu);
-       ipu_plane_enable(ipu_crtc->plane[0]);
-       /* Start DC channel and DI after IDMAC */
        ipu_dc_enable_channel(ipu_crtc->dc);
        ipu_di_enable(ipu_crtc->di);
-       drm_crtc_vblank_on(&ipu_crtc->base);
-
-       ipu_crtc->enabled = 1;
 }
 
-static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_disable(struct drm_crtc *crtc)
 {
+       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 
-       if (!ipu_crtc->enabled)
-               return;
-
-       /* Stop DC channel and DI before IDMAC */
        ipu_dc_disable_channel(ipu_crtc->dc);
        ipu_di_disable(ipu_crtc->di);
-       ipu_plane_disable(ipu_crtc->plane[0]);
        ipu_dc_disable(ipu);
-       drm_crtc_vblank_off(&ipu_crtc->base);
 
-       ipu_crtc->enabled = 0;
+       spin_lock_irq(&crtc->dev->event_lock);
+       if (crtc->state->event) {
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               crtc->state->event = NULL;
+       }
+       spin_unlock_irq(&crtc->dev->event_lock);
 }
 
-static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void imx_drm_crtc_reset(struct drm_crtc *crtc)
 {
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+       struct imx_crtc_state *state;
 
-       dev_dbg(ipu_crtc->dev, "%s mode: %d\n", __func__, mode);
-
-       switch (mode) {
-       case DRM_MODE_DPMS_ON:
-               ipu_fb_enable(ipu_crtc);
-               break;
-       case DRM_MODE_DPMS_STANDBY:
-       case DRM_MODE_DPMS_SUSPEND:
-       case DRM_MODE_DPMS_OFF:
-               ipu_fb_disable(ipu_crtc);
-               break;
+       if (crtc->state) {
+               if (crtc->state->mode_blob)
+                       drm_property_unreference_blob(crtc->state->mode_blob);
+
+               state = to_imx_crtc_state(crtc->state);
+               memset(state, 0, sizeof(*state));
+       } else {
+               state = kzalloc(sizeof(*state), GFP_KERNEL);
+               if (!state)
+                       return;
+               crtc->state = &state->base;
        }
+
+       state->base.crtc = crtc;
 }
 
-static void ipu_flip_unref_work_func(struct work_struct *__work)
+static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc)
 {
-       struct ipu_flip_work *work =
-                       container_of(__work, struct ipu_flip_work, unref_work);
+       struct imx_crtc_state *state;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return NULL;
 
-       drm_gem_object_unreference_unlocked(work->bo);
-       kfree(work);
+       __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+       WARN_ON(state->base.crtc != crtc);
+       state->base.crtc = crtc;
+
+       return &state->base;
 }
 
-static void ipu_flip_fence_work_func(struct work_struct *__work)
+static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
+                                      struct drm_crtc_state *state)
 {
-       struct ipu_flip_work *work =
-                       container_of(__work, struct ipu_flip_work, fence_work);
-       int i;
-
-       /* wait for all fences attached to the FB obj to signal */
-       if (work->excl) {
-               fence_wait(work->excl, false);
-               fence_put(work->excl);
-       }
-       for (i = 0; i < work->shared_count; i++) {
-               fence_wait(work->shared[i], false);
-               fence_put(work->shared[i]);
-       }
+       __drm_atomic_helper_crtc_destroy_state(state);
+       kfree(to_imx_crtc_state(state));
+}
+
+static const struct drm_crtc_funcs ipu_crtc_funcs = {
+       .set_config = drm_atomic_helper_set_config,
+       .destroy = drm_crtc_cleanup,
+       .page_flip = drm_atomic_helper_page_flip,
+       .reset = imx_drm_crtc_reset,
+       .atomic_duplicate_state = imx_drm_crtc_duplicate_state,
+       .atomic_destroy_state = imx_drm_crtc_destroy_state,
+};
 
-       work->crtc->flip_state = IPU_FLIP_SUBMITTED;
+static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
+{
+       struct ipu_crtc *ipu_crtc = dev_id;
+
+       imx_drm_handle_vblank(ipu_crtc->imx_crtc);
+
+       return IRQ_HANDLED;
 }
 
-static int ipu_page_flip(struct drm_crtc *crtc,
-               struct drm_framebuffer *fb,
-               struct drm_pending_vblank_event *event,
-               uint32_t page_flip_flags)
+static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 const struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
 {
-       struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-       struct ipu_flip_work *flip_work;
+       struct videomode vm;
        int ret;
 
-       if (ipu_crtc->flip_state != IPU_FLIP_NONE)
-               return -EBUSY;
-
-       ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
-       if (ret) {
-               dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n");
-               list_del(&event->base.link);
-
-               return ret;
-       }
+       drm_display_mode_to_videomode(adjusted_mode, &vm);
 
-       flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL);
-       if (!flip_work) {
-               ret = -ENOMEM;
-               goto put_vblank;
-       }
-       INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
-       flip_work->page_flip_event = event;
+       ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
+       if (ret)
+               return false;
 
-       /* get BO backing the old framebuffer and take a reference */
-       flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base;
-       drm_gem_object_reference(flip_work->bo);
+       if ((vm.vsync_len == 0) || (vm.hsync_len == 0))
+               return false;
 
-       ipu_crtc->flip_work = flip_work;
-       /*
-        * If the object has a DMABUF attached, we need to wait on its fences
-        * if there are any.
-        */
-       if (cma_obj->base.dma_buf) {
-               INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
-               flip_work->crtc = ipu_crtc;
+       drm_display_mode_from_videomode(&vm, adjusted_mode);
 
-               ret = reservation_object_get_fences_rcu(
-                               cma_obj->base.dma_buf->resv, &flip_work->excl,
-                               &flip_work->shared_count, &flip_work->shared);
+       return true;
+}
 
-               if (unlikely(ret)) {
-                       DRM_ERROR("failed to get fences for buffer\n");
-                       goto free_flip_work;
-               }
+static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
+                                struct drm_crtc_state *state)
+{
+       u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
 
-               /* No need to queue the worker if the are no fences */
-               if (!flip_work->excl && !flip_work->shared_count) {
-                       ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
-               } else {
-                       ipu_crtc->flip_state = IPU_FLIP_PENDING;
-                       queue_work(ipu_crtc->flip_queue,
-                                  &flip_work->fence_work);
-               }
-       } else {
-               ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
-       }
+       if (state->active && (primary_plane_mask & state->plane_mask) == 0)
+               return -EINVAL;
 
        return 0;
-
-free_flip_work:
-       drm_gem_object_unreference_unlocked(flip_work->bo);
-       kfree(flip_work);
-       ipu_crtc->flip_work = NULL;
-put_vblank:
-       imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-
-       return ret;
 }
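
The plane_mask test encodes the rule that a CRTC may only be active with its primary plane in the same state; a worked instance, assuming the primary plane was assigned index 0:

	/* Assuming drm_plane_index(crtc->primary) == 0: */
	u32 primary_plane_mask = 1 << 0;	/* 0x1 */

	/*
	 * state->active == true with (state->plane_mask & 0x1) == 0 fails
	 * with -EINVAL, so the CRTC cannot be enabled without a primary
	 * framebuffer; a fully disabled CRTC passes unconditionally.
	 */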
 
-static const struct drm_crtc_funcs ipu_crtc_funcs = {
-       .set_config = drm_crtc_helper_set_config,
-       .destroy = drm_crtc_cleanup,
-       .page_flip = ipu_page_flip,
-};
+static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
+                                 struct drm_crtc_state *old_crtc_state)
+{
+       spin_lock_irq(&crtc->dev->event_lock);
+       if (crtc->state->event) {
+               WARN_ON(drm_crtc_vblank_get(crtc));
+               drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+               crtc->state->event = NULL;
+       }
+       spin_unlock_irq(&crtc->dev->event_lock);
+}
 
-static int ipu_crtc_mode_set(struct drm_crtc *crtc,
-                              struct drm_display_mode *orig_mode,
-                              struct drm_display_mode *mode,
-                              int x, int y,
-                              struct drm_framebuffer *old_fb)
+static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_encoder *encoder;
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+       struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
        struct ipu_di_signal_cfg sig_cfg = {};
        unsigned long encoder_types = 0;
-       int ret;
 
        dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
                        mode->hdisplay);
        dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
                        mode->vdisplay);
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->crtc == crtc)
                        encoder_types |= BIT(encoder->encoder_type);
+       }
 
        dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n",
                __func__, encoder_types);
@@ -272,114 +220,30 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
        else
                sig_cfg.clkflags = 0;
 
-       sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);
+       sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
        /* Default to driving pixel data on negative clock edges */
-       sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
+       sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
                             DRM_BUS_FLAG_PIXDATA_POSEDGE);
-       sig_cfg.bus_format = ipu_crtc->bus_format;
+       sig_cfg.bus_format = imx_crtc_state->bus_format;
        sig_cfg.v_to_h_sync = 0;
-       sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
-       sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;
+       sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
+       sig_cfg.vsync_pin = imx_crtc_state->di_vsync_pin;
 
        drm_display_mode_to_videomode(mode, &sig_cfg.mode);
 
-       ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
-                              mode->flags & DRM_MODE_FLAG_INTERLACE,
-                              ipu_crtc->bus_format, mode->hdisplay);
-       if (ret) {
-               dev_err(ipu_crtc->dev,
-                               "initializing display controller failed with %d\n",
-                               ret);
-               return ret;
-       }
-
-       ret = ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
-       if (ret) {
-               dev_err(ipu_crtc->dev,
-                               "initializing panel failed with %d\n", ret);
-               return ret;
-       }
-
-       return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode,
-                                 crtc->primary->fb,
-                                 0, 0, mode->hdisplay, mode->vdisplay,
-                                 x, y, mode->hdisplay, mode->vdisplay,
-                                 mode->flags & DRM_MODE_FLAG_INTERLACE);
-}
-
-static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
-{
-       unsigned long flags;
-       struct drm_device *drm = ipu_crtc->base.dev;
-       struct ipu_flip_work *work = ipu_crtc->flip_work;
-
-       spin_lock_irqsave(&drm->event_lock, flags);
-       if (work->page_flip_event)
-               drm_crtc_send_vblank_event(&ipu_crtc->base,
-                                          work->page_flip_event);
-       imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-       spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
-static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
-{
-       struct ipu_crtc *ipu_crtc = dev_id;
-
-       imx_drm_handle_vblank(ipu_crtc->imx_crtc);
-
-       if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
-               struct ipu_plane *plane = ipu_crtc->plane[0];
-
-               ipu_plane_set_base(plane, ipu_crtc->base.primary->fb,
-                                  plane->x, plane->y);
-               ipu_crtc_handle_pageflip(ipu_crtc);
-               queue_work(ipu_crtc->flip_queue,
-                          &ipu_crtc->flip_work->unref_work);
-               ipu_crtc->flip_state = IPU_FLIP_NONE;
-       }
-
-       return IRQ_HANDLED;
-}
-
-static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
-                                 const struct drm_display_mode *mode,
-                                 struct drm_display_mode *adjusted_mode)
-{
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-       struct videomode vm;
-       int ret;
-
-       drm_display_mode_to_videomode(adjusted_mode, &vm);
-
-       ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
-       if (ret)
-               return false;
-
-       drm_display_mode_from_videomode(&vm, adjusted_mode);
-
-       return true;
-}
-
-static void ipu_crtc_prepare(struct drm_crtc *crtc)
-{
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
-       ipu_fb_disable(ipu_crtc);
-}
-
-static void ipu_crtc_commit(struct drm_crtc *crtc)
-{
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
-       ipu_fb_enable(ipu_crtc);
+       ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
+                        mode->flags & DRM_MODE_FLAG_INTERLACE,
+                        imx_crtc_state->bus_format, mode->hdisplay);
+       ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
 }
 
 static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
-       .dpms = ipu_crtc_dpms,
        .mode_fixup = ipu_crtc_mode_fixup,
-       .mode_set = ipu_crtc_mode_set,
-       .prepare = ipu_crtc_prepare,
-       .commit = ipu_crtc_commit,
+       .mode_set_nofb = ipu_crtc_mode_set_nofb,
+       .atomic_check = ipu_crtc_atomic_check,
+       .atomic_begin = ipu_crtc_atomic_begin,
+       .disable = ipu_crtc_disable,
+       .enable = ipu_crtc_enable,
 };
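
For orientation, the atomic helpers invoke these hooks in a fixed order over a full modeset; a rough sketch of the sequence (simplified, multi-CRTC iteration and error paths omitted):

	/*
	 * ipu_crtc_atomic_check()   - check phase, requires the primary plane
	 * ipu_crtc_mode_fixup()     - check phase, rounds the mode through
	 *                             ipu_di_adjust_videomode()
	 * ipu_crtc_disable()        - commit, old configuration off
	 * ipu_crtc_mode_set_nofb()  - commit, programs DC/DI from imx_crtc_state
	 * ipu_crtc_atomic_begin()   - commit, arms the pending vblank event
	 * ipu_crtc_enable()         - commit, DC/DI back on
	 */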
 
 static int ipu_enable_vblank(struct drm_crtc *crtc)
@@ -398,23 +262,9 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
        disable_irq_nosync(ipu_crtc->irq);
 }
 
-static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
-               u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
-{
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
-       ipu_crtc->bus_format = bus_format;
-       ipu_crtc->bus_flags = bus_flags;
-       ipu_crtc->di_hsync_pin = hsync_pin;
-       ipu_crtc->di_vsync_pin = vsync_pin;
-
-       return 0;
-}
-
 static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = {
        .enable_vblank = ipu_enable_vblank,
        .disable_vblank = ipu_disable_vblank,
-       .set_interface_pix_fmt = ipu_set_interface_pix_fmt,
        .crtc_funcs = &ipu_crtc_funcs,
        .crtc_helper_funcs = &ipu_helper_funcs,
 };
@@ -496,8 +346,16 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
                                                IPU_DP_FLOW_SYNC_FG,
                                                drm_crtc_mask(&ipu_crtc->base),
                                                DRM_PLANE_TYPE_OVERLAY);
-               if (IS_ERR(ipu_crtc->plane[1]))
+               if (IS_ERR(ipu_crtc->plane[1])) {
                        ipu_crtc->plane[1] = NULL;
+               } else {
+                       ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
+                       if (ret) {
+                               dev_err(ipu_crtc->dev,
+                                       "getting plane 1 resources failed with %d.\n", ret);
+                               goto err_put_plane0_res;
+                       }
+               }
        }
 
        ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
@@ -505,16 +363,17 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
                        "imx_drm", ipu_crtc);
        if (ret < 0) {
                dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
-               goto err_put_plane_res;
+               goto err_put_plane1_res;
        }
        /* Only enable IRQ when we actually need it to trigger work. */
        disable_irq(ipu_crtc->irq);
 
-       ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
-
        return 0;
 
-err_put_plane_res:
+err_put_plane1_res:
+       if (ipu_crtc->plane[1])
+               ipu_plane_put_resources(ipu_crtc->plane[1]);
+err_put_plane0_res:
        ipu_plane_put_resources(ipu_crtc->plane[0]);
 err_remove_crtc:
        imx_drm_remove_crtc(ipu_crtc->imx_crtc);
@@ -553,9 +412,10 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
 
        imx_drm_remove_crtc(ipu_crtc->imx_crtc);
 
-       destroy_workqueue(ipu_crtc->flip_queue);
-       ipu_plane_put_resources(ipu_crtc->plane[0]);
        ipu_put_resources(ipu_crtc);
+       if (ipu_crtc->plane[1])
+               ipu_plane_put_resources(ipu_crtc->plane[1]);
+       ipu_plane_put_resources(ipu_crtc->plane[0]);
 }
 
 static const struct component_ops ipu_crtc_ops = {
index a4bb44118d331179385b3a7824347cebde7db3dc..4ad67d015ec7fdcb32dc84b17ebb99eb185590d2 100644 (file)
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "video/imx-ipu-v3.h"
 #include "ipuv3-plane.h"
 
-#define to_ipu_plane(x)        container_of(x, struct ipu_plane, base)
+static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
+{
+       return container_of(p, struct ipu_plane, base);
+}
 
 static const uint32_t ipu_plane_formats[] = {
        DRM_FORMAT_ARGB1555,
@@ -53,62 +59,67 @@ int ipu_plane_irq(struct ipu_plane *ipu_plane)
                                     IPU_IRQ_EOF);
 }
 
-static int calc_vref(struct drm_display_mode *mode)
+static inline unsigned long
+drm_plane_state_to_eba(struct drm_plane_state *state)
 {
-       unsigned long htotal, vtotal;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_gem_cma_object *cma_obj;
 
-       htotal = mode->htotal;
-       vtotal = mode->vtotal;
+       cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+       BUG_ON(!cma_obj);
 
-       if (!htotal || !vtotal)
-               return 60;
-
-       return DIV_ROUND_UP(mode->clock * 1000, vtotal * htotal);
+       return cma_obj->paddr + fb->offsets[0] +
+              fb->pitches[0] * (state->src_y >> 16) +
+              (fb->bits_per_pixel >> 3) * (state->src_x >> 16);
 }
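
The src_x/src_y coordinates are in 16.16 fixed point, hence the >> 16 shifts; a worked example with hypothetical values:

	/*
	 * Hypothetical XRGB8888 framebuffer, 4 bytes per pixel:
	 *   pitches[0] = 4096, offsets[0] = 0,
	 *   src_x = 16 << 16, src_y = 8 << 16
	 *
	 *   eba = paddr + 0
	 *       + 4096 * (src_y >> 16)   =  4096 * 8  = 32768
	 *       +    4 * (src_x >> 16)   =     4 * 16 =    64
	 *       = paddr + 32832
	 */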
 
-static inline int calc_bandwidth(int width, int height, unsigned int vref)
+static inline unsigned long
+drm_plane_state_to_ubo(struct drm_plane_state *state)
 {
-       return width * height * vref;
-}
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_gem_cma_object *cma_obj;
+       unsigned long eba = drm_plane_state_to_eba(state);
 
-int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
-                      int x, int y)
-{
-       struct drm_gem_cma_object *cma_obj[3];
-       unsigned long eba, ubo, vbo;
-       int active, i;
+       cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
+       BUG_ON(!cma_obj);
 
-       for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
-               cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
-               if (!cma_obj[i]) {
-                       DRM_DEBUG_KMS("plane %d entry is null.\n", i);
-                       return -EFAULT;
-               }
-       }
+       return cma_obj->paddr + fb->offsets[1] +
+              fb->pitches[1] * (state->src_y >> 16) / 2 +
+              (state->src_x >> 16) / 2 - eba;
+}
 
-       eba = cma_obj[0]->paddr + fb->offsets[0] +
-             fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
+static inline unsigned long
+drm_plane_state_to_vbo(struct drm_plane_state *state)
+{
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_gem_cma_object *cma_obj;
+       unsigned long eba = drm_plane_state_to_eba(state);
 
-       if (eba & 0x7) {
-               DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
-               return -EINVAL;
-       }
+       cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
+       BUG_ON(!cma_obj);
 
-       if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
-               DRM_DEBUG_KMS("pitches out of range.\n");
-               return -EINVAL;
-       }
+       return cma_obj->paddr + fb->offsets[2] +
+              fb->pitches[2] * (state->src_y >> 16) / 2 +
+              (state->src_x >> 16) / 2 - eba;
+}
 
-       if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
-               DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
-               return -EINVAL;
-       }
+static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
+                                     struct drm_plane_state *old_state)
+{
+       struct drm_plane *plane = &ipu_plane->base;
+       struct drm_plane_state *state = plane->state;
+       struct drm_framebuffer *fb = state->fb;
+       unsigned long eba, ubo, vbo;
+       int active;
 
-       ipu_plane->stride[0] = fb->pitches[0];
+       eba = drm_plane_state_to_eba(state);
 
        switch (fb->pixel_format) {
        case DRM_FORMAT_YUV420:
        case DRM_FORMAT_YVU420:
+               if (old_state->fb)
+                       break;
+
                /*
                 * Multiplanar formats have to meet the following restrictions:
                 * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
@@ -117,59 +128,28 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
                 * - Only EBA may be changed while scanout is active
                 * - The strides of U and V planes must be identical.
                 */
-               ubo = cma_obj[1]->paddr + fb->offsets[1] +
-                     fb->pitches[1] * y / 2 + x / 2 - eba;
-               vbo = cma_obj[2]->paddr + fb->offsets[2] +
-                     fb->pitches[2] * y / 2 + x / 2 - eba;
+               ubo = drm_plane_state_to_ubo(state);
+               vbo = drm_plane_state_to_vbo(state);
 
-               if ((ubo & 0x7) || (vbo & 0x7)) {
-                       DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
-                       return -EINVAL;
-               }
-
-               if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
-                       DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
-                       return -EINVAL;
-               }
-
-               if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
-                                          (ipu_plane->v_offset != vbo))) {
-                       DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
-                       return -EINVAL;
-               }
-
-               if (fb->pitches[1] != fb->pitches[2]) {
-                       DRM_DEBUG_KMS("U/V pitches must be identical.\n");
-                       return -EINVAL;
-               }
-
-               if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
-                       DRM_DEBUG_KMS("U/V pitches out of range.\n");
-                       return -EINVAL;
-               }
-
-               if (ipu_plane->enabled &&
-                   (ipu_plane->stride[1] != fb->pitches[1])) {
-                       DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
-                       return -EINVAL;
-               }
-
-               ipu_plane->u_offset = ubo;
-               ipu_plane->v_offset = vbo;
-               ipu_plane->stride[1] = fb->pitches[1];
+               if (fb->pixel_format == DRM_FORMAT_YUV420)
+                       ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+                                                     fb->pitches[1], ubo, vbo);
+               else
+                       ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+                                                     fb->pitches[1], vbo, ubo);
 
                dev_dbg(ipu_plane->base.dev->dev,
-                       "phys = %pad %pad %pad, x = %d, y = %d",
-                       &cma_obj[0]->paddr, &cma_obj[1]->paddr,
-                       &cma_obj[2]->paddr, x, y);
+                       "phys = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
+                       state->src_x >> 16, state->src_y >> 16);
                break;
        default:
-               dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
-                       &cma_obj[0]->paddr, x, y);
+               dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
+                       eba, state->src_x >> 16, state->src_y >> 16);
+
                break;
        }
 
-       if (ipu_plane->enabled) {
+       if (old_state->fb) {
                active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
                ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
                ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
@@ -177,155 +157,6 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
                ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
                ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
        }
-
-       /* cache offsets for subsequent pageflips */
-       ipu_plane->x = x;
-       ipu_plane->y = y;
-
-       return 0;
-}
-
-int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
-                      struct drm_display_mode *mode,
-                      struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                      unsigned int crtc_w, unsigned int crtc_h,
-                      uint32_t src_x, uint32_t src_y,
-                      uint32_t src_w, uint32_t src_h, bool interlaced)
-{
-       struct device *dev = ipu_plane->base.dev->dev;
-       int ret;
-
-       /* no scaling */
-       if (src_w != crtc_w || src_h != crtc_h)
-               return -EINVAL;
-
-       /* clip to crtc bounds */
-       if (crtc_x < 0) {
-               if (-crtc_x > crtc_w)
-                       return -EINVAL;
-               src_x += -crtc_x;
-               src_w -= -crtc_x;
-               crtc_w -= -crtc_x;
-               crtc_x = 0;
-       }
-       if (crtc_y < 0) {
-               if (-crtc_y > crtc_h)
-                       return -EINVAL;
-               src_y += -crtc_y;
-               src_h -= -crtc_y;
-               crtc_h -= -crtc_y;
-               crtc_y = 0;
-       }
-       if (crtc_x + crtc_w > mode->hdisplay) {
-               if (crtc_x > mode->hdisplay)
-                       return -EINVAL;
-               crtc_w = mode->hdisplay - crtc_x;
-               src_w = crtc_w;
-       }
-       if (crtc_y + crtc_h > mode->vdisplay) {
-               if (crtc_y > mode->vdisplay)
-                       return -EINVAL;
-               crtc_h = mode->vdisplay - crtc_y;
-               src_h = crtc_h;
-       }
-       /* full plane minimum width is 13 pixels */
-       if (crtc_w < 13 && (ipu_plane->dp_flow != IPU_DP_FLOW_SYNC_FG))
-               return -EINVAL;
-       if (crtc_h < 2)
-               return -EINVAL;
-
-       /*
-        * since we cannot touch active IDMAC channels, we do not support
-        * resizing the enabled plane or changing its format
-        */
-       if (ipu_plane->enabled) {
-               if (src_w != ipu_plane->w || src_h != ipu_plane->h ||
-                   fb->pixel_format != ipu_plane->base.fb->pixel_format)
-                       return -EINVAL;
-
-               return ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
-       }
-
-       switch (ipu_plane->dp_flow) {
-       case IPU_DP_FLOW_SYNC_BG:
-               ret = ipu_dp_setup_channel(ipu_plane->dp,
-                               IPUV3_COLORSPACE_RGB,
-                               IPUV3_COLORSPACE_RGB);
-               if (ret) {
-                       dev_err(dev,
-                               "initializing display processor failed with %d\n",
-                               ret);
-                       return ret;
-               }
-               ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
-               break;
-       case IPU_DP_FLOW_SYNC_FG:
-               ipu_dp_setup_channel(ipu_plane->dp,
-                               ipu_drm_fourcc_to_colorspace(fb->pixel_format),
-                               IPUV3_COLORSPACE_UNKNOWN);
-               ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y);
-               /* Enable local alpha on partial plane */
-               switch (fb->pixel_format) {
-               case DRM_FORMAT_ARGB1555:
-               case DRM_FORMAT_ABGR1555:
-               case DRM_FORMAT_RGBA5551:
-               case DRM_FORMAT_BGRA5551:
-               case DRM_FORMAT_ARGB4444:
-               case DRM_FORMAT_ARGB8888:
-               case DRM_FORMAT_ABGR8888:
-               case DRM_FORMAT_RGBA8888:
-               case DRM_FORMAT_BGRA8888:
-                       ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
-                       break;
-               default:
-                       break;
-               }
-       }
-
-       ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
-                       calc_bandwidth(crtc_w, crtc_h,
-                                      calc_vref(mode)), 64);
-       if (ret) {
-               dev_err(dev, "allocating dmfc bandwidth failed with %d\n", ret);
-               return ret;
-       }
-
-       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
-
-       ipu_cpmem_zero(ipu_plane->ipu_ch);
-       ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
-       ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
-       if (ret < 0) {
-               dev_err(dev, "unsupported pixel format 0x%08x\n",
-                       fb->pixel_format);
-               return ret;
-       }
-       ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
-       ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
-       ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
-
-       ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
-       if (ret < 0)
-               return ret;
-       if (interlaced)
-               ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
-
-       if (fb->pixel_format == DRM_FORMAT_YUV420) {
-               ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
-                                             ipu_plane->stride[1],
-                                             ipu_plane->u_offset,
-                                             ipu_plane->v_offset);
-       } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
-               ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
-                                             ipu_plane->stride[1],
-                                             ipu_plane->v_offset,
-                                             ipu_plane->u_offset);
-       }
-
-       ipu_plane->w = src_w;
-       ipu_plane->h = src_h;
-
-       return 0;
 }
 
 void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
@@ -372,7 +203,7 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
        return ret;
 }
 
-void ipu_plane_enable(struct ipu_plane *ipu_plane)
+static void ipu_plane_enable(struct ipu_plane *ipu_plane)
 {
        if (ipu_plane->dp)
                ipu_dp_enable(ipu_plane->ipu);
@@ -380,14 +211,10 @@ void ipu_plane_enable(struct ipu_plane *ipu_plane)
        ipu_idmac_enable_channel(ipu_plane->ipu_ch);
        if (ipu_plane->dp)
                ipu_dp_enable_channel(ipu_plane->dp);
-
-       ipu_plane->enabled = true;
 }
 
-void ipu_plane_disable(struct ipu_plane *ipu_plane)
+static void ipu_plane_disable(struct ipu_plane *ipu_plane)
 {
-       ipu_plane->enabled = false;
-
        ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
 
        if (ipu_plane->dp)
@@ -398,74 +225,225 @@ void ipu_plane_disable(struct ipu_plane *ipu_plane)
                ipu_dp_disable(ipu_plane->ipu);
 }
 
-/*
- * drm_plane API
- */
-
-static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-                           struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                           unsigned int crtc_w, unsigned int crtc_h,
-                           uint32_t src_x, uint32_t src_y,
-                           uint32_t src_w, uint32_t src_h)
+static int ipu_disable_plane(struct drm_plane *plane)
 {
        struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-       int ret = 0;
-
-       DRM_DEBUG_KMS("plane - %p\n", plane);
-
-       if (!ipu_plane->enabled)
-               ret = ipu_plane_get_resources(ipu_plane);
-       if (ret < 0)
-               return ret;
-
-       ret = ipu_plane_mode_set(ipu_plane, crtc, &crtc->hwmode, fb,
-                       crtc_x, crtc_y, crtc_w, crtc_h,
-                       src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
-                       false);
-       if (ret < 0) {
-               ipu_plane_put_resources(ipu_plane);
-               return ret;
-       }
 
-       if (crtc != plane->crtc)
-               dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
-                               plane->crtc, crtc);
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-       if (!ipu_plane->enabled)
-               ipu_plane_enable(ipu_plane);
+       ipu_plane_disable(ipu_plane);
 
        return 0;
 }
 
-static int ipu_disable_plane(struct drm_plane *plane)
+static void ipu_plane_destroy(struct drm_plane *plane)
 {
        struct ipu_plane *ipu_plane = to_ipu_plane(plane);
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-       if (ipu_plane->enabled)
-               ipu_plane_disable(ipu_plane);
+       ipu_disable_plane(plane);
+       drm_plane_cleanup(plane);
+       kfree(ipu_plane);
+}
 
-       ipu_plane_put_resources(ipu_plane);
+static const struct drm_plane_funcs ipu_plane_funcs = {
+       .update_plane   = drm_atomic_helper_update_plane,
+       .disable_plane  = drm_atomic_helper_disable_plane,
+       .destroy        = ipu_plane_destroy,
+       .reset          = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
+};
+
+static int ipu_plane_atomic_check(struct drm_plane *plane,
+                                 struct drm_plane_state *state)
+{
+       struct drm_plane_state *old_state = plane->state;
+       struct drm_crtc_state *crtc_state;
+       struct device *dev = plane->dev->dev;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_framebuffer *old_fb = old_state->fb;
+       unsigned long eba, ubo, vbo, old_ubo, old_vbo;
+
+       /* Ok to disable */
+       if (!fb)
+               return 0;
+
+       if (!state->crtc)
+               return -EINVAL;
+
+       crtc_state =
+               drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+       if (WARN_ON(!crtc_state))
+               return -EINVAL;
+
+       /* CRTC should be enabled */
+       if (!crtc_state->enable)
+               return -EINVAL;
+
+       /* no scaling */
+       if (state->src_w >> 16 != state->crtc_w ||
+           state->src_h >> 16 != state->crtc_h)
+               return -EINVAL;
+
+       switch (plane->type) {
+       case DRM_PLANE_TYPE_PRIMARY:
+               /* full plane doesn't support partial off screen */
+               if (state->crtc_x || state->crtc_y ||
+                   state->crtc_w != crtc_state->adjusted_mode.hdisplay ||
+                   state->crtc_h != crtc_state->adjusted_mode.vdisplay)
+                       return -EINVAL;
+
+               /* full plane minimum width is 13 pixels */
+               if (state->crtc_w < 13)
+                       return -EINVAL;
+               break;
+       case DRM_PLANE_TYPE_OVERLAY:
+               if (state->crtc_x < 0 || state->crtc_y < 0)
+                       return -EINVAL;
+
+               if (state->crtc_x + state->crtc_w >
+                   crtc_state->adjusted_mode.hdisplay)
+                       return -EINVAL;
+               if (state->crtc_y + state->crtc_h >
+                   crtc_state->adjusted_mode.vdisplay)
+                       return -EINVAL;
+               break;
+       default:
+               dev_warn(dev, "Unsupported plane type\n");
+               return -EINVAL;
+       }
+
+       if (state->crtc_h < 2)
+               return -EINVAL;
+
+       /*
+        * since we cannot touch active IDMAC channels, we do not support
+        * resizing the enabled plane or changing its format
+        */
+       if (old_fb && (state->src_w != old_state->src_w ||
+                             state->src_h != old_state->src_h ||
+                             fb->pixel_format != old_fb->pixel_format))
+               return -EINVAL;
+
+       eba = drm_plane_state_to_eba(state);
+
+       if (eba & 0x7)
+               return -EINVAL;
+
+       if (fb->pitches[0] < 1 || fb->pitches[0] > 16384)
+               return -EINVAL;
+
+       if (old_fb && fb->pitches[0] != old_fb->pitches[0])
+               return -EINVAL;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               /*
+                * Multiplanar formats have to meet the following restrictions:
+                * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+                * - EBA, UBO and VBO are a multiple of 8
+                * - UBO and VBO are unsigned and not larger than 0xfffff8
+                * - Only EBA may be changed while scanout is active
+                * - The strides of U and V planes must be identical.
+                */
+               ubo = drm_plane_state_to_ubo(state);
+               vbo = drm_plane_state_to_vbo(state);
+
+               if ((ubo & 0x7) || (vbo & 0x7))
+                       return -EINVAL;
+
+               if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
+                       return -EINVAL;
+
+               if (old_fb) {
+                       old_ubo = drm_plane_state_to_ubo(old_state);
+                       old_vbo = drm_plane_state_to_vbo(old_state);
+                       if (ubo != old_ubo || vbo != old_vbo)
+                               return -EINVAL;
+               }
+
+               if (fb->pitches[1] != fb->pitches[2])
+                       return -EINVAL;
+
+               if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
+                       return -EINVAL;
+
+               if (old_fb && old_fb->pitches[1] != fb->pitches[1])
+                       return -EINVAL;
+       }
 
        return 0;
 }
 
-static void ipu_plane_destroy(struct drm_plane *plane)
+static void ipu_plane_atomic_disable(struct drm_plane *plane,
+                                    struct drm_plane_state *old_state)
+{
+       ipu_disable_plane(plane);
+}
+
+static void ipu_plane_atomic_update(struct drm_plane *plane,
+                                   struct drm_plane_state *old_state)
 {
        struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+       struct drm_plane_state *state = plane->state;
+       enum ipu_color_space ics;
 
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+       if (old_state->fb) {
+               ipu_plane_atomic_set_base(ipu_plane, old_state);
+               return;
+       }
 
-       ipu_disable_plane(plane);
-       drm_plane_cleanup(plane);
-       kfree(ipu_plane);
+       switch (ipu_plane->dp_flow) {
+       case IPU_DP_FLOW_SYNC_BG:
+               ipu_dp_setup_channel(ipu_plane->dp,
+                                       IPUV3_COLORSPACE_RGB,
+                                       IPUV3_COLORSPACE_RGB);
+               ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
+               break;
+       case IPU_DP_FLOW_SYNC_FG:
+               ics = ipu_drm_fourcc_to_colorspace(state->fb->pixel_format);
+               ipu_dp_setup_channel(ipu_plane->dp, ics,
+                                       IPUV3_COLORSPACE_UNKNOWN);
+               ipu_dp_set_window_pos(ipu_plane->dp, state->crtc_x,
+                                       state->crtc_y);
+               /* Enable local alpha on partial plane */
+               switch (state->fb->pixel_format) {
+               case DRM_FORMAT_ARGB1555:
+               case DRM_FORMAT_ABGR1555:
+               case DRM_FORMAT_RGBA5551:
+               case DRM_FORMAT_BGRA5551:
+               case DRM_FORMAT_ARGB4444:
+               case DRM_FORMAT_ARGB8888:
+               case DRM_FORMAT_ABGR8888:
+               case DRM_FORMAT_RGBA8888:
+               case DRM_FORMAT_BGRA8888:
+                       ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, state->crtc_w);
+
+       ipu_cpmem_zero(ipu_plane->ipu_ch);
+       ipu_cpmem_set_resolution(ipu_plane->ipu_ch, state->src_w >> 16,
+                                       state->src_h >> 16);
+       ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->pixel_format);
+       ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
+       ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
+       ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
+       ipu_plane_atomic_set_base(ipu_plane, old_state);
+       ipu_plane_enable(ipu_plane);
 }
 
-static const struct drm_plane_funcs ipu_plane_funcs = {
-       .update_plane   = ipu_update_plane,
-       .disable_plane  = ipu_disable_plane,
-       .destroy        = ipu_plane_destroy,
+static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
+       .atomic_check = ipu_plane_atomic_check,
+       .atomic_disable = ipu_plane_atomic_disable,
+       .atomic_update = ipu_plane_atomic_update,
 };
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -498,5 +476,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
                return ERR_PTR(ret);
        }
 
+       drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+
        return ipu_plane;
 }
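
The block above completes the switch of this plane driver (the i.MX IPUv3 one, by the look of the symbols) from driver-private update_plane/disable_plane entry points to atomic modesetting: the drm_plane_funcs delegate to the atomic helpers, while validation and programming move into atomic_check/atomic_update. A minimal sketch of the same wiring for a hypothetical driver follows; every foo_* name is invented here and the bodies are placeholders, not this driver's logic.

    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_plane_helper.h>

    static const struct drm_plane_funcs foo_plane_funcs = {
            .update_plane           = drm_atomic_helper_update_plane,
            .disable_plane          = drm_atomic_helper_disable_plane,
            .destroy                = drm_plane_cleanup,
            .reset                  = drm_atomic_helper_plane_reset,
            .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
            .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
    };

    static int foo_plane_atomic_check(struct drm_plane *plane,
                                      struct drm_plane_state *state)
    {
            /* reject anything the hardware cannot do; this step may fail */
            return 0;
    }

    static void foo_plane_atomic_update(struct drm_plane *plane,
                                        struct drm_plane_state *old_state)
    {
            /* program scanout from plane->state; this step must not fail */
    }

    static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
            .atomic_check   = foo_plane_atomic_check,
            .atomic_update  = foo_plane_atomic_update,
    };

    /* at init: drm_plane_helper_add(plane, &foo_plane_helper_funcs); */

The split matters because the helpers run atomic_check on the whole proposed state before anything is committed, so atomic_update only ever sees configurations that were already validated.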
index 4448fd4ad4eb503422a750f374c6b20a64b9f643..338b88a74eb6e06fc94db173c7cac0be8700f758 100644
@@ -23,17 +23,6 @@ struct ipu_plane {
 
        int                     dma;
        int                     dp_flow;
-
-       int                     x;
-       int                     y;
-       int                     w;
-       int                     h;
-
-       unsigned int            u_offset;
-       unsigned int            v_offset;
-       unsigned int            stride[2];
-
-       bool                    enabled;
 };
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -48,11 +37,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
                       uint32_t src_x, uint32_t src_y, uint32_t src_w,
                       uint32_t src_h, bool interlaced);
 
-void ipu_plane_enable(struct ipu_plane *plane);
-void ipu_plane_disable(struct ipu_plane *plane);
-int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb,
-                      int x, int y);
-
 int ipu_plane_get_resources(struct ipu_plane *plane);
 void ipu_plane_put_resources(struct ipu_plane *plane);
 
index 2d1fd02cd3d663c672fffd8c5d4b6eb60e63e195..1dad297b01fd5a5723b5cba7dd92b352057c7d7a 100644
@@ -16,6 +16,7 @@
 #include <linux/component.h>
 #include <linux/module.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_panel.h>
@@ -25,9 +26,6 @@
 
 #include "imx-drm.h"
 
-#define con_to_imxpd(x) container_of(x, struct imx_parallel_display, connector)
-#define enc_to_imxpd(x) container_of(x, struct imx_parallel_display, encoder)
-
 struct imx_parallel_display {
        struct drm_connector connector;
        struct drm_encoder encoder;
@@ -37,8 +35,19 @@ struct imx_parallel_display {
        u32 bus_format;
        struct drm_display_mode mode;
        struct drm_panel *panel;
+       struct drm_bridge *bridge;
 };
 
+static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
+{
+       return container_of(c, struct imx_parallel_display, connector);
+}
+
+static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
+{
+       return container_of(e, struct imx_parallel_display, encoder);
+}
+
 static enum drm_connector_status imx_pd_connector_detect(
                struct drm_connector *connector, bool force)
 {
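
Swapping the con_to_imxpd()/enc_to_imxpd() macros for static inline functions, as the hunk above does, changes no behavior; it gives the argument a real type, so passing the wrong pointer becomes a clean compile-time diagnostic and the helpers are visible to debuggers and tag tools. The pattern in isolation, with illustrative foo_* names:

    #include <linux/kernel.h>       /* container_of() */
    #include <linux/list.h>

    struct foo_ctx {
            int id;
            struct list_head node;          /* embedded member */
    };

    /* was: #define to_foo_ctx(x) container_of(x, struct foo_ctx, node) */
    static inline struct foo_ctx *to_foo_ctx(struct list_head *n)
    {
            return container_of(n, struct foo_ctx, node);
    }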
@@ -53,11 +62,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
 
        if (imxpd->panel && imxpd->panel->funcs &&
            imxpd->panel->funcs->get_modes) {
-               struct drm_display_info *di = &connector->display_info;
-
                num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
-               if (!imxpd->bus_format && di->num_bus_formats)
-                       imxpd->bus_format = di->bus_formats[0];
                if (num_modes > 0)
                        return num_modes;
        }
@@ -69,10 +74,16 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
 
        if (np) {
                struct drm_display_mode *mode = drm_mode_create(connector->dev);
+               int ret;
 
                if (!mode)
                        return -EINVAL;
-               of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE);
+
+               ret = of_get_drm_display_mode(np, &imxpd->mode,
+                                             OF_USE_NATIVE_MODE);
+               if (ret)
+                       return ret;
+
                drm_mode_copy(mode, &imxpd->mode);
                mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
                drm_mode_probed_add(connector, mode);
@@ -90,24 +101,7 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
        return &imxpd->encoder;
 }
 
-static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
-
-       if (mode != DRM_MODE_DPMS_ON)
-               drm_panel_disable(imxpd->panel);
-       else
-               drm_panel_enable(imxpd->panel);
-}
-
-static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
-{
-       struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
-       imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
-                              imxpd->connector.display_info.bus_flags);
-}
-
-static void imx_pd_encoder_commit(struct drm_encoder *encoder)
+static void imx_pd_encoder_enable(struct drm_encoder *encoder)
 {
        struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
 
@@ -115,12 +109,6 @@ static void imx_pd_encoder_commit(struct drm_encoder *encoder)
        drm_panel_enable(imxpd->panel);
 }
 
-static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
-                        struct drm_display_mode *orig_mode,
-                        struct drm_display_mode *mode)
-{
-}
-
 static void imx_pd_encoder_disable(struct drm_encoder *encoder)
 {
        struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
@@ -129,11 +117,33 @@ static void imx_pd_encoder_disable(struct drm_encoder *encoder)
        drm_panel_unprepare(imxpd->panel);
 }
 
+static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
+                                      struct drm_crtc_state *crtc_state,
+                                      struct drm_connector_state *conn_state)
+{
+       struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+       struct drm_display_info *di = &conn_state->connector->display_info;
+       struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+
+       imx_crtc_state->bus_flags = di->bus_flags;
+       if (!imxpd->bus_format && di->num_bus_formats)
+               imx_crtc_state->bus_format = di->bus_formats[0];
+       else
+               imx_crtc_state->bus_format = imxpd->bus_format;
+       imx_crtc_state->di_hsync_pin = 2;
+       imx_crtc_state->di_vsync_pin = 3;
+
+       return 0;
+}
+
 static const struct drm_connector_funcs imx_pd_connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = imx_pd_connector_detect,
        .destroy = imx_drm_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
@@ -146,20 +156,18 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
 };
 
 static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
-       .dpms = imx_pd_encoder_dpms,
-       .prepare = imx_pd_encoder_prepare,
-       .commit = imx_pd_encoder_commit,
-       .mode_set = imx_pd_encoder_mode_set,
+       .enable = imx_pd_encoder_enable,
        .disable = imx_pd_encoder_disable,
+       .atomic_check = imx_pd_encoder_atomic_check,
 };
 
 static int imx_pd_register(struct drm_device *drm,
        struct imx_parallel_display *imxpd)
 {
+       struct drm_encoder *encoder = &imxpd->encoder;
        int ret;
 
-       ret = imx_drm_encoder_parse_of(drm, &imxpd->encoder,
-                                      imxpd->dev->of_node);
+       ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
        if (ret)
                return ret;
 
@@ -170,19 +178,33 @@ static int imx_pd_register(struct drm_device *drm,
         */
        imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
 
-       drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
-       drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
+       drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
+       drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
                         DRM_MODE_ENCODER_NONE, NULL);
 
-       drm_connector_helper_add(&imxpd->connector,
-                       &imx_pd_connector_helper_funcs);
-       drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs,
-                          DRM_MODE_CONNECTOR_VGA);
+       if (!imxpd->bridge) {
+               drm_connector_helper_add(&imxpd->connector,
+                               &imx_pd_connector_helper_funcs);
+               drm_connector_init(drm, &imxpd->connector,
+                                  &imx_pd_connector_funcs,
+                                  DRM_MODE_CONNECTOR_VGA);
+       }
 
        if (imxpd->panel)
                drm_panel_attach(imxpd->panel, &imxpd->connector);
 
-       drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
+       if (imxpd->bridge) {
+               imxpd->bridge->encoder = encoder;
+               encoder->bridge = imxpd->bridge;
+               ret = drm_bridge_attach(drm, imxpd->bridge);
+               if (ret < 0) {
+                       dev_err(imxpd->dev, "failed to attach bridge: %d\n",
+                               ret);
+                       return ret;
+               }
+       } else {
+               drm_mode_connector_attach_encoder(&imxpd->connector, encoder);
+       }
 
        return 0;
 }
@@ -195,6 +217,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
        const u8 *edidp;
        struct imx_parallel_display *imxpd;
        int ret;
+       u32 bus_format = 0;
        const char *fmt;
 
        imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
@@ -208,14 +231,15 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
        ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
        if (!ret) {
                if (!strcmp(fmt, "rgb24"))
-                       imxpd->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+                       bus_format = MEDIA_BUS_FMT_RGB888_1X24;
                else if (!strcmp(fmt, "rgb565"))
-                       imxpd->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
+                       bus_format = MEDIA_BUS_FMT_RGB565_1X16;
                else if (!strcmp(fmt, "bgr666"))
-                       imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+                       bus_format = MEDIA_BUS_FMT_RGB666_1X18;
                else if (!strcmp(fmt, "lvds666"))
-                       imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+                       bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
        }
+       imxpd->bus_format = bus_format;
 
        /* port@1 is the output port */
        ep = of_graph_get_endpoint_by_regs(np, 1, -1);
@@ -223,13 +247,30 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
                struct device_node *remote;
 
                remote = of_graph_get_remote_port_parent(ep);
+               if (!remote) {
+                       dev_warn(dev, "endpoint %s not connected\n",
+                                ep->full_name);
+                       of_node_put(ep);
+                       return -ENODEV;
+               }
                of_node_put(ep);
-               if (remote) {
-                       imxpd->panel = of_drm_find_panel(remote);
-                       of_node_put(remote);
+
+               imxpd->panel = of_drm_find_panel(remote);
+               if (imxpd->panel) {
+                       dev_dbg(dev, "found panel %s\n", remote->full_name);
+               } else {
+                       imxpd->bridge = of_drm_find_bridge(remote);
+                       if (imxpd->bridge)
+                               dev_dbg(dev, "found bridge %s\n",
+                                       remote->full_name);
                }
-               if (!imxpd->panel)
+               if (!imxpd->panel && !imxpd->bridge) {
+                       dev_dbg(dev, "waiting for panel or bridge %s\n",
+                               remote->full_name);
+                       of_node_put(remote);
                        return -EPROBE_DEFER;
+               }
+               of_node_put(remote);
        }
 
        imxpd->dev = dev;
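
The reworked bind path above walks the output endpoint of the OF graph and accepts either a panel or a bridge at the remote end, deferring the probe only while neither provider has registered. Condensed to its core, and only as a sketch (this is not the driver's exact code; of_drm_find_panel() returns NULL rather than an ERR_PTR in this kernel generation):

    #include <linux/errno.h>
    #include <linux/of_graph.h>
    #include <drm/drm_crtc.h>       /* of_drm_find_bridge(), in this era */
    #include <drm/drm_panel.h>

    static int foo_find_panel_or_bridge(struct device_node *ep,
                                        struct drm_panel **panel,
                                        struct drm_bridge **bridge)
    {
            struct device_node *remote = of_graph_get_remote_port_parent(ep);

            if (!remote)
                    return -ENODEV;         /* endpoint is not connected */

            *panel = of_drm_find_panel(remote);
            if (!*panel)
                    *bridge = of_drm_find_bridge(remote);
            of_node_put(remote);

            /* provider not bound yet: ask the core to probe us again later */
            return (*panel || *bridge) ? 0 : -EPROBE_DEFER;
    }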
index ba812ef2c9d1304d9e2e24d8c414eee9abba4941..334562d067311300de2a27c7c802ad5b7a905fc1 100644
@@ -1535,7 +1535,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
  * HDMI audio codec callbacks
  */
 
-static int mtk_hdmi_audio_hw_params(struct device *dev,
+static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
                                    struct hdmi_codec_daifmt *daifmt,
                                    struct hdmi_codec_params *params)
 {
@@ -1604,7 +1604,7 @@ static int mtk_hdmi_audio_hw_params(struct device *dev,
        return 0;
 }
 
-static int mtk_hdmi_audio_startup(struct device *dev)
+static int mtk_hdmi_audio_startup(struct device *dev, void *data)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
@@ -1615,7 +1615,7 @@ static int mtk_hdmi_audio_startup(struct device *dev)
        return 0;
 }
 
-static void mtk_hdmi_audio_shutdown(struct device *dev)
+static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
@@ -1624,7 +1624,7 @@ static void mtk_hdmi_audio_shutdown(struct device *dev)
        mtk_hdmi_audio_disable(hdmi);
 }
 
-int mtk_hdmi_audio_digital_mute(struct device *dev, bool enable)
+int mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
@@ -1638,7 +1638,7 @@ int mtk_hdmi_audio_digital_mute(struct device *dev, bool enable)
        return 0;
 }
 
-static int mtk_hdmi_audio_get_eld(struct device *dev, uint8_t *buf, size_t len)
+static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
index 9d5083d0f1eec7a0c84c2f4e9bb7746e4f21e785..68268e55d595c343eed84053cb5bc19b4918f529 100644
@@ -186,17 +186,6 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
 {
 }
 
-static int mgag200_bo_move(struct ttm_buffer_object *bo,
-                      bool evict, bool interruptible,
-                      bool no_wait_gpu,
-                      struct ttm_mem_reg *new_mem)
-{
-       int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-       return r;
-}
-
-
 static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
 {
        ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
        .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
        .init_mem_type = mgag200_bo_init_mem_type,
        .evict_flags = mgag200_bo_evict_flags,
-       .move = mgag200_bo_move,
+       .move = NULL,
        .verify_access = mgag200_bo_verify_access,
        .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
        .io_mem_free = &mgag200_ttm_io_mem_free,
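
Deleting mgag200_bo_move() and leaving .move = NULL is not a functional change: when a TTM driver provides no move hook, the core of this series falls back to ttm_bo_move_memcpy() itself, which is all the removed wrapper did. Paraphrasing the dispatch in ttm_bo_handle_move_mem(), with the argument order used by the nouveau hunks later in this diff (a paraphrase, not a quote):

    if (bdev->driver->move)
            ret = bdev->driver->move(bo, evict, interruptible,
                                     no_wait_gpu, mem);
    else
            ret = ttm_bo_move_memcpy(bo, evict, interruptible,
                                     no_wait_gpu, mem);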
index fbe304ee6c809548bf734be1e9ee7d56a1554392..2aec27dbb5bbbb36f5236e7488ed1677ae67de10 100644
@@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        }
 
        adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
-       if (!adreno_gpu->memptrs) {
+       if (IS_ERR(adreno_gpu->memptrs)) {
                dev_err(drm->dev, "could not vmap memptrs\n");
                return -ENOMEM;
        }
index 1a061e3e8b9ebb95489f87b3070c2816a8bb5f41..a9223bea871b7c647351c6bb527b49843b012fef 100644
@@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        dev->mode_config.fb_base = paddr;
 
        fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+       if (IS_ERR(fbi->screen_base)) {
+               ret = PTR_ERR(fbi->screen_base);
+               goto fail_unlock;
+       }
        fbi->screen_size = fbdev->bo->size;
        fbi->fix.smem_start = paddr;
        fbi->fix.smem_len = fbdev->bo->size;
index 7daf4054dd2b78ea634641bf4554fa6cb3d3a444..69836f5685b1ba63b3038ce90da38e9b18d8c6f3 100644
@@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
                        return ERR_CAST(pages);
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+               if (msm_obj->vaddr == NULL)
+                       return ERR_PTR(-ENOMEM);
        }
        return msm_obj->vaddr;
 }
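
msm_gem_vaddr_locked() now encodes vmap() failure as ERR_PTR(-ENOMEM) instead of returning NULL, and the callers patched through this series (adreno_gpu.c, msm_fbdev.c, msm_rd.c, msm_ringbuffer.c) test with IS_ERR(); note the adreno hunk still returns a hard-coded -ENOMEM, which happens to match the only error this mapping can produce. The convention in miniature:

    #include <linux/err.h>
    #include <linux/vmalloc.h>

    /* sketch: encode the errno in the returned pointer itself */
    static void *foo_map(size_t n)
    {
            void *p = vmalloc(n);

            return p ? p : ERR_PTR(-ENOMEM);
    }

    static int foo_use(size_t n)
    {
            void *p = foo_map(n);

            if (IS_ERR(p))
                    return PTR_ERR(p);      /* a real errno, not a NULL test */
            vfree(p);
            return 0;
    }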
index b89ca5174863e7ab73c48117605ded1d56626e19..eb4bb8b2f3a5cae6799e1ef142af1be3da568e5d 100644
@@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
        submit->dev = dev;
        submit->gpu = gpu;
+       submit->fence = NULL;
        submit->pid = get_pid(task_pid(current));
 
        /* initially, until copy_from_user() and bo lookup succeeds: */
        submit->nr_bos = 0;
        submit->nr_cmds = 0;
 
+       INIT_LIST_HEAD(&submit->node);
        INIT_LIST_HEAD(&submit->bo_list);
        ww_acquire_init(&submit->ticket, &reservation_ww_class);
 
@@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                void __user *userptr =
                        u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
 
+               /* make sure we don't have garbage flags, in case we hit
+                * error path before flags is initialized:
+                */
+               submit->bos[i].flags = 0;
+
                ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
                if (ret) {
                        ret = -EFAULT;
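
The two msm_gem_submit hunks are defensive initialization: submit->fence, submit->node and each bos[i].flags are given known values before the first operation that can fail, so the shared error path never acts on uninitialized memory (a list_del() on a never-linked node, or flag-driven cleanup reading garbage). Distilled into a standalone sketch:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct foo_job {
            struct list_head node;
            unsigned int flags;
    };

    static struct foo_job *foo_job_create(void)
    {
            struct foo_job *job = kmalloc(sizeof(*job), GFP_KERNEL);

            if (!job)
                    return NULL;

            INIT_LIST_HEAD(&job->node);     /* list_del() is now always safe */
            job->flags = 0;                 /* cleanup never sees garbage */
            return job;
    }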
index b48f73ac63896a087938ae6225836a5d30d1a482..0857710c2ff2f809751bbab313045dfa8acd77cd 100644
@@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
                struct msm_gem_object *obj = submit->bos[idx].obj;
                const char *buf = msm_gem_vaddr_locked(&obj->base);
 
+               if (IS_ERR(buf))
+                       continue;
+
                buf += iova - submit->bos[idx].iova;
 
                rd_write_section(rd, RD_GPUADDR,
index 1f14b908b22136117eb2b37179d05abfedce2b13..42f5359cf98889cfa837f9ec60eb6d55f1b6e09d 100644
@@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
        }
 
        ring->start = msm_gem_vaddr_locked(ring->bo);
+       if (IS_ERR(ring->start)) {
+               ret = PTR_ERR(ring->start);
+               goto fail;
+       }
        ring->end   = ring->start + (size / 4);
        ring->cur   = ring->start;
 
index 331620a52afa69e8575728d1f4e1d6093a3d39dc..287a7d6fa4804d9dd0abe901d322f1fa3ed5ebd1 100644
@@ -29,6 +29,7 @@ struct nv_device_info_v0 {
 #define NV_DEVICE_INFO_V0_FERMI                                            0x07
 #define NV_DEVICE_INFO_V0_KEPLER                                           0x08
 #define NV_DEVICE_INFO_V0_MAXWELL                                          0x09
+#define NV_DEVICE_INFO_V0_PASCAL                                           0x0a
        __u8  family;
        __u8  pad06[2];
        __u64 ram_size;
index 982aad8fa645f5f13562a31c1fa5085cbb83e4cb..e6e9537537cf96e7eba26b32b44f6ed600e8b87e 100644
@@ -39,6 +39,7 @@
 #define KEPLER_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000a06f
 #define KEPLER_CHANNEL_GPFIFO_B                       /* cla06f.h */ 0x0000a16f
 #define MAXWELL_CHANNEL_GPFIFO_A                      /* cla06f.h */ 0x0000b06f
+#define PASCAL_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000c06f
 
 #define NV50_DISP                                     /* cl5070.h */ 0x00005070
 #define G82_DISP                                      /* cl5070.h */ 0x00008270
@@ -50,6 +51,8 @@
 #define GK110_DISP                                    /* cl5070.h */ 0x00009270
 #define GM107_DISP                                    /* cl5070.h */ 0x00009470
 #define GM200_DISP                                    /* cl5070.h */ 0x00009570
+#define GP100_DISP                                    /* cl5070.h */ 0x00009770
+#define GP104_DISP                                    /* cl5070.h */ 0x00009870
 
 #define NV31_MPEG                                                    0x00003174
 #define G82_MPEG                                                     0x00008274
@@ -86,6 +89,8 @@
 #define GK110_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000927d
 #define GM107_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000947d
 #define GM200_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000957d
+#define GP100_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000977d
+#define GP104_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000987d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                 /* cl507e.h */ 0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                  /* cl507e.h */ 0x0000827e
 #define MAXWELL_A                                     /* cl9097.h */ 0x0000b097
 #define MAXWELL_B                                     /* cl9097.h */ 0x0000b197
 
+#define PASCAL_A                                      /* cl9097.h */ 0x0000c097
+
 #define NV74_BSP                                                     0x000074b0
 
 #define GT212_MSVLD                                                  0x000085b1
 #define FERMI_DMA                                                    0x000090b5
 #define KEPLER_DMA_COPY_A                                            0x0000a0b5
 #define MAXWELL_DMA_COPY_A                                           0x0000b0b5
+#define PASCAL_DMA_COPY_A                                            0x0000c0b5
+#define PASCAL_DMA_COPY_B                                            0x0000c1b5
 
 #define FERMI_DECOMPRESS                                             0x000090b8
 
 #define KEPLER_COMPUTE_B                                             0x0000a1c0
 #define MAXWELL_COMPUTE_A                                            0x0000b0c0
 #define MAXWELL_COMPUTE_B                                            0x0000b1c0
+#define PASCAL_COMPUTE_A                                             0x0000c0c0
 
 #define NV74_CIPHER                                                  0x000074c1
 #endif
index c612dc1f1eb4ef651faa4ec398904a894b0ac814..7ea8aa7ca40859b0bcdf4cca65f57e4ba1caacd9 100644
@@ -16,9 +16,9 @@ enum nvkm_devidx {
        NVKM_SUBDEV_MC,
        NVKM_SUBDEV_BUS,
        NVKM_SUBDEV_TIMER,
+       NVKM_SUBDEV_INSTMEM,
        NVKM_SUBDEV_FB,
        NVKM_SUBDEV_LTC,
-       NVKM_SUBDEV_INSTMEM,
        NVKM_SUBDEV_MMU,
        NVKM_SUBDEV_BAR,
        NVKM_SUBDEV_PMU,
@@ -33,7 +33,10 @@ enum nvkm_devidx {
        NVKM_ENGINE_CE0,
        NVKM_ENGINE_CE1,
        NVKM_ENGINE_CE2,
-       NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2,
+       NVKM_ENGINE_CE3,
+       NVKM_ENGINE_CE4,
+       NVKM_ENGINE_CE5,
+       NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5,
 
        NVKM_ENGINE_CIPHER,
        NVKM_ENGINE_DISP,
@@ -50,7 +53,8 @@ enum nvkm_devidx {
 
        NVKM_ENGINE_NVENC0,
        NVKM_ENGINE_NVENC1,
-       NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1,
+       NVKM_ENGINE_NVENC2,
+       NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
 
        NVKM_ENGINE_NVDEC,
        NVKM_ENGINE_PM,
@@ -102,6 +106,7 @@ struct nvkm_device {
                NV_C0    = 0xc0,
                NV_E0    = 0xe0,
                GM100    = 0x110,
+               GP100    = 0x130,
        } card_type;
        u32 chipset;
        u8  chiprev;
@@ -136,7 +141,7 @@ struct nvkm_device {
        struct nvkm_volt *volt;
 
        struct nvkm_engine *bsp;
-       struct nvkm_engine *ce[3];
+       struct nvkm_engine *ce[6];
        struct nvkm_engine *cipher;
        struct nvkm_disp *disp;
        struct nvkm_dma *dma;
@@ -149,7 +154,7 @@ struct nvkm_device {
        struct nvkm_engine *mspdec;
        struct nvkm_engine *msppp;
        struct nvkm_engine *msvld;
-       struct nvkm_engine *nvenc[2];
+       struct nvkm_engine *nvenc[3];
        struct nvkm_engine *nvdec;
        struct nvkm_pm *pm;
        struct nvkm_engine *sec;
@@ -170,7 +175,6 @@ struct nvkm_device_func {
        void (*fini)(struct nvkm_device *, bool suspend);
        resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
        resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
-       bool cpu_coherent;
 };
 
 struct nvkm_device_quirk {
@@ -206,7 +210,7 @@ struct nvkm_device_chip {
        int (*volt    )(struct nvkm_device *, int idx, struct nvkm_volt **);
 
        int (*bsp     )(struct nvkm_device *, int idx, struct nvkm_engine **);
-       int (*ce[3]   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+       int (*ce[6]   )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*cipher  )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*disp    )(struct nvkm_device *, int idx, struct nvkm_disp **);
        int (*dma     )(struct nvkm_device *, int idx, struct nvkm_dma **);
@@ -219,7 +223,7 @@ struct nvkm_device_chip {
        int (*mspdec  )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*msppp   )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*msvld   )(struct nvkm_device *, int idx, struct nvkm_engine **);
-       int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **);
+       int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*nvdec   )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*pm      )(struct nvkm_device *, int idx, struct nvkm_pm **);
        int (*sec     )(struct nvkm_device *, int idx, struct nvkm_engine **);
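
Pascal brings more engine instances, so the fixed tables in this header widen (ce[3] to ce[6], nvenc[2] to nvenc[3]) together with the new NVKM_ENGINE_CE3..CE5 and NVKM_ENGINE_NVENC2 enum slots and their updated *_LAST markers. Code that iterates instances can key off the markers, so only this header has to change when a family grows again; a hypothetical helper in that style:

    /* hypothetical: count populated copy engines via the range markers */
    static int foo_count_ce(struct nvkm_device *device)
    {
            int i, nr = 0;

            for (i = NVKM_ENGINE_CE0; i <= NVKM_ENGINE_CE_LAST; i++)
                    if (device->ce[i - NVKM_ENGINE_CE0])
                            nr++;
            return nr;
    }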
index b5370cb56e3c2c4fbb539ffd9f0156c569af38c1..e5c9b6268dcca59efdaab12e5922b3a5e0ac2b2b 100644
@@ -28,6 +28,7 @@ struct nvkm_device_tegra {
        } iommu;
 
        int gpu_speedo;
+       int gpu_speedo_id;
 };
 
 struct nvkm_device_tegra_func {
index 594d719ba41e81edd21f8b079d8f6bfbd85a7a6a..d3d26a1e215dacc0d94d180bd4cdea4f89ebb48a 100644
@@ -7,4 +7,6 @@ int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
index d4fdce27b2978173bab089f54004ba1f4f5392f1..e82049667ce495f534921611d0ef55e9aea454be 100644
@@ -32,4 +32,6 @@ int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
index 15ddfcf5e8dbf2e6d1a459a361436625099e7d43..ed92fec5292c5b41d7de5b2e4ed01ce0c04e2feb 100644
@@ -66,4 +66,5 @@ int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 #endif
index 6515f5810a265e334a7c3d21af9c365bf3dcea19..89cf993078283f8bca3c5016329f9e5404806c93 100644
@@ -42,4 +42,5 @@ int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
index e39a1fea930beefa28d0ab5ac2400cb7181952ad..a72f3290528a4a0c8bddca75c3d8f3b6e186c95c 100644
@@ -7,6 +7,9 @@ struct nvkm_bios {
        u32 size;
        u8 *data;
 
+       u32 image0_size;
+       u32 imaged_addr;
+
        u32 bmp_offset;
        u32 bit_offset;
 
@@ -22,10 +25,9 @@ struct nvkm_bios {
 u8  nvbios_checksum(const u8 *data, int size);
 u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
 int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
-
-#define nvbios_rd08(b,o) (b)->data[(o)]
-#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
-#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
+u8  nvbios_rd08(struct nvkm_bios *, u32 addr);
+u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
+u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
 
 int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
 #endif
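
Turning nvbios_rd08/16/32 from raw data[] macros into functions gives the BIOS layer a single choke point for every image access, which the new image0_size/imaged_addr fields call for: a second VBIOS image mapped at another address can be redirected or bounds-checked in one place. The signatures above are the real ones; the body below is only one plausible shape, not the in-tree implementation.

    u8 nvbios_rd08(struct nvkm_bios *bios, u32 addr)
    {
            /* illustrative body: clamp instead of reading past the image */
            if (addr < bios->size)
                    return bios->data[addr];
            return 0x00;
    }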
index db10c11f0595deabf3f3acbcac7299b658a388a8..c5a6ebd5a478691c90c61175adec8cb4df6c4600 100644
@@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
 
 struct nvbios_ocfg {
-       u16 match;
+       u8  proto;
+       u8  flags;
        u16 clkcmp[2];
 };
 
@@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
 u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
 u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
 #endif
index 0a734fd06acf347aeee52368ee159c62d99a4e75..3a410275fa71165c3adb6f0a7765e896ff7ce1ad 100644
@@ -56,6 +56,8 @@ struct nvkm_fb {
                int regions;
        } tile;
 
+       u8 page;
+
        struct nvkm_memory *mmu_rd;
        struct nvkm_memory *mmu_wr;
 };
@@ -91,6 +93,8 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 
 #include <subdev/bios.h>
 #include <subdev/bios/ramcfg.h>
index c6b90b6543b3e8751b5effbfb0f888f2e210fbe5..cd755baf9cab709b1384b2f1a0177117a5258721 100644
@@ -38,4 +38,5 @@ int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 #endif
index 2e80682b2da1a7305d65c5911c099697120bef68..27d25b18d85c0d1da40adb950ac9f55826953f5f 100644
@@ -7,11 +7,14 @@ struct nvkm_mc {
        struct nvkm_subdev subdev;
 };
 
-void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
-void nvkm_mc_intr_unarm(struct nvkm_mc *);
-void nvkm_mc_intr_rearm(struct nvkm_mc *);
-void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx);
-void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
+void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_intr(struct nvkm_device *, bool *handled);
+void nvkm_mc_intr_unarm(struct nvkm_device *);
+void nvkm_mc_intr_rearm(struct nvkm_device *);
+void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable);
+void nvkm_mc_unk260(struct nvkm_device *, u32 data);
 
 int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
@@ -24,4 +27,5 @@ int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 #endif
index ddb913889d7e2b69307b4d823fae72ce9e9697c8..e6523e2cea9fee49300e14b194dfe87f5e7ed94f 100644
@@ -47,6 +47,7 @@ int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 
 /* pcie functions */
 int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
index c6edd95a5b69c4e113eb9599ad4dbc6bbebe53db..b04c38c07761e60554396cae8e51f9114058f86e 100644
@@ -43,9 +43,8 @@ struct nvkm_secboot {
        const struct nvkm_secboot_func *func;
        struct nvkm_subdev subdev;
 
+       enum nvkm_devidx devidx;
        u32 base;
-       u32 irq_mask;
-       u32 enable_mask;
 };
 #define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
 
index 8fb575a92c48f69336b844eb09575fbfcc73beb5..71ebbfd4484fc9b824cdb84cb07d30449c7576e7 100644
@@ -8,10 +8,11 @@ struct nvkm_top {
        struct list_head device;
 };
 
-u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx);
-u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs);
-enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault);
-enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn);
+u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
+u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
+u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
+enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
+enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
 
 int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
 #endif
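
As with the nvkm_mc_* changes a few files up, the TOP helpers now take the struct nvkm_device rather than the subdev pointer, so callers stop fetching and NULL-checking the subdev themselves, and chips without the unit simply get the no-op answer. The shape of such a wrapper, with a hypothetical lookup helper standing in for the real table walk:

    u32 nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
    {
            struct nvkm_top *top = device->top;

            if (!top)
                    return 0;       /* no TOP unit on this chipset */
            return foo_lookup_intr_mask(top, devidx);       /* hypothetical */
    }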
index feff55cff05bbf994ffed76ecf9ca05eac3055b6..b765f4ffcde63ba284aeba11ba0d757624e91253 100644
@@ -12,6 +12,9 @@ struct nvkm_volt {
                u32 uv;
                u8 vid;
        } vid[256];
+
+       u32 max_uv;
+       u32 min_uv;
 };
 
 int nvkm_volt_get(struct nvkm_volt *);
index eb7de487a2b3f16936c70b183c4945d1e26072a8..7bd4683216d016dcbc78aa088a2b7bd67d4453a5 100644
@@ -100,6 +100,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
        case NV_DEVICE_INFO_V0_FERMI:
        case NV_DEVICE_INFO_V0_KEPLER:
        case NV_DEVICE_INFO_V0_MAXWELL:
+       case NV_DEVICE_INFO_V0_PASCAL:
                return NVIF_CLASS_SW_GF100;
        }
 
index 5e3f3e826476cc0b71b64bc12d615841191cebd7..528bdeffb339ecaaa887c97887dc80427a153c77 100644
@@ -209,8 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &drm->ttm.bdev;
 
-       if (!nvxx_device(&drm->device)->func->cpu_coherent)
-               nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+       nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
        nvbo->page_shift = 12;
        if (drm->client.vm) {
@@ -424,13 +423,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
        if (ret)
                return ret;
 
-       /*
-        * TTM buffers allocated using the DMA API already have a mapping, let's
-        * use it instead.
-        */
-       if (!nvbo->force_coherent)
-               ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
-                                 &nvbo->kmap);
+       ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
 
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
@@ -442,12 +435,7 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
        if (!nvbo)
                return;
 
-       /*
-        * TTM buffers allocated using the DMA API already had a coherent
-        * mapping which we used, no need to unmap.
-        */
-       if (!nvbo->force_coherent)
-               ttm_bo_kunmap(&nvbo->kmap);
+       ttm_bo_kunmap(&nvbo->kmap);
 }
 
 void
@@ -506,35 +494,13 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
        return 0;
 }
 
-static inline void *
-_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
-{
-       struct ttm_dma_tt *dma_tt;
-       u8 *m = mem;
-
-       index *= sz;
-
-       if (m) {
-               /* kmap'd address, return the corresponding offset */
-               m += index;
-       } else {
-               /* DMA-API mapping, lookup the right address */
-               dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
-               m = dma_tt->cpu_address[index / PAGE_SIZE];
-               m += index % PAGE_SIZE;
-       }
-
-       return m;
-}
-#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
-
 void
 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
 {
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 
-       mem = nouveau_bo_mem_index(nvbo, index, mem);
+       mem += index;
 
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
@@ -548,7 +514,7 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 
-       mem = nouveau_bo_mem_index(nvbo, index, mem);
+       mem += index;
 
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
@@ -562,7 +528,7 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 
-       mem = nouveau_bo_mem_index(nvbo, index, mem);
+       mem += index;
 
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
@@ -1082,7 +1048,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                                ret = ttm_bo_move_accel_cleanup(bo,
                                                                &fence->base,
                                                                evict,
-                                                               no_wait_gpu,
                                                                new_mem);
                                nouveau_fence_unref(&fence);
                        }
@@ -1104,6 +1069,10 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
                            struct ttm_mem_reg *, struct ttm_mem_reg *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
+               {  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
+               {  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
+               {  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
+               {  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1289,6 +1258,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
        struct nouveau_drm_tile *new_tile = NULL;
        int ret = 0;
 
+       ret = ttm_bo_wait(bo, intr, no_wait_gpu);
+       if (ret)
+               return ret;
+
        if (nvbo->pin_refcnt)
                NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
 
@@ -1324,7 +1297,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
        /* Fallback to software copy. */
        ret = ttm_bo_wait(bo, intr, no_wait_gpu);
        if (ret == 0)
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+               ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
 
 out:
        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1488,14 +1461,6 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
        dev = drm->dev;
        pdev = device->dev;
 
-       /*
-        * Objects matching this condition have been marked as force_coherent,
-        * so use the DMA API for them.
-        */
-       if (!nvxx_device(&drm->device)->func->cpu_coherent &&
-           ttm->caching_state == tt_uncached)
-               return ttm_dma_populate(ttm_dma, dev->dev);
-
 #if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
                return ttm_agp_tt_populate(ttm);
@@ -1553,16 +1518,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
        dev = drm->dev;
        pdev = device->dev;
 
-       /*
-        * Objects matching this condition have been marked as force_coherent,
-        * so use the DMA API for them.
-        */
-       if (!nvxx_device(&drm->device)->func->cpu_coherent &&
-           ttm->caching_state == tt_uncached) {
-               ttm_dma_unpopulate(ttm_dma, dev->dev);
-               return;
-       }
-
 #if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
                ttm_agp_tt_unpopulate(ttm);
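
Two things are going on in the nouveau_bo.c hunks: the DMA-API special case for coherent buffers is removed, so every BO is mapped through ttm_bo_kmap() and the rd/wr accessors become plain pointer arithmetic; and the accelerated-copy method table gains the Pascal classes at its head. That table is walked newest-first, so enabling a new GPU generation is mostly a matter of prepending its class IDs. A generic sketch of the probe order, with class numbers taken from the hunk above and foo_* names invented:

    #include <linux/kernel.h>       /* ARRAY_SIZE() */
    #include <linux/types.h>

    /* sketch: keep the first copy class the channel accepts, newest first */
    static const u16 foo_copy_classes[] = {
            0xc1b5, /* PASCAL_DMA_COPY_B */
            0xc0b5, /* PASCAL_DMA_COPY_A */
            0xb0b5, /* MAXWELL_DMA_COPY_A */
            0xa0b5, /* KEPLER_DMA_COPY_A */
    };

    static u16 foo_pick_copy_class(bool (*supported)(u16 oclass))
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(foo_copy_classes); i++)
                    if (supported(foo_copy_classes[i]))
                            return foo_copy_classes[i];
            return 0;       /* no engine copy: fall back to CPU memcpy */
    }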
index b1d2527c56256372007537399f0abaa11d4f0d49..f9b3c811187e117aba807697d3e27b984f0d0bf3 100644
@@ -191,7 +191,8 @@ static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                    u32 engine, struct nouveau_channel **pchan)
 {
-       static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
+       static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
+                                       MAXWELL_CHANNEL_GPFIFO_A,
                                        KEPLER_CHANNEL_GPFIFO_B,
                                        KEPLER_CHANNEL_GPFIFO_A,
                                        FERMI_CHANNEL_GPFIFO,
index 6e97862d31a12db0d291736eb1a0d8aa17ca2f8d..afbf557b23d4395f34c15a299a6b5e2e9510c924 100644
@@ -495,6 +495,8 @@ nouveau_display_create(struct drm_device *dev)
 
        if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
                static const u16 oclass[] = {
+                       GP104_DISP,
+                       GP100_DISP,
                        GM200_DISP,
                        GM107_DISP,
                        GK110_DISP,
index 1161f8b452f23c6c265aa567baa9da33b5157e21..66c1280c0f1f2854aa501f810c50a11e6149eaaa 100644
@@ -198,6 +198,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
                case KEPLER_CHANNEL_GPFIFO_A:
                case KEPLER_CHANNEL_GPFIFO_B:
                case MAXWELL_CHANNEL_GPFIFO_A:
+               case PASCAL_CHANNEL_GPFIFO_A:
                        ret = nvc0_fence_create(drm);
                        break;
                default:
@@ -316,7 +317,16 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
        if (vga_switcheroo_client_probe_defer(pdev))
                return -EPROBE_DEFER;
 
-       /* remove conflicting drivers (vesafb, efifb etc) */
+       /* We need to check that the chipset is supported before booting
+        * fbdev off the hardware, as there's no way to put it back.
+        */
+       ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+       if (ret)
+               return ret;
+
+       nvkm_device_del(&device);
+
+       /* Remove conflicting drivers (vesafb, efifb etc). */
        aper = alloc_apertures(3);
        if (!aper)
                return -ENOMEM;
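The probe now constructs a throwaway nvkm device in detect mode before fbdev is kicked off the hardware: if the chipset is unknown, the probe bails out while efifb/vesafb are still usable, since there is no way to hand the console back afterwards. A sketch of the detect-then-delete step (argument meanings inferred from context; "error" appears to be the log level):

/* Sketch, mirroring the hunk above: detect-only probe, then teardown. */
ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
if (ret)
	return ret;           /* unsupported chipset: leave efifb in place */
nvkm_device_del(&device);     /* the real device is constructed later, in load */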
@@ -430,6 +440,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
        nouveau_vga_init(drm);
 
        if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+               if (!nvxx_device(&drm->device)->mmu) {
+                       ret = -ENOSYS;
+                       goto fail_device;
+               }
+
                ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
                                  0x1000, NULL, &drm->client.vm);
                if (ret)
index 57aaf98a26f9c3d5bcfd91e351f29c2e89b7c276..d1f248fd35061993db2529c4b93e557a4331e829 100644 (file)
@@ -552,6 +552,8 @@ nouveau_fbcon_init(struct drm_device *dev)
        if (ret)
                goto fini;
 
+       if (fbcon->helper.fbdev)
+               fbcon->helper.fbdev->pixmap.buf_align = 4;
        return 0;
 
 fini:
index 1ff4166af26ece5b20927f89c14297eb853841be..71f764bf4cc6fbca2713f974a5ed535f030e84a8 100644 (file)
@@ -534,6 +534,40 @@ nouveau_hwmon_get_in0_input(struct device *d,
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO,
                          nouveau_hwmon_get_in0_input, NULL, 0);
 
+static ssize_t
+nouveau_hwmon_get_in0_min(struct device *d,
+                           struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nvkm_volt *volt = nvxx_volt(&drm->device);
+
+       if (!volt || !volt->min_uv)
+               return -ENODEV;
+
+       return sprintf(buf, "%i\n", volt->min_uv / 1000);
+}
+
+static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO,
+                         nouveau_hwmon_get_in0_min, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_in0_max(struct device *d,
+                           struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nvkm_volt *volt = nvxx_volt(&drm->device);
+
+       if (!volt || !volt->max_uv)
+               return -ENODEV;
+
+       return sprintf(buf, "%i\n", volt->max_uv / 1000);
+}
+
+static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO,
+                         nouveau_hwmon_get_in0_max, NULL, 0);
+
 static ssize_t
 nouveau_hwmon_get_in0_label(struct device *d,
                            struct device_attribute *a, char *buf)
@@ -594,6 +628,8 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {
 
 static struct attribute *hwmon_in0_attributes[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
+       &sensor_dev_attr_in0_min.dev_attr.attr,
+       &sensor_dev_attr_in0_max.dev_attr.attr,
        &sensor_dev_attr_in0_label.dev_attr.attr,
        NULL
 };
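The new in0_min and in0_max attributes expose the voltage rail's limits alongside in0_input; nvkm_volt stores microvolts, and the /1000 converts to the millivolts hwmon expects, with -ENODEV returned when the board's volt subdev has no limit to report. Illustrative output, assuming a board where volt->min_uv is 825000:

/* Illustrative only: 825000 uV stored -> "825" (mV) read from in0_min. */
sprintf(buf, "%i\n", 825000 / 1000); /* writes "825\n" into the sysfs buffer */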
index bcee91497eb9778fb1c8c266369b33cdfe44a2b7..1825dbc331926c84de69e13606f38cf4c80a2bed 100644 (file)
@@ -164,6 +164,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
        case NV_DEVICE_INFO_V0_FERMI:
        case NV_DEVICE_INFO_V0_KEPLER:
        case NV_DEVICE_INFO_V0_MAXWELL:
+       case NV_DEVICE_INFO_V0_PASCAL:
                node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
                break;
        default:
index 0f3e4bb411cc99c9052beb5b862dd9927888ad7e..7d9248b8c66479b7b72c51a6e856e25428eebb49 100644 (file)
@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        uint32_t fg;
        uint32_t bg;
        uint32_t dsize;
-       uint32_t width;
        uint32_t *data = (uint32_t *)image->data;
        int ret;
 
@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        if (ret)
                return ret;
 
-       width = ALIGN(image->width, 8);
-       dsize = ALIGN(width * image->height, 32) >> 5;
-
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
                fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
                         ((image->dx + image->width) & 0xffff));
        OUT_RING(chan, bg);
        OUT_RING(chan, fg);
-       OUT_RING(chan, (image->height << 16) | width);
+       OUT_RING(chan, (image->height << 16) | image->width);
        OUT_RING(chan, (image->height << 16) | image->width);
        OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
 
+       dsize = ALIGN(image->width * image->height, 32) >> 5;
        while (dsize) {
                int iter_len = dsize > 128 ? 128 : dsize;
 
index 7a7788212df76748c3395d5c5416e1baa4a94e15..7d0edcbcfca7794b9f05c55dd3184ee3ebf90f2f 100644 (file)
@@ -297,6 +297,8 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
                .pushbuf = 0xb0007d00,
        };
        static const s32 oclass[] = {
+               GP104_DISP_CORE_CHANNEL_DMA,
+               GP100_DISP_CORE_CHANNEL_DMA,
                GM200_DISP_CORE_CHANNEL_DMA,
                GM107_DISP_CORE_CHANNEL_DMA,
                GK110_DISP_CORE_CHANNEL_DMA,
index 33d9ee0fac4084918c002d2164977a53e03ae65a..1aeb698e97079bd2e13c515f610a77155497b3fb 100644 (file)
@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
        struct nouveau_channel *chan = drm->channel;
-       uint32_t width, dwords, *data = (uint32_t *)image->data;
+       uint32_t dwords, *data = (uint32_t *)image->data;
        uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        uint32_t *palette = info->pseudo_palette;
        int ret;
@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        if (ret)
                return ret;
 
-       width = ALIGN(image->width, 32);
-       dwords = (width * image->height) >> 5;
-
        BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        OUT_RING(chan, 0);
        OUT_RING(chan, image->dy);
 
+       dwords = ALIGN(image->width * image->height, 32) >> 5;
        while (dwords) {
                int push = dwords > 2047 ? 2047 : dwords;
 
index a0913359ac05a02046f9286254b98fa58a650ef4..839f4c8c18056c5bce7dbd8c1511db77784609d8 100644 (file)
@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
        struct nouveau_channel *chan = drm->channel;
-       uint32_t width, dwords, *data = (uint32_t *)image->data;
+       uint32_t dwords, *data = (uint32_t *)image->data;
        uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        uint32_t *palette = info->pseudo_palette;
        int ret;
@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        if (ret)
                return ret;
 
-       width = ALIGN(image->width, 32);
-       dwords = (width * image->height) >> 5;
-
        BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        OUT_RING  (chan, 0);
        OUT_RING  (chan, image->dy);
 
+       dwords = ALIGN(image->width * image->height, 32) >> 5;
        while (dwords) {
                int push = dwords > 2047 ? 2047 : dwords;
 
index b18557858f19e25c8fbbad4b34d5ec58cd4a8d17..19044aba265eb165efa73c74d76f8f042c28873d 100644 (file)
@@ -57,6 +57,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
        [NVKM_ENGINE_CE0     ] = "ce0",
        [NVKM_ENGINE_CE1     ] = "ce1",
        [NVKM_ENGINE_CE2     ] = "ce2",
+       [NVKM_ENGINE_CE3     ] = "ce3",
+       [NVKM_ENGINE_CE4     ] = "ce4",
+       [NVKM_ENGINE_CE5     ] = "ce5",
        [NVKM_ENGINE_CIPHER  ] = "cipher",
        [NVKM_ENGINE_DISP    ] = "disp",
        [NVKM_ENGINE_DMAOBJ  ] = "dma",
@@ -71,6 +74,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
        [NVKM_ENGINE_MSVLD   ] = "msvld",
        [NVKM_ENGINE_NVENC0  ] = "nvenc0",
        [NVKM_ENGINE_NVENC1  ] = "nvenc1",
+       [NVKM_ENGINE_NVENC2  ] = "nvenc2",
        [NVKM_ENGINE_NVDEC   ] = "nvdec",
        [NVKM_ENGINE_PM      ] = "pm",
        [NVKM_ENGINE_SEC     ] = "sec",
@@ -105,7 +109,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
                }
        }
 
-       nvkm_mc_reset(device->mc, subdev->index);
+       nvkm_mc_reset(device, subdev->index);
 
        time = ktime_to_us(ktime_get()) - time;
        nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
index 9c19d59b47df60b0c5ec79661b096052cc259ddc..a4458a8eb30a7f5da77dee8960cf7aaf81e6914e 100644 (file)
@@ -3,3 +3,5 @@ nvkm-y += nvkm/engine/ce/gf100.o
 nvkm-y += nvkm/engine/ce/gk104.o
 nvkm-y += nvkm/engine/ce/gm107.o
 nvkm-y += nvkm/engine/ce/gm200.o
+nvkm-y += nvkm/engine/ce/gp100.o
+nvkm-y += nvkm/engine/ce/gp104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
new file mode 100644 (file)
index 0000000..c771045
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include <core/enum.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_enum
+gp100_ce_launcherr_report[] = {
+       { 0x0, "NO_ERR" },
+       { 0x1, "2D_LAYER_EXCEEDS_DEPTH" },
+       { 0x2, "INVALID_ALIGNMENT" },
+       { 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" },
+       { 0x4, "SRC_LINE_EXCEEDS_PITCH" },
+       { 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" },
+       { 0x6, "DST_LINE_EXCEEDS_PITCH" },
+       { 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" },
+       { 0x8, "BAD_SRC_PIXEL_COMP_REF" },
+       { 0x9, "INVALID_VALUE" },
+       { 0xa, "UNUSED_FIELD" },
+       { 0xb, "INVALID_OPERATION" },
+       { 0xc, "NO_RESOURCES" },
+       { 0xd, "INVALID_CONFIG" },
+       {}
+};
+
+static void
+gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
+{
+       struct nvkm_subdev *subdev = &ce->subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 stat = nvkm_rd32(device, 0x104418 + base);
+       const struct nvkm_enum *en =
+               nvkm_enum_find(gp100_ce_launcherr_report, stat & 0x0000000f);
+       nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : "");
+}
+
+void
+gp100_ce_intr(struct nvkm_engine *ce)
+{
+       const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
+       struct nvkm_subdev *subdev = &ce->subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 mask = nvkm_rd32(device, 0x10440c + base);
+       u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
+       if (intr & 0x00000001) { //XXX: guess
+               nvkm_warn(subdev, "BLOCKPIPE\n");
+               nvkm_wr32(device, 0x104410 + base, 0x00000001);
+               intr &= ~0x00000001;
+       }
+       if (intr & 0x00000002) { //XXX: guess
+               nvkm_warn(subdev, "NONBLOCKPIPE\n");
+               nvkm_wr32(device, 0x104410 + base, 0x00000002);
+               intr &= ~0x00000002;
+       }
+       if (intr & 0x00000004) {
+               gp100_ce_intr_launcherr(ce, base);
+               nvkm_wr32(device, 0x104410 + base, 0x00000004);
+               intr &= ~0x00000004;
+       }
+       if (intr) {
+               nvkm_warn(subdev, "intr %08x\n", intr);
+               nvkm_wr32(device, 0x104410 + base, intr);
+       }
+}
+
+static const struct nvkm_engine_func
+gp100_ce = {
+       .intr = gp100_ce_intr,
+       .sclass = {
+               { -1, -1, PASCAL_DMA_COPY_A },
+               {}
+       }
+};
+
+int
+gp100_ce_new(struct nvkm_device *device, int index,
+            struct nvkm_engine **pengine)
+{
+       return nvkm_engine_new_(&gp100_ce, device, index, true, pengine);
+}
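gp100_ce_intr() is shared by every Pascal copy engine (gp104.c below reuses it via the priv.h declaration): each engine's interrupt block sits 0x80 bytes apart, so the handler derives its register base from the engine index. Assuming the CE indices are contiguous, as the subdev-name table earlier in the series suggests, the offsets work out as:

/* Illustrative, assuming NVKM_ENGINE_CE0..CE5 are contiguous: */
const u32 base = (NVKM_ENGINE_CE5 - NVKM_ENGINE_CE0) * 0x80; /* 5 * 0x80 = 0x280 */
u32 stat = nvkm_rd32(device, 0x104410 + base);               /* reads 0x104690   */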
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
new file mode 100644 (file)
index 0000000..20e0197
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include <core/enum.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+gp104_ce = {
+       .intr = gp100_ce_intr,
+       .sclass = {
+               { -1, -1, PASCAL_DMA_COPY_B },
+               { -1, -1, PASCAL_DMA_COPY_A },
+               {}
+       }
+};
+
+int
+gp104_ce_new(struct nvkm_device *device, int index,
+            struct nvkm_engine **pengine)
+{
+       return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
+}
index e2fa8b1619437597df5df065e6792e24bd11f97c..2dce405976ad6a552855aef9ce61b63cb234dc1a 100644 (file)
@@ -4,4 +4,5 @@
 
 void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
 void gk104_ce_intr(struct nvkm_engine *);
+void gp100_ce_intr(struct nvkm_engine *);
 #endif
index 4572debcb0c9862f54fa5d7f4db436b885b2df42..7218a067a6c5e85764f1f330cb05b10c7e50132c 100644 (file)
@@ -2148,6 +2148,67 @@ nv12b_chipset = {
        .sw = gf100_sw_new,
 };
 
+static const struct nvkm_device_chip
+nv130_chipset = {
+       .name = "GP100",
+       .bar = gf100_bar_new,
+       .bios = nvkm_bios_new,
+       .bus = gf100_bus_new,
+       .devinit = gm200_devinit_new,
+       .fb = gp100_fb_new,
+       .fuse = gm107_fuse_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .ltc = gp100_ltc_new,
+       .mc = gp100_mc_new,
+       .mmu = gf100_mmu_new,
+       .secboot = gm200_secboot_new,
+       .pci = gp100_pci_new,
+       .timer = gk20a_timer_new,
+       .top = gk104_top_new,
+       .ce[0] = gp100_ce_new,
+       .ce[1] = gp100_ce_new,
+       .ce[2] = gp100_ce_new,
+       .ce[3] = gp100_ce_new,
+       .ce[4] = gp100_ce_new,
+       .ce[5] = gp100_ce_new,
+       .dma = gf119_dma_new,
+       .disp = gp100_disp_new,
+       .fifo = gp100_fifo_new,
+       .gr = gp100_gr_new,
+       .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv134_chipset = {
+       .name = "GP104",
+       .bar = gf100_bar_new,
+       .bios = nvkm_bios_new,
+       .bus = gf100_bus_new,
+       .devinit = gm200_devinit_new,
+       .fb = gp104_fb_new,
+       .fuse = gm107_fuse_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .ltc = gp100_ltc_new,
+       .mc = gp100_mc_new,
+       .mmu = gf100_mmu_new,
+       .pci = gp100_pci_new,
+       .timer = gk20a_timer_new,
+       .top = gk104_top_new,
+       .ce[0] = gp104_ce_new,
+       .ce[1] = gp104_ce_new,
+       .ce[2] = gp104_ce_new,
+       .ce[3] = gp104_ce_new,
+       .disp = gp104_disp_new,
+       .dma = gf119_dma_new,
+       .fifo = gp100_fifo_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -2221,6 +2282,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
        _(CE0    , device->ce[0]   ,  device->ce[0]);
        _(CE1    , device->ce[1]   ,  device->ce[1]);
        _(CE2    , device->ce[2]   ,  device->ce[2]);
+       _(CE3    , device->ce[3]   ,  device->ce[3]);
+       _(CE4    , device->ce[4]   ,  device->ce[4]);
+       _(CE5    , device->ce[5]   ,  device->ce[5]);
        _(CIPHER , device->cipher  ,  device->cipher);
        _(DISP   , device->disp    , &device->disp->engine);
        _(DMAOBJ , device->dma     , &device->dma->engine);
@@ -2235,6 +2299,7 @@ nvkm_device_engine(struct nvkm_device *device, int index)
        _(MSVLD  , device->msvld   ,  device->msvld);
        _(NVENC0 , device->nvenc[0],  device->nvenc[0]);
        _(NVENC1 , device->nvenc[1],  device->nvenc[1]);
+       _(NVENC2 , device->nvenc[2],  device->nvenc[2]);
        _(NVDEC  , device->nvdec   ,  device->nvdec);
        _(PM     , device->pm      , &device->pm->engine);
        _(SEC    , device->sec     ,  device->sec);
@@ -2492,6 +2557,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                        case 0x100: device->card_type = NV_E0; break;
                        case 0x110:
                        case 0x120: device->card_type = GM100; break;
+                       case 0x130: device->card_type = GP100; break;
                        default:
                                break;
                        }
@@ -2576,6 +2642,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x124: device->chip = &nv124_chipset; break;
                case 0x126: device->chip = &nv126_chipset; break;
                case 0x12b: device->chip = &nv12b_chipset; break;
+               case 0x130: device->chip = &nv130_chipset; break;
+               case 0x134: device->chip = &nv134_chipset; break;
                default:
                        nvdev_error(device, "unknown chipset (%08x)\n", boot0);
                        goto done;
@@ -2659,6 +2727,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                _(NVKM_ENGINE_CE0     ,    ce[0]);
                _(NVKM_ENGINE_CE1     ,    ce[1]);
                _(NVKM_ENGINE_CE2     ,    ce[2]);
+               _(NVKM_ENGINE_CE3     ,    ce[3]);
+               _(NVKM_ENGINE_CE4     ,    ce[4]);
+               _(NVKM_ENGINE_CE5     ,    ce[5]);
                _(NVKM_ENGINE_CIPHER  ,   cipher);
                _(NVKM_ENGINE_DISP    ,     disp);
                _(NVKM_ENGINE_DMAOBJ  ,      dma);
@@ -2673,6 +2744,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                _(NVKM_ENGINE_MSVLD   ,    msvld);
                _(NVKM_ENGINE_NVENC0  , nvenc[0]);
                _(NVKM_ENGINE_NVENC1  , nvenc[1]);
+               _(NVKM_ENGINE_NVENC2  , nvenc[2]);
                _(NVKM_ENGINE_NVDEC   ,    nvdec);
                _(NVKM_ENGINE_PM      ,       pm);
                _(NVKM_ENGINE_SEC     ,      sec);
index 18fab3973ce5f9b0c2a527d3fcfeb07b465288e5..b1b693219db3f70edbf29ae5f508141937e84e42 100644 (file)
@@ -1614,7 +1614,6 @@ nvkm_device_pci_func = {
        .fini = nvkm_device_pci_fini,
        .resource_addr = nvkm_device_pci_resource_addr,
        .resource_size = nvkm_device_pci_resource_size,
-       .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
 };
 
 int
index ec12efb4689a7ea8a50fd7ee8626f5df4440f0a3..939682f18788feda2d5b30b045140bc696e476db 100644 (file)
@@ -191,13 +191,11 @@ static irqreturn_t
 nvkm_device_tegra_intr(int irq, void *arg)
 {
        struct nvkm_device_tegra *tdev = arg;
-       struct nvkm_mc *mc = tdev->device.mc;
+       struct nvkm_device *device = &tdev->device;
        bool handled = false;
-       if (likely(mc)) {
-               nvkm_mc_intr_unarm(mc);
-               nvkm_mc_intr(mc, &handled);
-               nvkm_mc_intr_rearm(mc);
-       }
+       nvkm_mc_intr_unarm(device);
+       nvkm_mc_intr(device, &handled);
+       nvkm_mc_intr_rearm(device);
        return handled ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -247,7 +245,6 @@ nvkm_device_tegra_func = {
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
-       .cpu_coherent = false,
 };
 
 int
@@ -313,6 +310,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                goto remove;
 
        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+       tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
        ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
                               NVKM_DEVICE_TEGRA, pdev->id, NULL,
                               cfg, dbg, detect, mmio, subdev_mask,
index 137066426ed7f9cbb37e24650a95e15225e02765..79a8f71cf7884dc3e2307e12b591a194f8ae841a 100644 (file)
@@ -102,6 +102,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
        case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
        case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
        case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+       case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
        default:
                args->v0.family = 0;
                break;
index a74c5dd27dc0f8774706bc41f81c7d43169aa339..77a52b54a31e3c5627adac7466e1cc96b4e74945 100644 (file)
@@ -10,6 +10,8 @@ nvkm-y += nvkm/engine/disp/gk104.o
 nvkm-y += nvkm/engine/disp/gk110.o
 nvkm-y += nvkm/engine/disp/gm107.o
 nvkm-y += nvkm/engine/disp/gm200.o
+nvkm-y += nvkm/engine/disp/gp100.o
+nvkm-y += nvkm/engine/disp/gp104.o
 
 nvkm-y += nvkm/engine/disp/outp.o
 nvkm-y += nvkm/engine/disp/outpdp.o
@@ -18,6 +20,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
 nvkm-y += nvkm/engine/disp/sornv50.o
 nvkm-y += nvkm/engine/disp/sorg94.o
 nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/dport.o
 
@@ -44,12 +47,15 @@ nvkm-y += nvkm/engine/disp/rootgk104.o
 nvkm-y += nvkm/engine/disp/rootgk110.o
 nvkm-y += nvkm/engine/disp/rootgm107.o
 nvkm-y += nvkm/engine/disp/rootgm200.o
+nvkm-y += nvkm/engine/disp/rootgp100.o
+nvkm-y += nvkm/engine/disp/rootgp104.o
 
 nvkm-y += nvkm/engine/disp/channv50.o
 nvkm-y += nvkm/engine/disp/changf119.o
 
 nvkm-y += nvkm/engine/disp/dmacnv50.o
 nvkm-y += nvkm/engine/disp/dmacgf119.o
+nvkm-y += nvkm/engine/disp/dmacgp104.o
 
 nvkm-y += nvkm/engine/disp/basenv50.o
 nvkm-y += nvkm/engine/disp/baseg84.o
@@ -58,6 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o
 nvkm-y += nvkm/engine/disp/basegf119.o
 nvkm-y += nvkm/engine/disp/basegk104.o
 nvkm-y += nvkm/engine/disp/basegk110.o
+nvkm-y += nvkm/engine/disp/basegp104.o
 
 nvkm-y += nvkm/engine/disp/corenv50.o
 nvkm-y += nvkm/engine/disp/coreg84.o
@@ -69,6 +76,8 @@ nvkm-y += nvkm/engine/disp/coregk104.o
 nvkm-y += nvkm/engine/disp/coregk110.o
 nvkm-y += nvkm/engine/disp/coregm107.o
 nvkm-y += nvkm/engine/disp/coregm200.o
+nvkm-y += nvkm/engine/disp/coregp100.o
+nvkm-y += nvkm/engine/disp/coregp104.o
 
 nvkm-y += nvkm/engine/disp/ovlynv50.o
 nvkm-y += nvkm/engine/disp/ovlyg84.o
@@ -76,6 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o
 nvkm-y += nvkm/engine/disp/ovlygt215.o
 nvkm-y += nvkm/engine/disp/ovlygf119.o
 nvkm-y += nvkm/engine/disp/ovlygk104.o
+nvkm-y += nvkm/engine/disp/ovlygp104.o
 
 nvkm-y += nvkm/engine/disp/piocnv50.o
 nvkm-y += nvkm/engine/disp/piocgf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
new file mode 100644 (file)
index 0000000..51688e3
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_base_oclass = {
+       .base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
+       .base.minver = 0,
+       .base.maxver = 0,
+       .ctor = nv50_disp_base_new,
+       .func = &gp104_disp_dmac_func,
+       .mthd = &gf119_disp_base_chan_mthd,
+       .chid = 1,
+};
index aee374884c960394490655c66055d35ec1fdd2a6..f5f683d9fd20225815518f4867c2e4a873248d9f 100644 (file)
@@ -85,6 +85,7 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
 extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
 
 extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
+extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
 
 struct nv50_disp_pioc_oclass {
        int (*ctor)(const struct nv50_disp_chan_func *,
index 6b1dc703dac7e77bdaf778d90bc1e46895fee49c..21fbf89b6319fba4f155acceee2a61c5b398b2cf 100644 (file)
@@ -171,7 +171,7 @@ gf119_disp_core_chan_mthd = {
        }
 };
 
-static void
+void
 gf119_disp_core_fini(struct nv50_disp_dmac *chan)
 {
        struct nv50_disp *disp = chan->base.root->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
new file mode 100644 (file)
index 0000000..d5dff66
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp100_disp_core_oclass = {
+       .base.oclass = GP100_DISP_CORE_CHANNEL_DMA,
+       .base.minver = 0,
+       .base.maxver = 0,
+       .ctor = nv50_disp_core_new,
+       .func = &gf119_disp_core_func,
+       .mthd = &gk104_disp_core_chan_mthd,
+       .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
new file mode 100644 (file)
index 0000000..6922f40
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+static int
+gp104_disp_core_init(struct nv50_disp_dmac *chan)
+{
+       struct nv50_disp *disp = chan->base.root->disp;
+       struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+
+       /* enable error reporting */
+       nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
+
+       /* initialise channel for dma command submission */
+       nvkm_wr32(device, 0x611494, chan->push);
+       nvkm_wr32(device, 0x611498, 0x00010000);
+       nvkm_wr32(device, 0x61149c, 0x00000001);
+       nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
+       nvkm_wr32(device, 0x640000, 0x00000000);
+       nvkm_wr32(device, 0x610490, 0x01000013);
+
+       /* wait for it to go inactive */
+       if (nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
+                       break;
+       ) < 0) {
+               nvkm_error(subdev, "core init: %08x\n",
+                          nvkm_rd32(device, 0x610490));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+const struct nv50_disp_dmac_func
+gp104_disp_core_func = {
+       .init = gp104_disp_core_init,
+       .fini = gf119_disp_core_fini,
+       .bind = gf119_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_core_oclass = {
+       .base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
+       .base.minver = 0,
+       .base.maxver = 0,
+       .ctor = nv50_disp_core_new,
+       .func = &gp104_disp_core_func,
+       .mthd = &gk104_disp_core_chan_mthd,
+       .chid = 0,
+};
index 876b14549a587e32effac90da6f8b7d957afdb38..a57f7cef307a89fae14784ee369b9bc9733ab930 100644 (file)
@@ -36,7 +36,7 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
                                 chan->base.chid << 27 | 0x00000001);
 }
 
-static void
+void
 gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
 {
        struct nv50_disp *disp = chan->base.root->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
new file mode 100644 (file)
index 0000000..ad24c2c
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static int
+gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+       struct nv50_disp *disp = chan->base.root->disp;
+       struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       int chid = chan->base.chid;
+
+       /* enable error reporting */
+       nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+       /* initialise channel for dma command submission */
+       nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
+       nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
+       nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
+       nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+       nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+       nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+
+       /* wait for it to go inactive */
+       if (nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+                       break;
+       ) < 0) {
+               nvkm_error(subdev, "ch %d init: %08x\n", chid,
+                          nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+const struct nv50_disp_dmac_func
+gp104_disp_dmac_func = {
+       .init = gp104_disp_dmac_init,
+       .fini = gf119_disp_dmac_fini,
+       .bind = gf119_disp_dmac_bind,
+};
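GP104's channel init mirrors the gf119 flow at new register offsets: each channel's control registers stride by 0x10 from 0x611494/0x610490, the per-channel window at 0x640000 strides by 0x1000, and after the push buffer is programmed the code polls until the busy bit (bit 31) clears. The nvkm_msec(device, 2000, ...) call waits up to 2000 ms; conceptually (a sketch, not the real macro expansion):

/* Conceptual sketch of the poll above; not the actual nvkm_msec macro. */
int timeout_us = 2000 * 1000; /* 2000 ms budget */
while (nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000) {
	if (timeout_us-- <= 0)
		return -EBUSY; /* channel never went idle */
	udelay(1);
}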
index fc84eb8b5c451e74722ecc0e73486dff7c953ec7..43ac0585785375423c3d287779fd910f55461469 100644 (file)
@@ -25,8 +25,12 @@ int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
 extern const struct nv50_disp_dmac_func nv50_disp_core_func;
 
 extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
+void gf119_disp_dmac_fini(struct nv50_disp_dmac *);
 int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
 extern const struct nv50_disp_dmac_func gf119_disp_core_func;
+void gf119_disp_core_fini(struct nv50_disp_dmac *);
+
+extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;
 
 struct nv50_disp_dmac_oclass {
        int (*ctor)(const struct nv50_disp_dmac_func *,
@@ -88,4 +92,10 @@ extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
 extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
 
 extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
 #endif
index f0314664349c3b2c39ef650e1ecdc636bde5aae1..29e84b241cca9510a84fca47c34ae56538d6c1c3 100644 (file)
@@ -79,8 +79,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
        list_for_each_entry(outp, &disp->base.outp, head) {
                if ((outp->info.hasht & 0xff) == type &&
                    (outp->info.hashm & mask) == mask) {
-                       *data = nvbios_outp_match(bios, outp->info.hasht,
-                                                       outp->info.hashm,
+                       *data = nvbios_outp_match(bios, outp->info.hasht, mask,
                                                  ver, hdr, cnt, len, info);
                        if (!*data)
                                return NULL;
@@ -155,25 +154,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
        if (!outp)
                return NULL;
 
+       *conf = (ctrl & 0x00000f00) >> 8;
        switch (outp->info.type) {
        case DCB_OUTPUT_TMDS:
-               *conf = (ctrl & 0x00000f00) >> 8;
                if (*conf == 5)
                        *conf |= 0x0100;
                break;
        case DCB_OUTPUT_LVDS:
-               *conf = disp->sor.lvdsconf;
+               *conf |= disp->sor.lvdsconf;
                break;
-       case DCB_OUTPUT_DP:
-               *conf = (ctrl & 0x00000f00) >> 8;
-               break;
-       case DCB_OUTPUT_ANALOG:
        default:
-               *conf = 0x00ff;
                break;
        }
 
-       data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+       data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+                                &ver, &hdr, &cnt, &len, &info2);
        if (data && id < 0xff) {
                data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
                if (data) {
@@ -418,7 +413,7 @@ gf119_disp_intr_supervisor(struct work_struct *work)
        nvkm_wr32(device, 0x6101d0, 0x80000000);
 }
 
-static void
+void
 gf119_disp_intr_error(struct nv50_disp *disp, int chid)
 {
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -466,7 +461,7 @@ gf119_disp_intr(struct nv50_disp *disp)
                u32 stat = nvkm_rd32(device, 0x61009c);
                int chid = ffs(stat) - 1;
                if (chid >= 0)
-                       gf119_disp_intr_error(disp, chid);
+                       disp->func->intr_error(disp, chid);
                intr &= ~0x00000002;
        }
 
@@ -510,6 +505,7 @@ gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
 static const struct nv50_disp_func
 gf119_disp = {
        .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_intr_supervisor,
        .root = &gf119_disp_root_oclass,
index a86384b8e388defac9284aec848212317abec765..37f145cf30d78345c2c599ca9174994fc164b81f 100644 (file)
@@ -27,6 +27,7 @@
 static const struct nv50_disp_func
 gk104_disp = {
        .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_intr_supervisor,
        .root = &gk104_disp_root_oclass,
index 0d574c7e594ae04f3d2829359e6e3a5a86cdddf2..e14ac946608ca7d39c870b12f770ab4f8450e6f4 100644 (file)
@@ -27,6 +27,7 @@
 static const struct nv50_disp_func
 gk110_disp = {
        .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_intr_supervisor,
        .root = &gk110_disp_root_oclass,
index b6944142d616f91fba4886521994b60ee50a18e0..2f2437cc5891553a876a9a2f02605a6b3de33cdb 100644 (file)
@@ -27,6 +27,7 @@
 static const struct nv50_disp_func
 gm107_disp = {
        .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_intr_supervisor,
        .root = &gm107_disp_root_oclass,
@@ -36,7 +37,7 @@ gm107_disp = {
        .outp.internal.crt = nv50_dac_output_new,
        .outp.internal.tmds = nv50_sor_output_new,
        .outp.internal.lvds = nv50_sor_output_new,
-       .outp.internal.dp = gf119_sor_dp_new,
+       .outp.internal.dp = gm107_sor_dp_new,
        .dac.nr = 3,
        .dac.power = nv50_dac_power,
        .dac.sense = nv50_dac_sense,
index 67eec86207194e3d7894e51037b6e81f2d4d86e4..9f368d4ee61e1fb6fe1bf404692714d84a525031 100644 (file)
@@ -27,6 +27,7 @@
 static const struct nv50_disp_func
 gm200_disp = {
        .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_intr_supervisor,
        .root = &gm200_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
new file mode 100644 (file)
index 0000000..4f81bf3
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gp100_disp = {
+       .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
+       .uevent = &gf119_disp_chan_uevent,
+       .super = gf119_disp_intr_supervisor,
+       .root = &gp100_disp_root_oclass,
+       .head.vblank_init = gf119_disp_vblank_init,
+       .head.vblank_fini = gf119_disp_vblank_fini,
+       .head.scanoutpos = gf119_disp_root_scanoutpos,
+       .outp.internal.crt = nv50_dac_output_new,
+       .outp.internal.tmds = nv50_sor_output_new,
+       .outp.internal.lvds = nv50_sor_output_new,
+       .outp.internal.dp = gm200_sor_dp_new,
+       .dac.nr = 3,
+       .dac.power = nv50_dac_power,
+       .dac.sense = nv50_dac_sense,
+       .sor.nr = 4,
+       .sor.power = nv50_sor_power,
+       .sor.hda_eld = gf119_hda_eld,
+       .sor.hdmi = gk104_hdmi_ctrl,
+       .sor.magic = gm200_sor_magic,
+};
+
+int
+gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+       return gf119_disp_new_(&gp100_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
new file mode 100644 (file)
index 0000000..3bf3380
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+static void
+gp104_disp_intr_error(struct nv50_disp *disp, int chid)
+{
+       struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12));
+       u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12));
+       u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12));
+
+       nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
+                  chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
+       if (chid < ARRAY_SIZE(disp->chan)) {
+               switch (mthd & 0xffc) {
+               case 0x0080:
+                       nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       nvkm_wr32(device, 0x61009c, (1 << chid));
+       nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000);
+}
+
+static const struct nv50_disp_func
+gp104_disp = {
+       .intr = gf119_disp_intr,
+       .intr_error = gp104_disp_intr_error,
+       .uevent = &gf119_disp_chan_uevent,
+       .super = gf119_disp_intr_supervisor,
+       .root = &gp104_disp_root_oclass,
+       .head.vblank_init = gf119_disp_vblank_init,
+       .head.vblank_fini = gf119_disp_vblank_fini,
+       .head.scanoutpos = gf119_disp_root_scanoutpos,
+       .outp.internal.crt = nv50_dac_output_new,
+       .outp.internal.tmds = nv50_sor_output_new,
+       .outp.internal.lvds = nv50_sor_output_new,
+       .outp.internal.dp = gm200_sor_dp_new,
+       .dac.nr = 3,
+       .dac.power = nv50_dac_power,
+       .dac.sense = nv50_dac_sense,
+       .sor.nr = 4,
+       .sor.power = nv50_sor_power,
+       .sor.hda_eld = gf119_hda_eld,
+       .sor.hdmi = gk104_hdmi_ctrl,
+       .sor.magic = gm200_sor_magic,
+};
+
+int
+gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+       return gf119_disp_new_(&gp104_disp, device, index, pdisp);
+}
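GP104 moved the channel-error reporting registers (now read at 0x6111f0 + chid * 12 and acked with 0x90000000), which is why this series turns the error handler into an .intr_error hook on nv50_disp_func: the shared gf119_disp_intr() dispatches through it (see the gf119.c hunk earlier), every pre-Pascal gf119-family chip wires the old gf119_disp_intr_error back in, and gp104 supplies its own. The dispatch itself is one line:

/* As wired in gf119_disp_intr() above: */
if (chid >= 0)
	disp->func->intr_error(disp, chid);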
index 4226d2153b9c2a1f8072697fbb77c1b7838e9e09..fbb8c7dc18fd2b67a880432e9a0144e2ee65c57a 100644 (file)
@@ -32,6 +32,7 @@
 #include <subdev/bios/init.h>
 #include <subdev/bios/pll.h>
 #include <subdev/devinit.h>
+#include <subdev/timer.h>
 
 static const struct nvkm_disp_oclass *
 nv50_disp_root_(struct nvkm_disp *base)
@@ -269,8 +270,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
        list_for_each_entry(outp, &disp->base.outp, head) {
                if ((outp->info.hasht & 0xff) == type &&
                    (outp->info.hashm & mask) == mask) {
-                       *data = nvbios_outp_match(bios, outp->info.hasht,
-                                                       outp->info.hashm,
+                       *data = nvbios_outp_match(bios, outp->info.hasht, mask,
                                                  ver, hdr, cnt, len, info);
                        if (!*data)
                                return NULL;
@@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
        if (!outp)
                return NULL;
 
+       *conf = (ctrl & 0x00000f00) >> 8;
        if (outp->info.location == 0) {
                switch (outp->info.type) {
                case DCB_OUTPUT_TMDS:
-                       *conf = (ctrl & 0x00000f00) >> 8;
                        if (*conf == 5)
                                *conf |= 0x0100;
                        break;
                case DCB_OUTPUT_LVDS:
-                       *conf = disp->sor.lvdsconf;
+                       *conf |= disp->sor.lvdsconf;
                        break;
-               case DCB_OUTPUT_DP:
-                       *conf = (ctrl & 0x00000f00) >> 8;
-                       break;
-               case DCB_OUTPUT_ANALOG:
                default:
-                       *conf = 0x00ff;
                        break;
                }
        } else {
@@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
                pclk = pclk / 2;
        }
 
-       data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+       data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+                                &ver, &hdr, &cnt, &len, &info2);
        if (data && id < 0xff) {
                data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
                if (data) {
@@ -430,6 +426,134 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
        return outp;
 }
 
+static bool
+nv50_disp_dptmds_war(struct nvkm_device *device)
+{
+       switch (device->chipset) {
+       case 0x94:
+       case 0x96:
+       case 0x98:
+       case 0xaa:
+       case 0xac:
+               return true;
+       default:
+               break;
+       }
+       return false;
+}
+
+static bool
+nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp)
+{
+       struct nvkm_device *device = disp->base.engine.subdev.device;
+       const u32 soff = __ffs(outp->or) * 0x800;
+       if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) {
+               switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
+               case 0x00000000:
+               case 0x00030000:
+                       return true;
+               default:
+                       break;
+               }
+       }
+       return false;
+}
+
+static void
+nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp)
+{
+       struct nvkm_device *device = disp->base.engine.subdev.device;
+       const u32 soff = __ffs(outp->or) * 0x800;
+
+       if (!nv50_disp_dptmds_war_needed(disp, outp))
+               return;
+
+       nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
+       nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
+       nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);
+
+       nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
+       nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
+       nvkm_usec(device, 400, NVKM_DELAY);
+       nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
+       nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);
+
+       if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
+               u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
+               u32  pu_pc = seqctl & 0x0000000f;
+               nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
+       }
+}
+
+static void
+nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp)
+{
+       struct nvkm_device *device = disp->base.engine.subdev.device;
+       const u32 soff = __ffs(outp->or) * 0x800;
+       u32 sorpwr;
+
+       if (!nv50_disp_dptmds_war_needed(disp, outp))
+               return;
+
+       sorpwr = nvkm_rd32(device, 0x61c004 + soff);
+       if (sorpwr & 0x00000001) {
+               u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
+               u32  pd_pc = (seqctl & 0x00000f00) >> 8;
+               u32  pu_pc =  seqctl & 0x0000000f;
+
+               nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);
+
+               nvkm_msec(device, 2000,
+                       if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+                               break;
+               );
+               nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
+               nvkm_msec(device, 2000,
+                       if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+                               break;
+               );
+
+               nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
+               nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
+       }
+
+       nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
+       nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);
+
+       if (sorpwr & 0x00000001) {
+               nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
+       }
+}
+
+static void
+nv50_disp_update_sppll1(struct nv50_disp *disp)
+{
+       struct nvkm_device *device = disp->base.engine.subdev.device;
+       bool used = false;
+       int sor;
+
+       if (!nv50_disp_dptmds_war(device))
+               return;
+
+       for (sor = 0; sor < disp->func->sor.nr; sor++) {
+               u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800));
+               switch (clksor & 0x03000000) {
+               case 0x02000000:
+               case 0x03000000:
+                       used = true;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       if (used)
+               return;
+
+       nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
+}
+
 static void
 nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
 {
@@ -683,6 +807,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
 
        nvkm_mask(device, hreg, 0x0000000f, hval);
        nvkm_mask(device, oreg, mask, oval);
+
+       nv50_disp_dptmds_war_2(disp, &outp->info);
 }
 
 /* If programming a TMDS output on a SOR that can also be configured for
@@ -724,6 +850,7 @@ nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
 
        if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
                nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
+       nv50_disp_dptmds_war_3(disp, &outp->info);
 }
 
 void
@@ -771,6 +898,7 @@ nv50_disp_intr_supervisor(struct work_struct *work)
                                continue;
                        nv50_disp_intr_unk40_0(disp, head);
                }
+               nv50_disp_update_sppll1(disp);
        }
 
        nvkm_wr32(device, 0x610030, 0x80000000);
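The dptmds workaround applies to a handful of nv50-family chipsets (0x94/0x96/0x98/0xaa/0xac) whose DP/TMDS outputs evidently share PLL resources, hence the SPPLL1 bookkeeping: _war_2 massages the SOR clock and sequencer around the modeset, _war_3 power-cycles the SOR afterwards, and update_sppll1 drops the 0x00e840 enable bit once no SOR is left using the shared PLL. Throughout, outp->or is a bitmask of output resources and each SOR's register block is 0x800 bytes apart:

/* Illustrative: __ffs() turns the OR bitmask into an index. */
u32 soff = __ffs(0x4) * 0x800; /* or == 0x4 -> SOR2 -> soff == 0x1000 */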
index aecebd8717e5ccbdea2f11903faee88474d2163f..1e1de6bfe85a25eb00d881a5a266cd6fc1b36e49 100644 (file)
@@ -68,6 +68,7 @@ struct nv50_disp_func_outp {
 
 struct nv50_disp_func {
        void (*intr)(struct nv50_disp *);
+       void (*intr_error)(struct nv50_disp *, int chid);
 
        const struct nvkm_event_func *uevent;
        void (*super)(struct work_struct *);
@@ -114,4 +115,5 @@ void gf119_disp_vblank_init(struct nv50_disp *, int);
 void gf119_disp_vblank_fini(struct nv50_disp *, int);
 void gf119_disp_intr(struct nv50_disp *);
 void gf119_disp_intr_supervisor(struct work_struct *);
+void gf119_disp_intr_error(struct nv50_disp *, int);
 #endif
index e9067ba4e1790d1e70a5cce27035f30234bc8856..4e983f6d7032e897e033085a196405d7c20d9fec 100644 (file)
@@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
 int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
                     struct nvkm_output **);
 int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
 
-int  gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
-                     struct nvkm_output **);
+int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+                    struct nvkm_output **);
+int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
+
+int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+                    struct nvkm_output **);
 #endif
index 2e2dc0641ef2b37b3c422e1432a7676b14b09c0b..2f0220b39f3469ba146a73efd94307c6b281d3b7 100644
@@ -80,7 +80,7 @@ gk104_disp_ovly_mthd_base = {
        }
 };
 
-static const struct nv50_disp_chan_mthd
+const struct nv50_disp_chan_mthd
 gk104_disp_ovly_chan_mthd = {
        .name = "Overlay",
        .addr = 0x001000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
new file mode 100644
index 0000000..97e2dd2
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_ovly_oclass = {
+       .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
+       .base.minver = 0,
+       .base.maxver = 0,
+       .ctor = nv50_disp_ovly_new,
+       .func = &gp104_disp_dmac_func,
+       .mthd = &gk104_disp_ovly_chan_mthd,
+       .chid = 5,
+};
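
Note: gp104_disp_ovly_oclass above follows the descriptor pattern nvkm uses for every display channel: one table entry ties a client-visible class ID to a constructor, a per-chipset function table, and a fixed channel index (chid 5 for the overlay). A reduced illustration of that shape; struct chan_oclass and the class value are stand-ins, not the real nv50_disp_dmac_oclass:

#include <stdint.h>

struct chan_oclass {
	uint32_t oclass;              /* class id exposed to clients */
	int    (*ctor)(void *parent); /* channel constructor         */
	const void *func;             /* per-chipset channel ops     */
	int      chid;                /* fixed hardware channel      */
};

static int ovly_new(void *parent) { (void)parent; return 0; }

static const struct chan_oclass ovly = {
	.oclass = 0x917e,   /* illustrative value only */
	.ctor   = ovly_new,
	.chid   = 5,        /* overlay channel, as in the patch */
};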
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
new file mode 100644
index 0000000..ac8fdd7
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gp100_disp_root = {
+       .init = gf119_disp_root_init,
+       .fini = gf119_disp_root_fini,
+       .dmac = {
+               &gp100_disp_core_oclass,
+               &gk110_disp_base_oclass,
+               &gk104_disp_ovly_oclass,
+       },
+       .pioc = {
+               &gk104_disp_oimm_oclass,
+               &gk104_disp_curs_oclass,
+       },
+};
+
+static int
+gp100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+                   void *data, u32 size, struct nvkm_object **pobject)
+{
+       return nv50_disp_root_new_(&gp100_disp_root, disp, oclass,
+                                  data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gp100_disp_root_oclass = {
+       .base.oclass = GP100_DISP,
+       .base.minver = -1,
+       .base.maxver = -1,
+       .ctor = gp100_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
new file mode 100644
index 0000000..8443e04
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gp104_disp_root = {
+       .init = gf119_disp_root_init,
+       .fini = gf119_disp_root_fini,
+       .dmac = {
+               &gp104_disp_core_oclass,
+               &gp104_disp_base_oclass,
+               &gp104_disp_ovly_oclass,
+       },
+       .pioc = {
+               &gk104_disp_oimm_oclass,
+               &gk104_disp_curs_oclass,
+       },
+};
+
+static int
+gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+                   void *data, u32 size, struct nvkm_object **pobject)
+{
+       return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
+                                  data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gp104_disp_root_oclass = {
+       .base.oclass = GP104_DISP,
+       .base.minver = -1,
+       .base.maxver = -1,
+       .ctor = gp104_disp_root_new,
+};
index cb449ed8d92c45c6543a2249f60c471438d4ba66..ad00f1724b729c60ff749c8cf601533aa4f05d55 100644
@@ -40,4 +40,6 @@ extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
 extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
 extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
 extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
 #endif
index b4b41b13564344978517fad5c1a5622430fc1720..22706c0a54b53a9f95a1ad7e453217b7276cfcaf 100644
@@ -40,8 +40,7 @@ static int
 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
        struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-       const u32 loff = gf119_sor_loff(outp);
-       nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+       nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
        return 0;
 }
 
@@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
        return 0;
 }
 
-static int
+int
 gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
                     int ln, int vs, int pe, int pc)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
new file mode 100644
index 0000000..37790b2
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+int
+gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+       struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+       const u32 soff = outp->base.or * 0x800;
+       const u32 data = 0x01010101 * pattern;
+       if (outp->base.info.sorconf.link & 1)
+               nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+       else
+               nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+       return 0;
+}
+
+static const struct nvkm_output_dp_func
+gm107_sor_dp_func = {
+       .pattern = gm107_sor_dp_pattern,
+       .lnk_pwr = g94_sor_dp_lnk_pwr,
+       .lnk_ctl = gf119_sor_dp_lnk_ctl,
+       .drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gm107_sor_dp_new(struct nvkm_disp *disp, int index,
+                struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+       return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
+}
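
Note: gm107_sor_dp_pattern() above picks the pattern register per sublink (0x61c110 when bit 0 of sorconf.link is set, 0x61c12c otherwise) and replicates the 4-bit pattern code into all four byte lanes of the 0x0f0f0f0f mask via the 0x01010101 multiply. A standalone check of that arithmetic; the pattern value is an example:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int pattern = 2;                       /* e.g. DP training pattern 2 */
	uint32_t data = 0x01010101u * pattern;
	assert(data == 0x02020202u);           /* one copy per lane byte */
	return 0;
}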
index 2cfbef9c344ff68ba9e71805cbbe911aed4ddf5e..c44fa7ea672ab20192c8498e418018492ad93252 100644
@@ -56,19 +56,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
        return lane * 0x08;
 }
 
-static int
-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
-       struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-       const u32 soff = gm200_sor_soff(outp);
-       const u32 data = 0x01010101 * pattern;
-       if (outp->base.info.sorconf.link & 1)
-               nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
-       else
-               nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
-       return 0;
-}
-
 static int
 gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
@@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 
 static const struct nvkm_output_dp_func
 gm200_sor_dp_func = {
-       .pattern = gm200_sor_dp_pattern,
+       .pattern = gm107_sor_dp_pattern,
        .lnk_pwr = gm200_sor_dp_lnk_pwr,
        .lnk_ctl = gf119_sor_dp_lnk_ctl,
        .drv_ctl = gm200_sor_dp_drv_ctl,
index 65e5d291ecda000ce02e711779b62be285e5c67f..98651a43bc12fe5bec383eb88fee6b228e7f1e42 100644
@@ -13,6 +13,7 @@ nvkm-y += nvkm/engine/fifo/gk20a.o
 nvkm-y += nvkm/engine/fifo/gm107.o
 nvkm-y += nvkm/engine/fifo/gm200.o
 nvkm-y += nvkm/engine/fifo/gm20b.o
+nvkm-y += nvkm/engine/fifo/gp100.o
 
 nvkm-y += nvkm/engine/fifo/chan.o
 nvkm-y += nvkm/engine/fifo/channv50.o
@@ -31,3 +32,4 @@ nvkm-y += nvkm/engine/fifo/gpfifogf100.o
 nvkm-y += nvkm/engine/fifo/gpfifogk104.o
 nvkm-y += nvkm/engine/fifo/gpfifogk110.o
 nvkm-y += nvkm/engine/fifo/gpfifogm200.o
+nvkm-y += nvkm/engine/fifo/gpfifogp100.o
index e06f4d46f802e9cb17015d70fbb3f31cdbe18446..230f64e5f73180dbc944a87296de2f801456316d 100644
@@ -27,4 +27,5 @@ int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
 extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
 extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass;
 extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass;
 #endif
index 743f3a189f285dd6234a54efe6e5d50009fbf25e..103c0afaaa6d1414169276bc7af6b672bb7bedf9 100644
@@ -329,7 +329,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
        }
 
        if (eu == NULL) {
-               enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit);
+               enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
                if (engidx < NVKM_SUBDEV_NR) {
                        const char *src = nvkm_subdev_name[engidx];
                        char *dst = en;
@@ -589,7 +589,6 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
-       struct nvkm_top *top = device->top;
        int engn, runl, pbid, ret, i, j;
        enum nvkm_devidx engidx;
        u32 *map;
@@ -608,7 +607,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 
        /* Determine runlist configuration from topology device info. */
        i = 0;
-       while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) {
+       while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
                /* Determine which PBDMA handles requests for this engine. */
                for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
                        if (map[j] & (1 << runl)) {
@@ -617,8 +616,8 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
                        }
                }
 
-               nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n",
-                          engn, runl, pbid);
+               nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
+                          engn, runl, pbid, nvkm_subdev_name[engidx]);
 
                fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
                fifo->engine[engn].runl = runl;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
new file mode 100644
index 0000000..eff83f7
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct nvkm_enum
+gp100_fifo_fault_engine[] = {
+       { 0x01, "DISPLAY" },
+       { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+       { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+       { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+       { 0x06, "HOST0" },
+       { 0x07, "HOST1" },
+       { 0x08, "HOST2" },
+       { 0x09, "HOST3" },
+       { 0x0a, "HOST4" },
+       { 0x0b, "HOST5" },
+       { 0x0c, "HOST6" },
+       { 0x0d, "HOST7" },
+       { 0x0e, "HOST8" },
+       { 0x0f, "HOST9" },
+       { 0x10, "HOST10" },
+       { 0x13, "PERF" },
+       { 0x17, "PMU" },
+       { 0x18, "PTP" },
+       { 0x1f, "PHYSICAL" },
+       {}
+};
+
+static const struct gk104_fifo_func
+gp100_fifo = {
+       .fault.engine = gp100_fifo_fault_engine,
+       .fault.reason = gk104_fifo_fault_reason,
+       .fault.hubclient = gk104_fifo_fault_hubclient,
+       .fault.gpcclient = gk104_fifo_fault_gpcclient,
+       .chan = {
+               &gp100_fifo_gpfifo_oclass,
+               NULL
+       },
+};
+
+int
+gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+       return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo);
+}
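
Note: the fault table above is consumed by an nvkm_enum-style lookup: scan entries until the value matches or the empty sentinel ends the table. A plain-C equivalent of that walk; struct fault_enum is an illustrative stand-in for nvkm_enum, with a few values copied from the table:

#include <stdio.h>

struct fault_enum { int value; const char *name; };

static const struct fault_enum faults[] = {
	{ 0x01, "DISPLAY" },
	{ 0x04, "BAR1" },
	{ 0x05, "BAR2" },
	{ 0, NULL }          /* sentinel, like the trailing {} above */
};

static const char *fault_name(int value)
{
	for (const struct fault_enum *e = faults; e->name; e++)
		if (e->value == value)
			return e->name;
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", fault_name(0x05));   /* prints BAR2 */
	return 0;
}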
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
new file mode 100644
index 0000000..1530a92
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+const struct nvkm_fifo_chan_oclass
+gp100_fifo_gpfifo_oclass = {
+       .base.oclass = PASCAL_CHANNEL_GPFIFO_A,
+       .base.minver = 0,
+       .base.maxver = 0,
+       .ctor = gk104_fifo_gpfifo_new,
+};
index 290ed0db8047146b6f2015d39e9373908d064196..f1c494182248a5e6a65519a7b394dabff9f9ed72 100644
@@ -31,6 +31,7 @@ nvkm-y += nvkm/engine/gr/gk20a.o
 nvkm-y += nvkm/engine/gr/gm107.o
 nvkm-y += nvkm/engine/gr/gm200.o
 nvkm-y += nvkm/engine/gr/gm20b.o
+nvkm-y += nvkm/engine/gr/gp100.o
 
 nvkm-y += nvkm/engine/gr/ctxnv40.o
 nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -48,3 +49,4 @@ nvkm-y += nvkm/engine/gr/ctxgk20a.o
 nvkm-y += nvkm/engine/gr/ctxgm107.o
 nvkm-y += nvkm/engine/gr/ctxgm200.o
 nvkm-y += nvkm/engine/gr/ctxgm20b.o
+nvkm-y += nvkm/engine/gr/ctxgp100.o
index b02d8f50ea6aff4549a27b3e3917da5b4731b883..bc77eea351a5c78351feeadbe244591fa6b6810a 100644
@@ -1240,7 +1240,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        const struct gf100_grctx_func *grctx = gr->func->grctx;
        u32 idle_timeout;
 
-       nvkm_mc_unk260(device->mc, 0);
+       nvkm_mc_unk260(device, 0);
 
        gf100_gr_mmio(gr, grctx->hub);
        gf100_gr_mmio(gr, grctx->gpc);
@@ -1264,7 +1264,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        gf100_gr_icmd(gr, grctx->icmd);
        nvkm_wr32(device, 0x404154, idle_timeout);
        gf100_gr_mthd(gr, grctx->mthd);
-       nvkm_mc_unk260(device->mc, 1);
+       nvkm_mc_unk260(device, 1);
 }
 
 int
index ac895edce164558fff1893405aef32f5cbf486ff..52048b5a527489b5d960c24a279f9d1972f23c10 100644
@@ -101,6 +101,8 @@ void gm200_grctx_generate_405b60(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gm20b_grctx;
 
+extern const struct gf100_grctx_func gp100_grctx;
+
 /* context init value lists */
 
 extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
index f521de11a299f3cdaac3241fbb62f101ce653dc1..c925ade5880e708d6a2befde5cb2f26d02470fa2 100644
@@ -226,7 +226,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        u32 idle_timeout;
        int i;
 
-       nvkm_mc_unk260(device->mc, 0);
+       nvkm_mc_unk260(device, 0);
 
        gf100_gr_mmio(gr, grctx->hub);
        gf100_gr_mmio(gr, grctx->gpc);
@@ -253,7 +253,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        gf100_gr_icmd(gr, grctx->icmd);
        nvkm_wr32(device, 0x404154, idle_timeout);
        gf100_gr_mthd(gr, grctx->mthd);
-       nvkm_mc_unk260(device->mc, 1);
+       nvkm_mc_unk260(device, 1);
 }
 
 const struct gf100_grctx_func
index 9ba337778ef550ac0d7ab270f8f8f32105dba54b..c46b3fdf720315984a428beecdbf7d2976dba5c5 100644
@@ -950,7 +950,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        u32 idle_timeout;
        int i;
 
-       nvkm_mc_unk260(device->mc, 0);
+       nvkm_mc_unk260(device, 0);
 
        gf100_gr_mmio(gr, grctx->hub);
        gf100_gr_mmio(gr, grctx->gpc);
@@ -979,7 +979,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        gf100_gr_icmd(gr, grctx->icmd);
        nvkm_wr32(device, 0x404154, idle_timeout);
        gf100_gr_mthd(gr, grctx->mthd);
-       nvkm_mc_unk260(device->mc, 1);
+       nvkm_mc_unk260(device, 1);
 
        nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
        nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
new file mode 100644
index 0000000..3d1ae7d
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+#include <subdev/fb.h>
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+static void
+gp100_grctx_generate_pagepool(struct gf100_grctx *info)
+{
+       const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+       const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+       const int s = 8;
+       const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+       mmio_refn(info, 0x40800c, 0x00000000, s, b);
+       mmio_wr32(info, 0x408010, 0x80000000);
+       mmio_refn(info, 0x419004, 0x00000000, s, b);
+       mmio_wr32(info, 0x419008, 0x00000000);
+}
+
+static void
+gp100_grctx_generate_attrib(struct gf100_grctx *info)
+{
+       struct gf100_gr *gr = info->gr;
+       const struct gf100_grctx_func *grctx = gr->func->grctx;
+       const u32  alpha = grctx->alpha_nr;
+       const u32 attrib = grctx->attrib_nr;
+       const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
+       const u32   size = roundup(gr->tpc_total * pertpc, 0x80);
+       const u32 access = NV_MEM_ACCESS_RW;
+       const int s = 12;
+       const int b = mmio_vram(info, size, (1 << s), access);
+       const int max_batches = 0xffff;
+       u32 ao = 0;
+       u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
+       int gpc, ppc, n = 0;
+
+       mmio_refn(info, 0x418810, 0x80000000, s, b);
+       mmio_refn(info, 0x419848, 0x10000000, s, b);
+       mmio_refn(info, 0x419c2c, 0x10000000, s, b);
+       mmio_refn(info, 0x419b00, 0x00000000, s, b);
+       mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7);
+       mmio_wr32(info, 0x405830, attrib);
+       mmio_wr32(info, 0x40585c, alpha);
+       mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
+
+       for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+               for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+                       const u32 as =  alpha * gr->ppc_tpc_nr[gpc][ppc];
+                       const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
+                       const u32 u = 0x418ea0 + (n * 0x04);
+                       const u32 o = PPC_UNIT(gpc, ppc, 0);
+                       if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+                               continue;
+                       mmio_wr32(info, o + 0xc0, bs);
+                       mmio_wr32(info, o + 0xf4, bo);
+                       mmio_wr32(info, o + 0xf0, bs);
+                       bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+                       mmio_wr32(info, o + 0xe4, as);
+                       mmio_wr32(info, o + 0xf8, ao);
+                       ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+                       mmio_wr32(info, u, bs);
+               }
+       }
+
+       mmio_wr32(info, 0x418eec, 0x00000000);
+       mmio_wr32(info, 0x41befc, 0x00000000);
+}
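
Note: gp100_grctx_generate_attrib() above lays the buffer out as one alpha region covering every TPC, followed by per-PPC beta (attrib) slices; ao and bo track the running offsets in units of the per-TPC counts. A toy rerun of that accounting with invented PPC/TPC counts (the real alpha_nr_max/attrib_nr_max come from the gp100_grctx table later in this file):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t alpha_nr_max = 0xc00, attrib_nr_max = 0x660;
	const int ppc_tpc_nr[4] = { 5, 5, 5, 5 };  /* invented: 4 PPCs, 5 TPCs each */
	const int tpc_total = 20;

	uint32_t ao = 0;
	uint32_t bo = ao + alpha_nr_max * tpc_total;  /* betas start after all alphas */
	for (int n = 0; n < 4; n++) {
		printf("ppc %d: alpha @ 0x%06x, beta @ 0x%06x\n", n, ao, bo);
		ao += alpha_nr_max  * ppc_tpc_nr[n];
		bo += attrib_nr_max * ppc_tpc_nr[n];
	}
	return 0;
}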
+
+static void
+gp100_grctx_generate_405b60(struct gf100_gr *gr)
+{
+       struct nvkm_device *device = gr->base.engine.subdev.device;
+       const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+       u32 dist[TPC_MAX / 4] = {};
+       u32 gpcs[GPC_MAX * 2] = {};
+       u8  tpcnr[GPC_MAX];
+       int tpc, gpc, i;
+
+       memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+
+       /* won't result in the same distribution as the binary driver where
+        * some of the gpcs have more tpcs than others, but this shall do
+        * for the moment.  the code for earlier gpus has this issue too.
+        */
+       for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
+               do {
+                       gpc = (gpc + 1) % gr->gpc_nr;
+               } while(!tpcnr[gpc]);
+               tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+               dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
+               gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
+       }
+
+       for (i = 0; i < dist_nr; i++)
+               nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+       for (i = 0; i < gr->gpc_nr * 2; i++)
+               nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
+}
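
Note: the comment in gp100_grctx_generate_405b60() already flags that the walk is a plain round-robin over GPCs that still have TPCs left. Extracted and run standalone with invented per-GPC TPC counts, the dist[] packing looks like this (each byte is (gpc << 4) | tpc for one global TPC index):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const int gpc_nr = 4, tpc_total = 8;
	const int tpc_nr[4] = { 2, 3, 2, 1 };   /* invented distribution */
	int tpcnr[4];
	unsigned int dist[2] = { 0, 0 };

	memcpy(tpcnr, tpc_nr, sizeof(tpcnr));
	for (int gpc = -1, i = 0; i < tpc_total; i++) {
		do {
			gpc = (gpc + 1) % gpc_nr;          /* next GPC with TPCs left */
		} while (!tpcnr[gpc]);
		int tpc = tpc_nr[gpc] - tpcnr[gpc]--;      /* GPC-local TPC index */
		dist[i / 4] |= (unsigned int)((gpc << 4) | tpc) << ((i % 4) * 8);
	}
	/* prints dist[0] = 0x30201000, dist[1] = 0x12211101 */
	printf("dist[0] = 0x%08x\ndist[1] = 0x%08x\n", dist[0], dist[1]);
	return 0;
}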
+
+static void
+gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+       struct nvkm_device *device = gr->base.engine.subdev.device;
+       const struct gf100_grctx_func *grctx = gr->func->grctx;
+       u32 idle_timeout, tmp;
+       int i;
+
+       gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+       idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
+
+       grctx->pagepool(info);
+       grctx->bundle(info);
+       grctx->attrib(info);
+       grctx->unkn(gr);
+
+       gm200_grctx_generate_tpcid(gr);
+       gf100_grctx_generate_r406028(gr);
+       gk104_grctx_generate_r418bb8(gr);
+
+       for (i = 0; i < 8; i++)
+               nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+       nvkm_wr32(device, 0x406500, 0x00000000);
+
+       nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+       for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+               tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5);
+       nvkm_wr32(device, 0x4041c4, tmp);
+
+       gp100_grctx_generate_405b60(gr);
+
+       gf100_gr_icmd(gr, gr->fuc_bundle);
+       nvkm_wr32(device, 0x404154, idle_timeout);
+       gf100_gr_mthd(gr, gr->fuc_method);
+}
+
+const struct gf100_grctx_func
+gp100_grctx = {
+       .main  = gp100_grctx_generate_main,
+       .unkn  = gk104_grctx_generate_unkn,
+       .bundle = gm107_grctx_generate_bundle,
+       .bundle_size = 0x3000,
+       .bundle_min_gpm_fifo_depth = 0x180,
+       .bundle_token_limit = 0x1080,
+       .pagepool = gp100_grctx_generate_pagepool,
+       .pagepool_size = 0x20000,
+       .attrib = gp100_grctx_generate_attrib,
+       .attrib_nr_max = 0x660,
+       .attrib_nr = 0x440,
+       .alpha_nr_max = 0xc00,
+       .alpha_nr = 0x800,
+};
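
Note: one more detail from gp100_grctx_generate_main() above: the value written to 0x4041c4 packs a TPC-present mask for each GPC into consecutive 5-bit fields. The same expression in isolation, with invented counts:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const int gpc_nr = 2;
	const int tpc_nr[2] = { 5, 3 };   /* invented */
	uint32_t tmp = 0;

	for (int i = 0; i < gpc_nr; i++)
		tmp |= ((1u << tpc_nr[i]) - 1) << (i * 5);
	assert(tmp == 0x000000ff);        /* 0b11111 | (0b111 << 5) */
	return 0;
}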
index 9513badb8220e8dfa01f4fa5f129692507f90c01..157919c788e6f72261d6989a352c4f6fddf3a014 100644
@@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
 }
 
 static const struct nvkm_enum gf100_mp_warp_error[] = {
-       { 0x00, "NO_ERROR" },
-       { 0x01, "STACK_MISMATCH" },
+       { 0x01, "STACK_ERROR" },
+       { 0x02, "API_STACK_ERROR" },
+       { 0x03, "RET_EMPTY_STACK_ERROR" },
+       { 0x04, "PC_WRAP" },
        { 0x05, "MISALIGNED_PC" },
-       { 0x08, "MISALIGNED_GPR" },
-       { 0x09, "INVALID_OPCODE" },
-       { 0x0d, "GPR_OUT_OF_BOUNDS" },
-       { 0x0e, "MEM_OUT_OF_BOUNDS" },
-       { 0x0f, "UNALIGNED_MEM_ACCESS" },
+       { 0x06, "PC_OVERFLOW" },
+       { 0x07, "MISALIGNED_IMMC_ADDR" },
+       { 0x08, "MISALIGNED_REG" },
+       { 0x09, "ILLEGAL_INSTR_ENCODING" },
+       { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
+       { 0x0b, "ILLEGAL_INSTR_PARAM" },
+       { 0x0c, "INVALID_CONST_ADDR" },
+       { 0x0d, "OOR_REG" },
+       { 0x0e, "OOR_ADDR" },
+       { 0x0f, "MISALIGNED_ADDR" },
        { 0x10, "INVALID_ADDR_SPACE" },
-       { 0x11, "INVALID_PARAM" },
+       { 0x11, "ILLEGAL_INSTR_PARAM2" },
+       { 0x12, "INVALID_CONST_ADDR_LDC" },
+       { 0x13, "GEOMETRY_SM_ERROR" },
+       { 0x14, "DIVERGENT" },
+       { 0x15, "WARP_EXIT" },
        {}
 };
 
 static const struct nvkm_bitfield gf100_mp_global_error[] = {
+       { 0x00000001, "SM_TO_SM_FAULT" },
+       { 0x00000002, "L1_ERROR" },
        { 0x00000004, "MULTIPLE_WARP_ERRORS" },
-       { 0x00000008, "OUT_OF_STACK_SPACE" },
+       { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
+       { 0x00000010, "BPT_INT" },
+       { 0x00000020, "BPT_PAUSE" },
+       { 0x00000040, "SINGLE_STEP_COMPLETE" },
+       { 0x20000000, "ECC_SEC_ERROR" },
+       { 0x40000000, "ECC_DED_ERROR" },
+       { 0x80000000, "TIMEOUT" },
        {}
 };
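
Note: unlike the warp-error list, the global-error table above is a bitfield: several bits can be set at once, so decoding walks every entry and reports each set bit instead of stopping at the first match. A reduced version of that decode; struct bitfield stands in for nvkm_bitfield, with entries copied from the table:

#include <stdio.h>

struct bitfield { unsigned int mask; const char *name; };

static const struct bitfield global_error[] = {
	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
	{ 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
	{ 0x80000000, "TIMEOUT" },
	{ 0, NULL }
};

static void decode(unsigned int stat)
{
	for (const struct bitfield *b = global_error; b->name; b++)
		if (stat & b->mask)
			printf(" %s", b->name);
	printf("\n");
}

int main(void)
{
	decode(0x8000000c);   /* prints all three names */
	return 0;
}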
 
@@ -1438,24 +1457,30 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
        struct nvkm_device *device = subdev->device;
        struct nvkm_secboot *sb = device->secboot;
        int i;
+       int ret = 0;
 
        if (gr->firmware) {
                /* load fuc microcode */
-               nvkm_mc_unk260(device->mc, 0);
+               nvkm_mc_unk260(device, 0);
 
                /* securely-managed falcons must be reset using secure boot */
                if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
-                       nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+                       ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
                else
                        gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
                                         &gr->fuc409d);
+               if (ret)
+                       return ret;
+
                if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
-                       nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+                       ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
                else
                        gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
                                         &gr->fuc41ad);
+               if (ret)
+                       return ret;
 
-               nvkm_mc_unk260(device->mc, 1);
+               nvkm_mc_unk260(device, 1);
 
                /* start both of them running */
                nvkm_wr32(device, 0x409840, 0xffffffff);
@@ -1557,7 +1582,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
        }
 
        /* load HUB microcode */
-       nvkm_mc_unk260(device->mc, 0);
+       nvkm_mc_unk260(device, 0);
        nvkm_wr32(device, 0x4091c0, 0x01000000);
        for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
                nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
@@ -1580,7 +1605,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
                        nvkm_wr32(device, 0x41a188, i >> 6);
                nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
        }
-       nvkm_mc_unk260(device->mc, 1);
+       nvkm_mc_unk260(device, 1);
 
        /* load register lists */
        gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
index 2b98abdb92703bec9b56f6834703c77c8d33510b..268b8d60ff73c286215e541be987c204e16a3469 100644
@@ -292,4 +292,6 @@ extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
 extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
 extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
 void gm107_gr_init_bios(struct gf100_gr *);
+
+void gm200_gr_init_gpc_mmu(struct gf100_gr *);
 #endif
index 4ca8ed15191c6ea158ef1bf8ef1cb6ed4a770661..de8b806b88fd9dfd8cc1e1b864a3974db04ffbf7 100644
@@ -361,6 +361,5 @@ gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
        if (ret)
                return ret;
 
-
        return 0;
 }
index 4dfa4513bb6c5a0d8ba45ce615f221d3883d06f1..6435f125757232a884c72477c44b19eef84f7d56 100644
@@ -38,7 +38,7 @@ gm200_gr_rops(struct gf100_gr *gr)
        return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
 }
 
-static void
+void
 gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
 {
        struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
new file mode 100644
index 0000000..26ad79d
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
+{
+       struct nvkm_device *device = gr->base.engine.subdev.device;
+       /*XXX: otherwise identical to gm200 aside from mask.. do everywhere? */
+       const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f;
+       nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+       nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+static int
+gp100_gr_init(struct gf100_gr *gr)
+{
+       struct nvkm_device *device = gr->base.engine.subdev.device;
+       const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+       u32 data[TPC_MAX / 8] = {};
+       u8  tpcnr[GPC_MAX];
+       int gpc, tpc, rop;
+       int i;
+
+       gr->func->init_gpc_mmu(gr);
+
+       gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+       nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+
+       memset(data, 0x00, sizeof(data));
+       memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+       for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+               do {
+                       gpc = (gpc + 1) % gr->gpc_nr;
+               } while (!tpcnr[gpc]);
+               tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+               data[i / 8] |= tpc << ((i % 8) * 4);
+       }
+
+       nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+       nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+       nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+       nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+       for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+                         gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+                                                        gr->tpc_total);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+       }
+
+       nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+       nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+       nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+
+       gr->func->init_rop_active_fbps(gr);
+
+       nvkm_wr32(device, 0x400500, 0x00010001);
+       nvkm_wr32(device, 0x400100, 0xffffffff);
+       nvkm_wr32(device, 0x40013c, 0xffffffff);
+       nvkm_wr32(device, 0x400124, 0x00000002);
+       nvkm_wr32(device, 0x409c24, 0x000f0002);
+       nvkm_wr32(device, 0x405848, 0xc0000000);
+       nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001);
+       nvkm_wr32(device, 0x404000, 0xc0000000);
+       nvkm_wr32(device, 0x404600, 0xc0000000);
+       nvkm_wr32(device, 0x408030, 0xc0000000);
+       nvkm_wr32(device, 0x404490, 0xc0000000);
+       nvkm_wr32(device, 0x406018, 0xc0000000);
+       nvkm_wr32(device, 0x407020, 0x40000000);
+       nvkm_wr32(device, 0x405840, 0xc0000000);
+       nvkm_wr32(device, 0x405844, 0x00ffffff);
+       nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+       nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
+       nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
+
+       gr->func->init_ppc_exceptions(gr);
+
+       for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+               for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
+               }
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+       }
+
+       for (rop = 0; rop < gr->rop_nr; rop++) {
+               nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+               nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+               nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+               nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
+       }
+
+       nvkm_wr32(device, 0x400108, 0xffffffff);
+       nvkm_wr32(device, 0x400138, 0xffffffff);
+       nvkm_wr32(device, 0x400118, 0xffffffff);
+       nvkm_wr32(device, 0x400130, 0xffffffff);
+       nvkm_wr32(device, 0x40011c, 0xffffffff);
+       nvkm_wr32(device, 0x400134, 0xffffffff);
+
+       gf100_gr_zbc_init(gr);
+
+       return gf100_gr_init_ctxctl(gr);
+}
+
+static const struct gf100_gr_func
+gp100_gr = {
+       .init = gp100_gr_init,
+       .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+       .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+       .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+       .rops = gm200_gr_rops,
+       .ppc_nr = 2,
+       .grctx = &gp100_grctx,
+       .sclass = {
+               { -1, -1, FERMI_TWOD_A },
+               { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+               { -1, -1, PASCAL_A, &gf100_fermi },
+               { -1, -1, PASCAL_COMPUTE_A },
+               {}
+       }
+};
+
+int
+gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+       return gm200_gr_new_(&gp100_gr, device, index, pgr);
+}
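
Note: gp100_gr_init() above derives magicgpc918 with DIV_ROUND_UP(0x00800000, gr->tpc_total), i.e. a ceiling division of 2^23 by the TPC count. Spelled out standalone (the tpc_total value is an example):

#include <assert.h>
#include <stdint.h>

static uint32_t div_round_up(uint32_t n, uint32_t d)
{
	return (n + d - 1) / d;   /* what the kernel's DIV_ROUND_UP expands to */
}

int main(void)
{
	assert(div_round_up(0x00800000, 20) == 0x66667);  /* tpc_total = 20 */
	return 0;
}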
index e15b9627b07e12cacca0bb671813d9d9e3b58628..f3c30b2a788e8534fe72c23729779afd5e39d573 100644
 #include <subdev/bios.h>
 #include <subdev/bios/bmp.h>
 #include <subdev/bios/bit.h>
+#include <subdev/bios/image.h>
+
+static bool
+nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
+{
+       u32 p = *addr;
+
+       if (*addr > bios->image0_size && bios->imaged_addr) {
+               *addr -= bios->image0_size;
+               *addr += bios->imaged_addr;
+       }
+
+       if (unlikely(*addr + size >= bios->size)) {
+               nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
+               return false;
+       }
+
+       return true;
+}
+
+u8
+nvbios_rd08(struct nvkm_bios *bios, u32 addr)
+{
+       if (likely(nvbios_addr(bios, &addr, 1)))
+               return bios->data[addr];
+       return 0x00;
+}
+
+u16
+nvbios_rd16(struct nvkm_bios *bios, u32 addr)
+{
+       if (likely(nvbios_addr(bios, &addr, 2)))
+               return get_unaligned_le16(&bios->data[addr]);
+       return 0x0000;
+}
+
+u32
+nvbios_rd32(struct nvkm_bios *bios, u32 addr)
+{
+       if (likely(nvbios_addr(bios, &addr, 4)))
+               return get_unaligned_le32(&bios->data[addr]);
+       return 0x00000000;
+}
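
Note: the three accessors above funnel every byte/word/dword read through one translate-and-bounds-check helper: pointers past image 0 are relocated into the type-0xe0 image before the size check. A standalone restatement of the pattern, without the error logging; struct bios is a stand-in for nvkm_bios, and the kernel's get_unaligned_le16() is replaced by explicit byte assembly:

#include <stdbool.h>
#include <stdint.h>

struct bios {
	const uint8_t *data;
	uint32_t size, image0_size, imaged_addr;
};

static bool bios_addr(const struct bios *b, uint32_t *addr, uint8_t size)
{
	/* pointers past image 0 are relocated into the 0xe0 image */
	if (*addr > b->image0_size && b->imaged_addr)
		*addr = *addr - b->image0_size + b->imaged_addr;
	return *addr + size < b->size;   /* mirrors the ">= bios->size" rejection */
}

static uint16_t bios_rd16(const struct bios *b, uint32_t addr)
{
	if (!bios_addr(b, &addr, 2))
		return 0x0000;               /* OOB reads return zero */
	return (uint16_t)(b->data[addr] | (b->data[addr + 1] << 8));
}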
 
 u8
 nvbios_checksum(const u8 *data, int size)
@@ -100,8 +143,9 @@ int
 nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
 {
        struct nvkm_bios *bios;
+       struct nvbios_image image;
        struct bit_entry bit_i;
-       int ret;
+       int ret, idx = 0;
 
        if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
                return -ENOMEM;
@@ -111,6 +155,19 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
        if (ret)
                return ret;
 
+       /* Some tables have weird pointers that need adjustment before
+        * they're dereferenced.  I'm not entirely sure why...
+        */
+       if (nvbios_image(bios, idx++, &image)) {
+               bios->image0_size = image.size;
+               while (nvbios_image(bios, idx++, &image)) {
+                       if (image.type == 0xe0) {
+                               bios->imaged_addr = image.base;
+                               break;
+                       }
+               }
+       }
+
        /* detect type of vbios we're dealing with */
        bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
                                          "\xff\x7f""NV\0", 5);
index a5e92135cd778682cbf3b06889e536aa25d07bb3..9efb1b48cd54d6d93404da7fedab525e4fb75417 100644
@@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 {
        u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
        if (data) {
-               info->match     = nvbios_rd16(bios, data + 0x00);
+               info->proto     = nvbios_rd08(bios, data + 0x00);
+               info->flags     = nvbios_rd16(bios, data + 0x01);
                info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
                info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
        }
@@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 }
 
 u16
-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
+nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
 {
        u16 data, idx = 0;
        while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
-               if (info->match == type)
+               if ((info->proto == proto || info->proto == 0xff) &&
+                   (info->flags == flags))
                        break;
        }
        return data;
index 05332476354abd4f49ddf9da6c47b2714a01018b..d89e78c4e6890c43f361d1f1acb3ad252a57b844 100644
@@ -40,6 +40,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
                                case 0x30:
                                case 0x40:
                                case 0x41:
+                               case 0x42:
                                        *hdr = nvbios_rd08(bios, data + 0x01);
                                        *len = nvbios_rd08(bios, data + 0x02);
                                        *cnt = nvbios_rd08(bios, data + 0x03);
@@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
                        break;
                case 0x40:
                case 0x41:
+               case 0x42:
                        *hdr = nvbios_rd08(bios, data + 0x04);
                        *cnt = 0;
                        *len = 0;
@@ -109,6 +111,7 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
                        break;
                case 0x40:
                case 0x41:
+               case 0x42:
                        info->flags     = nvbios_rd08(bios, data + 0x04);
                        info->script[0] = nvbios_rd16(bios, data + 0x05);
                        info->script[1] = nvbios_rd16(bios, data + 0x07);
@@ -180,6 +183,11 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
                        info->pe    = nvbios_rd08(bios, data + 0x02);
                        info->tx_pu = nvbios_rd08(bios, data + 0x03);
                        break;
+               case 0x42:
+                       info->dc    = nvbios_rd08(bios, data + 0x00);
+                       info->pe    = nvbios_rd08(bios, data + 0x01);
+                       info->tx_pu = nvbios_rd08(bios, data + 0x02);
+                       break;
                default:
                        data = 0x0000;
                        break;
index 74b14cf093082a1208ee707f1df35a04c822cde0..1dbff7aeafec2ecf2006d588e3656c70071fb1c2 100644
@@ -68,11 +68,16 @@ nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
 bool
 nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image)
 {
+       u32 imaged_addr = bios->imaged_addr;
        memset(image, 0x00, sizeof(*image));
+       bios->imaged_addr = 0;
        do {
                image->base += image->size;
-               if (image->last || !nvbios_imagen(bios, image))
+               if (image->last || !nvbios_imagen(bios, image)) {
+                       bios->imaged_addr = imaged_addr;
                        return false;
+               }
        } while(idx--);
+       bios->imaged_addr = imaged_addr;
        return true;
 }
index 91a7dc56e406980ccf225fb76f232a9ab1a983fd..2ca23a9157ab9345af8d99681b204ab4cc6f52bd 100644
@@ -77,15 +77,17 @@ g84_pll_mapping[] = {
        {}
 };
 
-static u16
+static u32
 pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
        struct bit_entry bit_C;
-       u16 data = 0x0000;
+       u32 data = 0x0000;
 
        if (!bit_entry(bios, 'C', &bit_C)) {
                if (bit_C.version == 1 && bit_C.length >= 10)
                        data = nvbios_rd16(bios, bit_C.offset + 8);
+               if (bit_C.version == 2 && bit_C.length >= 4)
+                       data = nvbios_rd32(bios, bit_C.offset + 0);
                if (data) {
                        *ver = nvbios_rd08(bios, data + 0);
                        *hdr = nvbios_rd08(bios, data + 1);
@@ -137,12 +139,12 @@ pll_map(struct nvkm_bios *bios)
        }
 }
 
-static u16
+static u32
 pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
 {
        struct pll_mapping *map;
        u8  hdr, cnt;
-       u16 data;
+       u32 data;
 
        data = pll_limits_table(bios, ver, &hdr, &cnt, len);
        if (data && *ver >= 0x30) {
@@ -160,7 +162,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
        map = pll_map(bios);
        while (map && map->reg) {
                if (map->reg == reg && *ver >= 0x20) {
-                       u16 addr = (data += hdr);
+                       u32 addr = (data += hdr);
                        *type = map->type;
                        while (cnt--) {
                                if (nvbios_rd32(bios, data) == map->reg)
@@ -179,12 +181,12 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
        return 0x0000;
 }
 
-static u16
+static u32
 pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
 {
        struct pll_mapping *map;
        u8  hdr, cnt;
-       u16 data;
+       u32 data;
 
        data = pll_limits_table(bios, ver, &hdr, &cnt, len);
        if (data && *ver >= 0x30) {
@@ -202,7 +204,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
        map = pll_map(bios);
        while (map && map->reg) {
                if (map->type == type && *ver >= 0x20) {
-                       u16 addr = (data += hdr);
+                       u32 addr = (data += hdr);
                        *reg = map->reg;
                        while (cnt--) {
                                if (nvbios_rd32(bios, data) == map->reg)
@@ -228,7 +230,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
        struct nvkm_device *device = subdev->device;
        u8  ver, len;
        u32 reg = type;
-       u16 data;
+       u32 data;
 
        if (type > PLL_MAX) {
                reg  = type;
index c268e5afe852f35af49f9e65c80556671802d10a..b4a308f3cf7b0c461515a9904e62280b245d70a9 100644
 #include <subdev/bios/image.h>
 #include <subdev/bios/pmu.h>
 
-static u32
-weirdo_pointer(struct nvkm_bios *bios, u32 data)
-{
-       struct nvbios_image image;
-       int idx = 0;
-       if (nvbios_image(bios, idx++, &image)) {
-               data -= image.size;
-               while (nvbios_image(bios, idx++, &image)) {
-                       if (image.type == 0xe0)
-                               return image.base + data;
-               }
-       }
-       return 0;
-}
-
 u32
 nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
@@ -50,7 +35,7 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
        if (!bit_entry(bios, 'p', &bit_p)) {
                if (bit_p.version == 2 && bit_p.length >= 4)
                        data = nvbios_rd32(bios, bit_p.offset + 0x00);
-               if ((data = weirdo_pointer(bios, data))) {
+               if (data) {
                        *ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
                        *hdr = nvbios_rd08(bios, data + 0x01);
                        *len = nvbios_rd08(bios, data + 0x02);
@@ -97,8 +82,7 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
        u32 data;
        memset(info, 0x00, sizeof(*info));
        while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
-               if ( pmuE.type == type &&
-                   (data = weirdo_pointer(bios, pmuE.data))) {
+               if (pmuE.type == type && (data = pmuE.data)) {
                        info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
                        info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
                        info->boot_addr     = data + 0x30;
index d0ae7454764ea4097e9be1386b268414ba68a11d..b57c370c725d8c56533b0f0625c91629b0d8a4f5 100644
@@ -30,11 +30,11 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
                u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
 {
        struct bit_entry bit_P;
-       u16 rammap = 0x0000;
+       u32 rammap = 0x0000;
 
        if (!bit_entry(bios, 'P', &bit_P)) {
                if (bit_P.version == 2)
-                       rammap = nvbios_rd16(bios, bit_P.offset + 4);
+                       rammap = nvbios_rd32(bios, bit_P.offset + 4);
 
                if (rammap) {
                        *ver = nvbios_rd08(bios, rammap + 0);
@@ -61,7 +61,7 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx,
                u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
        u8  snr, ssz;
-       u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+       u32 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
        if (rammap && idx < *cnt) {
                rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
                *hdr = *len;
index 78c449b417b74c18b6d03c0eacb9b1b4894026a9..89d5543118cff139e7736d2d69dcab0c12984041 100644
@@ -99,7 +99,7 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
        struct nvkm_device *device = clk->base.subdev.device;
        u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
-       u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
+       u32 sclk, sctl, sdiv = 2;
 
        switch (ssrc & 0x00000003) {
        case 0:
@@ -109,13 +109,21 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
        case 2:
                return 100000;
        case 3:
-               if (sctl & 0x80000000) {
-                       u32 sclk = read_vco(clk, dsrc + (doff * 4));
-                       u32 sdiv = (sctl & 0x0000003f) + 2;
-                       return (sclk * 2) / sdiv;
+               sclk = read_vco(clk, dsrc + (doff * 4));
+
+               /* Memclk has doff of 0 despite its alt. location */
+               if (doff <= 2) {
+                       sctl = nvkm_rd32(device, dctl + (doff * 4));
+
+                       if (sctl & 0x80000000) {
+                               if (ssrc & 0x100)
+                                       sctl >>= 8;
+
+                               sdiv = (sctl & 0x3f) + 2;
+                       }
                }
 
-               return read_vco(clk, dsrc + (doff * 4));
+               return (sclk * 2) / sdiv;
        default:
                return 0;
        }
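
Note: with the hunk above, read_div() only consults the divider control register for domains with doff <= 2 and otherwise keeps the default sdiv of 2, so (sclk * 2) / sdiv still returns the raw VCO rate on the fallback path, matching the old behaviour. The same arithmetic in isolation; register values are invented:

#include <assert.h>
#include <stdint.h>

static uint32_t div_clk(uint32_t sclk, uint32_t ssrc, uint32_t sctl, int doff)
{
	uint32_t sdiv = 2;
	if (doff <= 2 && (sctl & 0x80000000)) {
		if (ssrc & 0x100)         /* alternate source: field sits in bits 15:8 */
			sctl >>= 8;
		sdiv = (sctl & 0x3f) + 2;
	}
	return (sclk * 2) / sdiv;
}

int main(void)
{
	/* divider field 6 -> sdiv 8: a 1 GHz VCO (in kHz) reads back as 250 MHz */
	assert(div_clk(1000000, 0x3, 0x80000006, 0) == 250000);
	return 0;
}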
@@ -366,11 +374,17 @@ gf100_clk_prog_2(struct gf100_clk *clk, int idx)
                if (info->coef) {
                        nvkm_wr32(device, addr + 0x04, info->coef);
                        nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+
+                       /* Test PLL lock */
+                       nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
                        nvkm_msec(device, 2000,
                                if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
                                        break;
                        );
-                       nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
+                       nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
+
+                       /* Enable sync mode */
+                       nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
                }
        }
 }
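The lock-test sequence above brackets the poll with writes to bit 4: clear it to power up the lock detector, wait for the lock status bit (0x00020000), then set it again before enabling sync mode. A standalone sketch of the pattern using the bit names gk20a.h gives these fields (assuming the gf100 register shares that layout), with the register simulated by a variable:

    #include <stdint.h>
    #include <stdio.h>

    #define CFG_LOCK_DET_OFF (1u << 4)
    #define CFG_LOCK         (1u << 17)

    static uint32_t cfg;                       /* simulated GPCPLL_CFG */
    static uint32_t rd(void) { cfg |= CFG_LOCK; return cfg; } /* PLL locks */

    int main(void)
    {
        int us;

        cfg &= ~CFG_LOCK_DET_OFF;              /* power up lock detection */
        for (us = 0; us < 2000 && !(rd() & CFG_LOCK); us++)
            ;
        cfg |= CFG_LOCK_DET_OFF;               /* power it back down */
        puts(us < 2000 ? "locked" : "timeout");
        return 0;
    }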
index 975c401bccab8c4e3ab69e5daec2de65ffd09ea6..06bc0d2d6ae195bb6c9cf5f88141bfef8e572a31 100644
@@ -393,11 +393,17 @@ gk104_clk_prog_2(struct gk104_clk *clk, int idx)
        if (info->coef) {
                nvkm_wr32(device, addr + 0x04, info->coef);
                nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+
+               /* Test PLL lock */
+               nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
                nvkm_msec(device, 2000,
                        if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
                                break;
                );
-               nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
+               nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
+
+               /* Enable sync mode */
+               nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
        }
 }
 
index 5f0ee24e31b8d5abd4240c054383b92dea73edc6..218893e3e5f92b95efa613e2f4269b4d19d0e8bb 100644
 #include <core/tegra.h>
 #include <subdev/timer.h>
 
-#define KHZ (1000)
-#define MHZ (KHZ * 1000)
-
-#define MASK(w)        ((1 << w) - 1)
-
-#define GPCPLL_CFG             (SYS_GPCPLL_CFG_BASE + 0)
-#define GPCPLL_CFG_ENABLE      BIT(0)
-#define GPCPLL_CFG_IDDQ                BIT(1)
-#define GPCPLL_CFG_LOCK_DET_OFF        BIT(4)
-#define GPCPLL_CFG_LOCK                BIT(17)
-
-#define GPCPLL_COEFF           (SYS_GPCPLL_CFG_BASE + 4)
-#define GPCPLL_COEFF_M_SHIFT   0
-#define GPCPLL_COEFF_M_WIDTH   8
-#define GPCPLL_COEFF_N_SHIFT   8
-#define GPCPLL_COEFF_N_WIDTH   8
-#define GPCPLL_COEFF_P_SHIFT   16
-#define GPCPLL_COEFF_P_WIDTH   6
-
-#define GPCPLL_CFG2                    (SYS_GPCPLL_CFG_BASE + 0xc)
-#define GPCPLL_CFG2_SETUP2_SHIFT       16
-#define GPCPLL_CFG2_PLL_STEPA_SHIFT    24
-
-#define GPCPLL_CFG3                    (SYS_GPCPLL_CFG_BASE + 0x18)
-#define GPCPLL_CFG3_PLL_STEPB_SHIFT    16
-
-#define GPC_BCASE_GPCPLL_CFG_BASE              0x00132800
-#define GPCPLL_NDIV_SLOWDOWN                   (SYS_GPCPLL_CFG_BASE + 0x1c)
-#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT     0
-#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT    8
-#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT    16
-#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT  22
-#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT  31
-
-#define SEL_VCO                                (SYS_GPCPLL_CFG_BASE + 0x100)
-#define SEL_VCO_GPC2CLK_OUT_SHIFT      0
-
-#define GPC2CLK_OUT                    (SYS_GPCPLL_CFG_BASE + 0x250)
-#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH        1
-#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT        31
-#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
-#define GPC2CLK_OUT_VCODIV_WIDTH       6
-#define GPC2CLK_OUT_VCODIV_SHIFT       8
-#define GPC2CLK_OUT_VCODIV1            0
-#define GPC2CLK_OUT_VCODIV_MASK                (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
-                                       GPC2CLK_OUT_VCODIV_SHIFT)
-#define GPC2CLK_OUT_BYPDIV_WIDTH       6
-#define GPC2CLK_OUT_BYPDIV_SHIFT       0
-#define GPC2CLK_OUT_BYPDIV31           0x3c
-#define GPC2CLK_OUT_INIT_MASK  ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
-               GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
-               | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
-               | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
-#define GPC2CLK_OUT_INIT_VAL   ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
-               GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
-               | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
-               | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
-
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG  (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT    24
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
-           (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
-
 static const u8 _pl_to_div[] = {
 /* PL:   0, 1, 2, 3, 4, 5, 6,  7,  8,  9, 10, 11, 12, 13, 14 */
 /* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
@@ -124,7 +61,7 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
        .min_pl = 1, .max_pl = 32,
 };
 
-static void
+void
 gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
 {
        struct nvkm_device *device = clk->base.subdev.device;
@@ -136,20 +73,33 @@ gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
        pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
 }
 
-static u32
-gk20a_pllg_calc_rate(struct gk20a_clk *clk)
+void
+gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
+{
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 val;
+
+       val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
+       val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+       val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
+       nvkm_wr32(device, GPCPLL_COEFF, val);
+}
+
+u32
+gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
 {
        u32 rate;
        u32 divider;
 
-       rate = clk->parent_rate * clk->pll.n;
-       divider = clk->pll.m * clk->pl_to_div(clk->pll.pl);
+       rate = clk->parent_rate * pll->n;
+       divider = pll->m * clk->pl_to_div(pll->pl);
 
        return rate / divider / 2;
 }
 
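Worked example of the rate formula above (hypothetical M/N/PL values; parent rate in Hz): gpc2clk = parent * N / (M * pl_div), and the final division by 2 converts gpc2clk to gpcclk.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t parent = 38400000;   /* 38.4 MHz reference */
        uint32_t n = 60, m = 1, pl_div = 2;
        uint32_t rate = parent * n / (m * pl_div) / 2;

        printf("%u Hz\n", rate);      /* 576000000 */
        return 0;
    }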
-static int
-gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
+int
+gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
+                   struct gk20a_pll *pll)
 {
        struct nvkm_subdev *subdev = &clk->base.subdev;
        u32 target_clk_f, ref_clk_f, target_freq;
@@ -163,16 +113,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
        target_clk_f = rate * 2 / KHZ;
        ref_clk_f = clk->parent_rate / KHZ;
 
-       max_vco_f = clk->params->max_vco;
+       target_vco_f = target_clk_f + target_clk_f / 50;
+       max_vco_f = max(clk->params->max_vco, target_vco_f);
        min_vco_f = clk->params->min_vco;
        best_m = clk->params->max_m;
        best_n = clk->params->min_n;
        best_pl = clk->params->min_pl;
 
-       target_vco_f = target_clk_f + target_clk_f / 50;
-       if (max_vco_f < target_vco_f)
-               max_vco_f = target_vco_f;
-
        /* min_pl <= high_pl <= max_pl */
        high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
        high_pl = min(high_pl, clk->params->max_pl);
@@ -195,9 +142,7 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
                target_vco_f = target_clk_f * clk->pl_to_div(pl);
 
                for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
-                       u32 u_f, vco_f;
-
-                       u_f = ref_clk_f / m;
+                       u32 u_f = ref_clk_f / m;
 
                        if (u_f < clk->params->min_u)
                                break;
@@ -211,6 +156,8 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
                                break;
 
                        for (; n <= n2; n++) {
+                               u32 vco_f;
+
                                if (n < clk->params->min_n)
                                        continue;
                                if (n > clk->params->max_n)
@@ -247,16 +194,16 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
                           "no best match for target @ %dMHz on gpc_pll",
                           target_clk_f / KHZ);
 
-       clk->pll.m = best_m;
-       clk->pll.n = best_n;
-       clk->pll.pl = best_pl;
+       pll->m = best_m;
+       pll->n = best_n;
+       pll->pl = best_pl;
 
-       target_freq = gk20a_pllg_calc_rate(clk);
+       target_freq = gk20a_pllg_calc_rate(clk, pll);
 
        nvkm_debug(subdev,
-                  "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
-                  target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl,
-                  clk->pl_to_div(clk->pll.pl));
+                  "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n",
+                  target_freq / KHZ, pll->m, pll->n, pll->pl,
+                  clk->pl_to_div(pll->pl));
        return 0;
 }
 
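The search above aims the VCO 2% past the target (target + target/50) and derives the usable post-divider window from the [min_vco, max_vco] bounds. A standalone sketch with values in the style of the gk20a parameters (the low_pl bound is an assumption here, mirroring the visible high_pl computation):

    #include <stdio.h>

    int main(void)
    {
        unsigned target_vco = 2000000 + 2000000 / 50; /* KHz, +2% = 2040000 */
        unsigned min_vco = 1300000, max_vco = 2600000;
        unsigned high_pl = (max_vco + target_vco - 1) / target_vco; /* ceil */
        unsigned low_pl  = (min_vco + target_vco - 1) / target_vco;

        printf("pl in [%u, %u]\n", low_pl, high_pl); /* [1, 2] */
        return 0;
    }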
@@ -265,45 +212,36 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 {
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
-       u32 val;
-       int ramp_timeout;
+       struct gk20a_pll pll;
+       int ret = 0;
 
        /* get old coefficients */
-       val = nvkm_rd32(device, GPCPLL_COEFF);
+       gk20a_pllg_read_mnp(clk, &pll);
        /* do nothing if NDIV is the same */
-       if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
+       if (n == pll.n)
                return 0;
 
-       /* setup */
-       nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
-               0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
-       nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
-               0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
-
        /* pll slowdown mode */
        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
                BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
 
        /* new ndiv ready for ramp */
-       val = nvkm_rd32(device, GPCPLL_COEFF);
-       val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
-       val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+       pll.n = n;
        udelay(1);
-       nvkm_wr32(device, GPCPLL_COEFF, val);
+       gk20a_pllg_write_mnp(clk, &pll);
 
        /* dynamic ramp to new ndiv */
-       val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
-       val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
        udelay(1);
-       nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
+       nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+                 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
+                 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
 
-       for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
-               udelay(1);
-               val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
-               if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
-                       break;
-       }
+       /* wait for ramping to complete */
+       if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
+               GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
+               GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
+               ret = -ETIMEDOUT;
 
        /* exit slowdown mode */
        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
@@ -311,21 +249,35 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
                BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
        nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
 
-       if (ramp_timeout <= 0) {
-               nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
-               return -ETIMEDOUT;
-       }
-
-       return 0;
+       return ret;
 }
 
-static void
+static int
 gk20a_pllg_enable(struct gk20a_clk *clk)
 {
        struct nvkm_device *device = clk->base.subdev.device;
+       u32 val;
 
        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
        nvkm_rd32(device, GPCPLL_CFG);
+
+       /* enable lock detection */
+       val = nvkm_rd32(device, GPCPLL_CFG);
+       if (val & GPCPLL_CFG_LOCK_DET_OFF) {
+               val &= ~GPCPLL_CFG_LOCK_DET_OFF;
+               nvkm_wr32(device, GPCPLL_CFG, val);
+       }
+
+       /* wait for lock */
+       if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
+                          GPCPLL_CFG_LOCK) < 0)
+               return -ETIMEDOUT;
+
+       /* switch to VCO mode */
+       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+               BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+       return 0;
 }
 
 static void
@@ -333,117 +285,81 @@ gk20a_pllg_disable(struct gk20a_clk *clk)
 {
        struct nvkm_device *device = clk->base.subdev.device;
 
+       /* put PLL in bypass before disabling it */
+       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
        nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static int
-_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
+gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
 {
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
-       u32 val, cfg;
-       struct gk20a_pll old_pll;
-       u32 n_lo;
-
-       /* get old coefficients */
-       gk20a_pllg_read_mnp(clk, &old_pll);
-
-       /* do NDIV slide if there is no change in M and PL */
-       cfg = nvkm_rd32(device, GPCPLL_CFG);
-       if (allow_slide && clk->pll.m == old_pll.m &&
-           clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) {
-               return gk20a_pllg_slide(clk, clk->pll.n);
-       }
-
-       /* slide down to NDIV_LO */
-       if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
-               int ret;
-
-               n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco,
-                                   clk->parent_rate / KHZ);
-               ret = gk20a_pllg_slide(clk, n_lo);
+       struct gk20a_pll cur_pll;
+       int ret;
 
-               if (ret)
-                       return ret;
-       }
+       gk20a_pllg_read_mnp(clk, &cur_pll);
 
-       /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
+       /* split VCO-to-bypass jump in half by setting out divider 1:2 */
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
-               0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
-
-       /* put PLL in bypass before programming it */
-       val = nvkm_rd32(device, SEL_VCO);
-       val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+                 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+       /* Intentional 2nd write to assure linear divider operation */
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+       nvkm_rd32(device, GPC2CLK_OUT);
        udelay(2);
-       nvkm_wr32(device, SEL_VCO, val);
-
-       /* get out from IDDQ */
-       val = nvkm_rd32(device, GPCPLL_CFG);
-       if (val & GPCPLL_CFG_IDDQ) {
-               val &= ~GPCPLL_CFG_IDDQ;
-               nvkm_wr32(device, GPCPLL_CFG, val);
-               nvkm_rd32(device, GPCPLL_CFG);
-               udelay(2);
-       }
 
        gk20a_pllg_disable(clk);
 
-       nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
-                  clk->pll.m, clk->pll.n, clk->pll.pl);
-
-       n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco,
-                           clk->parent_rate / KHZ);
-       val = clk->pll.m << GPCPLL_COEFF_M_SHIFT;
-       val |= (allow_slide ? n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT;
-       val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT;
-       nvkm_wr32(device, GPCPLL_COEFF, val);
+       gk20a_pllg_write_mnp(clk, pll);
 
-       gk20a_pllg_enable(clk);
-
-       val = nvkm_rd32(device, GPCPLL_CFG);
-       if (val & GPCPLL_CFG_LOCK_DET_OFF) {
-               val &= ~GPCPLL_CFG_LOCK_DET_OFF;
-               nvkm_wr32(device, GPCPLL_CFG, val);
-       }
-
-       if (nvkm_usec(device, 300,
-               if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
-                       break;
-       ) < 0)
-               return -ETIMEDOUT;
-
-       /* switch to VCO mode */
-       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
-                 BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+       ret = gk20a_pllg_enable(clk);
+       if (ret)
+               return ret;
 
        /* restore out divider 1:1 */
-       val = nvkm_rd32(device, GPC2CLK_OUT);
-       if ((val & GPC2CLK_OUT_VCODIV_MASK) !=
-           (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) {
-               val &= ~GPC2CLK_OUT_VCODIV_MASK;
-               val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT;
-               udelay(2);
-               nvkm_wr32(device, GPC2CLK_OUT, val);
-               /* Intentional 2nd write to assure linear divider operation */
-               nvkm_wr32(device, GPC2CLK_OUT, val);
-               nvkm_rd32(device, GPC2CLK_OUT);
-       }
+       udelay(2);
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+       /* Intentional 2nd write to assure linear divider operation */
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+       nvkm_rd32(device, GPC2CLK_OUT);
 
-       /* slide up to new NDIV */
-       return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0;
+       return 0;
 }
 
 static int
-gk20a_pllg_program_mnp(struct gk20a_clk *clk)
+gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
 {
-       int err;
+       struct gk20a_pll cur_pll;
+       int ret;
 
-       err = _gk20a_pllg_program_mnp(clk, true);
-       if (err)
-               err = _gk20a_pllg_program_mnp(clk, false);
+       if (gk20a_pllg_is_enabled(clk)) {
+               gk20a_pllg_read_mnp(clk, &cur_pll);
+
+               /* just do NDIV slide if there is no change to M and PL */
+               if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
+                       return gk20a_pllg_slide(clk, pll->n);
+
+               /* slide down to current NDIV_LO */
+               cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
+               ret = gk20a_pllg_slide(clk, cur_pll.n);
+               if (ret)
+                       return ret;
+       }
+
+       /* program MNP with the new clock parameters and new NDIV_LO */
+       cur_pll = *pll;
+       cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
+       ret = gk20a_pllg_program_mnp(clk, &cur_pll);
+       if (ret)
+               return ret;
 
-       return err;
+       /* slide up to new NDIV */
+       return gk20a_pllg_slide(clk, pll->n);
 }
 
 static struct nvkm_pstate
@@ -546,13 +462,14 @@ gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
        struct gk20a_clk *clk = gk20a_clk(base);
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
+       struct gk20a_pll pll;
 
        switch (src) {
        case nv_clk_src_crystal:
                return device->crystal;
        case nv_clk_src_gpc:
-               gk20a_pllg_read_mnp(clk, &clk->pll);
-               return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
+               gk20a_pllg_read_mnp(clk, &pll);
+               return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
        default:
                nvkm_error(subdev, "invalid clock source %d\n", src);
                return -EINVAL;
@@ -565,15 +482,20 @@ gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
        struct gk20a_clk *clk = gk20a_clk(base);
 
        return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
-                                        GK20A_CLK_GPC_MDIV);
+                                        GK20A_CLK_GPC_MDIV, &clk->pll);
 }
 
 int
 gk20a_clk_prog(struct nvkm_clk *base)
 {
        struct gk20a_clk *clk = gk20a_clk(base);
+       int ret;
+
+       ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
+       if (ret)
+               ret = gk20a_pllg_program_mnp(clk, &clk->pll);
 
-       return gk20a_pllg_program_mnp(clk);
+       return ret;
 }
 
 void
@@ -581,29 +503,62 @@ gk20a_clk_tidy(struct nvkm_clk *base)
 {
 }
 
+int
+gk20a_clk_setup_slide(struct gk20a_clk *clk)
+{
+       struct nvkm_subdev *subdev = &clk->base.subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 step_a, step_b;
+
+       switch (clk->parent_rate) {
+       case 12000000:
+       case 12800000:
+       case 13000000:
+               step_a = 0x2b;
+               step_b = 0x0b;
+               break;
+       case 19200000:
+               step_a = 0x12;
+               step_b = 0x08;
+               break;
+       case 38400000:
+               step_a = 0x04;
+               step_b = 0x05;
+               break;
+       default:
+               nvkm_error(subdev, "invalid parent clock rate %u KHz",
+                          clk->parent_rate / KHZ);
+               return -EINVAL;
+       }
+
+       nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+               step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
+       nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+               step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);
+
+       return 0;
+}
+
 void
 gk20a_clk_fini(struct nvkm_clk *base)
 {
        struct nvkm_device *device = base->subdev.device;
        struct gk20a_clk *clk = gk20a_clk(base);
-       u32 val;
 
        /* slide to VCO min */
-       val = nvkm_rd32(device, GPCPLL_CFG);
-       if (val & GPCPLL_CFG_ENABLE) {
+       if (gk20a_pllg_is_enabled(clk)) {
                struct gk20a_pll pll;
                u32 n_lo;
 
                gk20a_pllg_read_mnp(clk, &pll);
-               n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco,
-                                   clk->parent_rate / KHZ);
+               n_lo = gk20a_pllg_n_lo(clk, &pll);
                gk20a_pllg_slide(clk, n_lo);
        }
 
-       /* put PLL in bypass before disabling it */
-       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
-
        gk20a_pllg_disable(clk);
+
+       /* set IDDQ */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
 }
 
 static int
@@ -614,9 +569,18 @@ gk20a_clk_init(struct nvkm_clk *base)
        struct nvkm_device *device = subdev->device;
        int ret;
 
+       /* get out from IDDQ */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
+       nvkm_rd32(device, GPCPLL_CFG);
+       udelay(5);
+
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
                  GPC2CLK_OUT_INIT_VAL);
 
+       ret = gk20a_clk_setup_slide(clk);
+       if (ret)
+               return ret;
+
        /* Start with lowest frequency */
        base->func->calc(base, &base->func->pstates[0].base);
        ret = base->func->prog(&clk->base);
@@ -646,7 +610,7 @@ gk20a_clk = {
 };
 
 int
-_gk20a_clk_ctor(struct nvkm_device *device, int index,
+gk20a_clk_ctor(struct nvkm_device *device, int index,
                const struct nvkm_clk_func *func,
                const struct gk20a_clk_pllg_params *params,
                struct gk20a_clk *clk)
@@ -685,7 +649,7 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
                return -ENOMEM;
        *pclk = &clk->base;
 
-       ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
+       ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
                              clk);
 
        clk->pl_to_div = pl_to_div;
index 13c46740197db0567d5f2894e74ae864cad17308..0d1450972162cb158dc727bb964aed67bdd56446 100644
 #ifndef __NVKM_CLK_GK20A_H__
 #define __NVKM_CLK_GK20A_H__
 
+#define KHZ (1000)
+#define MHZ (KHZ * 1000)
+
+#define MASK(w)        ((1 << (w)) - 1)
+
 #define GK20A_CLK_GPC_MDIV 1000
 
 #define SYS_GPCPLL_CFG_BASE    0x00137000
+#define GPCPLL_CFG             (SYS_GPCPLL_CFG_BASE + 0)
+#define GPCPLL_CFG_ENABLE      BIT(0)
+#define GPCPLL_CFG_IDDQ                BIT(1)
+#define GPCPLL_CFG_LOCK_DET_OFF        BIT(4)
+#define GPCPLL_CFG_LOCK                BIT(17)
+
+#define GPCPLL_CFG2            (SYS_GPCPLL_CFG_BASE + 0xc)
+#define GPCPLL_CFG2_SETUP2_SHIFT       16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT    24
+
+#define GPCPLL_CFG3                    (SYS_GPCPLL_CFG_BASE + 0x18)
+#define GPCPLL_CFG3_VCO_CTRL_SHIFT             0
+#define GPCPLL_CFG3_VCO_CTRL_WIDTH             9
+#define GPCPLL_CFG3_VCO_CTRL_MASK              \
+       (MASK(GPCPLL_CFG3_VCO_CTRL_WIDTH) << GPCPLL_CFG3_VCO_CTRL_SHIFT)
+#define GPCPLL_CFG3_PLL_STEPB_SHIFT            16
+#define GPCPLL_CFG3_PLL_STEPB_WIDTH            8
+
+#define GPCPLL_COEFF           (SYS_GPCPLL_CFG_BASE + 4)
+#define GPCPLL_COEFF_M_SHIFT   0
+#define GPCPLL_COEFF_M_WIDTH   8
+#define GPCPLL_COEFF_N_SHIFT   8
+#define GPCPLL_COEFF_N_WIDTH   8
+#define GPCPLL_COEFF_N_MASK    \
+       (MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT)
+#define GPCPLL_COEFF_P_SHIFT   16
+#define GPCPLL_COEFF_P_WIDTH   6
+
+#define GPCPLL_NDIV_SLOWDOWN                   (SYS_GPCPLL_CFG_BASE + 0x1c)
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT     0
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT    8
+#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT    16
+#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT  22
+#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT  31
+
+#define GPC_BCAST_GPCPLL_CFG_BASE              0x00132800
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG  (GPC_BCAST_GPCPLL_CFG_BASE + 0xa0)
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT    24
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
+       (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
+
+#define SEL_VCO                                (SYS_GPCPLL_CFG_BASE + 0x100)
+#define SEL_VCO_GPC2CLK_OUT_SHIFT      0
+
+#define GPC2CLK_OUT                    (SYS_GPCPLL_CFG_BASE + 0x250)
+#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH        1
+#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT        31
+#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
+#define GPC2CLK_OUT_VCODIV_WIDTH       6
+#define GPC2CLK_OUT_VCODIV_SHIFT       8
+#define GPC2CLK_OUT_VCODIV1            0
+#define GPC2CLK_OUT_VCODIV2            2
+#define GPC2CLK_OUT_VCODIV_MASK                (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
+                                       GPC2CLK_OUT_VCODIV_SHIFT)
+#define GPC2CLK_OUT_BYPDIV_WIDTH       6
+#define GPC2CLK_OUT_BYPDIV_SHIFT       0
+#define GPC2CLK_OUT_BYPDIV31           0x3c
+#define GPC2CLK_OUT_INIT_MASK  ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
+               GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
+               | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
+               | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
+#define GPC2CLK_OUT_INIT_VAL   ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
+               GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
+               | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
+               | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
 
 /* All frequencies in KHz */
 struct gk20a_clk_pllg_params {
@@ -54,7 +124,29 @@ struct gk20a_clk {
 };
 #define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
 
-int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
+u32 gk20a_pllg_calc_rate(struct gk20a_clk *, struct gk20a_pll *);
+int gk20a_pllg_calc_mnp(struct gk20a_clk *, unsigned long, struct gk20a_pll *);
+void gk20a_pllg_read_mnp(struct gk20a_clk *, struct gk20a_pll *);
+void gk20a_pllg_write_mnp(struct gk20a_clk *, const struct gk20a_pll *);
+
+static inline bool
+gk20a_pllg_is_enabled(struct gk20a_clk *clk)
+{
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 val;
+
+       val = nvkm_rd32(device, GPCPLL_CFG);
+       return val & GPCPLL_CFG_ENABLE;
+}
+
+static inline u32
+gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll)
+{
+       return DIV_ROUND_UP(pll->m * clk->params->min_vco,
+                           clk->parent_rate / KHZ);
+}
+
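Worked example for gk20a_pllg_n_lo() above (hypothetical M and rates): the smallest NDIV that still keeps the VCO at or above min_vco.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned min_vco = 1300000;             /* KHz */
        unsigned parent_khz = 38400000 / 1000;  /* 38.4 MHz parent */
        unsigned m = 1;

        printf("n_lo = %u\n", DIV_ROUND_UP(m * min_vco, parent_khz)); /* 34 */
        return 0;
    }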
+int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
                    const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
 void gk20a_clk_fini(struct nvkm_clk *);
 int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
@@ -62,4 +154,6 @@ int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
 int gk20a_clk_prog(struct nvkm_clk *);
 void gk20a_clk_tidy(struct nvkm_clk *);
 
+int gk20a_clk_setup_slide(struct gk20a_clk *);
+
 #endif
index 71b2bbb6197304ca3dea55821dd7ed83e809ce44..b284e949f73240c2702e87113d696523b7baea73 100644
  */
 
 #include <subdev/clk.h>
+#include <subdev/volt.h>
+#include <subdev/timer.h>
 #include <core/device.h>
+#include <core/tegra.h>
 
 #include "priv.h"
 #include "gk20a.h"
 
-#define KHZ (1000)
-#define MHZ (KHZ * 1000)
-
-#define MASK(w)        ((1 << w) - 1)
+#define GPCPLL_CFG_SYNC_MODE   BIT(2)
 
 #define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340)
 #define BYPASSCTRL_SYS_GPCPLL_SHIFT    0
 #define BYPASSCTRL_SYS_GPCPLL_WIDTH    1
 
+#define GPCPLL_CFG2_SDM_DIN_SHIFT      0
+#define GPCPLL_CFG2_SDM_DIN_WIDTH      8
+#define GPCPLL_CFG2_SDM_DIN_MASK       \
+       (MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
+#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT  8
+#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH  15
+#define GPCPLL_CFG2_SDM_DIN_NEW_MASK   \
+       (MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
+#define GPCPLL_CFG2_SETUP2_SHIFT       16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT    24
+
+#define GPCPLL_DVFS0   (SYS_GPCPLL_CFG_BASE + 0x10)
+#define GPCPLL_DVFS0_DFS_COEFF_SHIFT   0
+#define GPCPLL_DVFS0_DFS_COEFF_WIDTH   7
+#define GPCPLL_DVFS0_DFS_COEFF_MASK    \
+       (MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
+#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT 8
+#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH 7
+#define GPCPLL_DVFS0_DFS_DET_MAX_MASK  \
+       (MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)
+
+#define GPCPLL_DVFS1           (SYS_GPCPLL_CFG_BASE + 0x14)
+#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT         0
+#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH         7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT                7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH                1
+#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT         8
+#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH         7
+#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT         15
+#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH         1
+#define GPCPLL_DVFS1_DFS_CTRL_SHIFT            16
+#define GPCPLL_DVFS1_DFS_CTRL_WIDTH            12
+#define GPCPLL_DVFS1_EN_SDM_SHIFT              28
+#define GPCPLL_DVFS1_EN_SDM_WIDTH              1
+#define GPCPLL_DVFS1_EN_SDM_BIT                        BIT(28)
+#define GPCPLL_DVFS1_EN_DFS_SHIFT              29
+#define GPCPLL_DVFS1_EN_DFS_WIDTH              1
+#define GPCPLL_DVFS1_EN_DFS_BIT                        BIT(29)
+#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT          30
+#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH          1
+#define GPCPLL_DVFS1_EN_DFS_CAL_BIT            BIT(30)
+#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT                31
+#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH                1
+#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT          BIT(31)
+
+#define GPC_BCAST_GPCPLL_DVFS2 (GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
+#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT      BIT(16)
+
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT      24
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH      7
+
+#define DFS_DET_RANGE  6       /* -2^6 ... 2^6-1 */
+#define SDM_DIN_RANGE  12      /* -2^12 ... 2^12-1 */
+
+struct gm20b_clk_dvfs_params {
+       s32 coeff_slope;
+       s32 coeff_offs;
+       u32 vco_ctrl;
+};
+
+static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
+       .coeff_slope = -165230,
+       .coeff_offs = 214007,
+       .vco_ctrl = 0x7 << 3,
+};
+
+/*
+ * base.n is now the *integer* part of the N factor.
+ * sdm_din contains n's fractional part.
+ */
+struct gm20b_pll {
+       struct gk20a_pll base;
+       u32 sdm_din;
+};
+
+struct gm20b_clk_dvfs {
+       u32 dfs_coeff;
+       s32 dfs_det_max;
+       s32 dfs_ext_cal;
+};
+
+struct gm20b_clk {
+       /* currently applied parameters */
+       struct gk20a_clk base;
+       struct gm20b_clk_dvfs dvfs;
+       u32 uv;
+
+       /* new parameters to apply */
+       struct gk20a_pll new_pll;
+       struct gm20b_clk_dvfs new_dvfs;
+       u32 new_uv;
+
+       const struct gm20b_clk_dvfs_params *dvfs_params;
+
+       /* fused parameters */
+       s32 uvdet_slope;
+       s32 uvdet_offs;
+
+       /* safe frequency we can use at minimum voltage */
+       u32 safe_fmax_vmin;
+};
+#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
+
 static u32 pl_to_div(u32 pl)
 {
        return pl;
@@ -53,6 +156,484 @@ static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
        .min_pl = 1, .max_pl = 31,
 };
 
+static void
+gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 val;
+
+       gk20a_pllg_read_mnp(&clk->base, &pll->base);
+       val = nvkm_rd32(device, GPCPLL_CFG2);
+       pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
+                      MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
+}
+
+static void
+gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+
+       nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
+                 pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
+       gk20a_pllg_write_mnp(&clk->base, &pll->base);
+}
+
+/*
+ * Determine DFS_COEFF for the requested voltage. Always select external
+ * calibration override equal to the voltage, and set maximum detection
+ * limit "0" (to make sure that PLL output remains under F/V curve when
+ * voltage increases).
+ */
+static void
+gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
+                         struct gm20b_clk_dvfs *dvfs)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
+       u32 coeff;
+       /* Work with mv as uv would likely trigger an overflow */
+       s32 mv = DIV_ROUND_CLOSEST(uv, 1000);
+
+       /* coeff = slope * voltage + offset */
+       coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
+       coeff = DIV_ROUND_CLOSEST(coeff, 1000);
+       dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));
+
+       dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
+                                            clk->uvdet_slope);
+       /* should never happen */
+       if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
+               nvkm_error(subdev, "dfs_ext_cal overflow!\n");
+
+       dvfs->dfs_det_max = 0;
+
+       nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
+                  __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
+                  dvfs->dfs_det_max);
+}
+
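Worked example of the coefficient formula above, plugging in the gm20b_dvfs_params values (slope -165230, offset 214007) at a hypothetical 900 mV; plain truncating division stands in where the driver rounds to closest, and the chosen numbers make both agree:

    #include <stdio.h>

    int main(void)
    {
        int mv = 900;
        int coeff = mv * -165230 / 1000 + 214007; /* -148707 + 214007 */

        coeff /= 1000;                            /* 65300 / 1000 */
        printf("dfs_coeff = %d\n", coeff);        /* 65, under the 7-bit cap */
        return 0;
    }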
+/*
+ * Solve equation for integer and fractional part of the effective NDIV:
+ *
+ * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
+ *         (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
+ *
+ * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
+ */
+static void
+gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       const struct gk20a_clk_pllg_params *p = clk->base.params;
+       u32 n;
+       s32 det_delta;
+       u32 rem, rem_range;
+
+       /* calculate current ext_cal and subtract previous one */
+       det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
+                                     clk->uvdet_slope);
+       det_delta -= clk->dvfs.dfs_ext_cal;
+       det_delta = min(det_delta, clk->dvfs.dfs_det_max);
+       det_delta *= clk->dvfs.dfs_coeff;
+
+       /* integer part of n */
+       n = (n_eff << DFS_DET_RANGE) - det_delta;
+       /* should never happen! */
+       if (n <= 0) {
+               nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
+               n = 1 << DFS_DET_RANGE;
+       }
+       if (n >> DFS_DET_RANGE > p->max_n) {
+               nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
+               n = p->max_n << DFS_DET_RANGE;
+       }
+       *n_int = n >> DFS_DET_RANGE;
+
+       /* fractional part of n */
+       rem = ((u32)n) & MASK(DFS_DET_RANGE);
+       rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
+       /* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
+       rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
+       /* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
+       *sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
+
+       nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
+                  n_eff, *n_int, *sdm_din);
+}
+
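Worked example of the fixed-point split above (hypothetical n_eff and det_delta; DFS_DET_RANGE = 6 and SDM_DIN_RANGE = 12 as defined earlier): the detector contribution is subtracted in 2^-6 units, and whatever remains below the integer part lands in SDM_DIN after the 1/2 offset and the 8-bit shift-out.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t n_eff = 100;
        int32_t det_delta = 32;                 /* +0.5 in 2^-6 units */
        uint32_t n = (n_eff << 6) - det_delta;  /* 6368 */
        uint32_t n_int = n >> 6;                /* 99 */
        uint32_t rem = n & 0x3f;                /* 32, i.e. 0.5 */
        uint32_t sdm = (((rem << 7) - (1u << 12)) >> 8) & 0xff;

        /* n_int + 1/2 + sdm/2^13 = 99.5; the detector supplies the rest */
        printf("n_int=%u sdm_din=%u\n", n_int, sdm); /* 99, 0 */
        return 0;
    }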
+static int
+gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       struct nvkm_device *device = subdev->device;
+       struct gm20b_pll pll;
+       u32 n_int, sdm_din;
+       int ret = 0;
+
+       /* calculate the new n_int/sdm_din for this n/uv */
+       gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);
+
+       /* get old coefficients */
+       gm20b_pllg_read_mnp(clk, &pll);
+       /* do nothing if NDIV is the same */
+       if (n_int == pll.base.n && sdm_din == pll.sdm_din)
+               return 0;
+
+       /* pll slowdown mode */
+       nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+               BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
+               BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
+
+       /* new ndiv ready for ramp */
+       /* in DVFS mode SDM is updated via "new" field */
+       nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
+                 sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
+       pll.base.n = n_int;
+       udelay(1);
+       gk20a_pllg_write_mnp(&clk->base, &pll.base);
+
+       /* dynamic ramp to new ndiv */
+       udelay(1);
+       nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+                 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
+                 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
+
+       /* wait for ramping to complete */
+       if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
+               GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
+               GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
+               ret = -ETIMEDOUT;
+
+       /* in DVFS mode complete SDM update */
+       nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
+                 sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
+
+       /* exit slowdown mode */
+       nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+               BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
+               BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
+       nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
+
+       return ret;
+}
+
+static int
+gm20b_pllg_enable(struct gm20b_clk *clk)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+       nvkm_rd32(device, GPCPLL_CFG);
+
+       /* In DVFS mode lock cannot be used - so just delay */
+       udelay(40);
+
+       /* set SYNC_MODE for glitchless switch out of bypass */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
+                      GPCPLL_CFG_SYNC_MODE);
+       nvkm_rd32(device, GPCPLL_CFG);
+
+       /* switch to VCO mode */
+       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+                 BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+       return 0;
+}
+
+static void
+gm20b_pllg_disable(struct gm20b_clk *clk)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+
+       /* put PLL in bypass before disabling it */
+       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
+       /* clear SYNC_MODE before disabling PLL */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);
+
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+       nvkm_rd32(device, GPCPLL_CFG);
+}
+
+static int
+gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       struct nvkm_device *device = subdev->device;
+       struct gm20b_pll cur_pll;
+       u32 n_int, sdm_din;
+       /* if we only change pdiv, we can do a glitchless transition */
+       bool pdiv_only;
+       int ret;
+
+       gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
+       gm20b_pllg_read_mnp(clk, &cur_pll);
+       pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
+                   cur_pll.base.m == pll->m;
+
+       /* need full sequence if clock not enabled yet */
+       if (!gk20a_pllg_is_enabled(&clk->base))
+               pdiv_only = false;
+
+       /* split VCO-to-bypass jump in half by setting out divider 1:2 */
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+       /* Intentional 2nd write to assure linear divider operation */
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+       nvkm_rd32(device, GPC2CLK_OUT);
+       udelay(2);
+
+       if (pdiv_only) {
+               u32 old = cur_pll.base.pl;
+               u32 new = pll->pl;
+
+               /*
+                * we can do a glitchless transition only if the old and new PL
+                * parameters share at least one bit set to 1. If this is not
+                * the case, calculate and program an interim PL that will allow
+                * us to respect that rule.
+                */
+               if ((old & new) == 0) {
+                       cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
+                                             new | BIT(ffs(old) - 1));
+                       gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
+               }
+
+               cur_pll.base.pl = new;
+               gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
+       } else {
+               /* disable before programming if more than pdiv changes */
+               gm20b_pllg_disable(clk);
+
+               cur_pll.base = *pll;
+               cur_pll.base.n = n_int;
+               cur_pll.sdm_din = sdm_din;
+               gm20b_pllg_write_mnp(clk, &cur_pll);
+
+               ret = gm20b_pllg_enable(clk);
+               if (ret)
+                       return ret;
+       }
+
+       /* restore out divider 1:1 */
+       udelay(2);
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+       /* Intentional 2nd write to assure linear divider operation */
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+       nvkm_rd32(device, GPC2CLK_OUT);
+
+       return 0;
+}
+
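Worked example of the interim-PL rule above: PL 4 (0b100) and PL 3 (0b011) share no set bit, so a bridge value overlapping both is programmed first (hypothetical values; ffs_bit stands in for the kernel's BIT(ffs(x) - 1)):

    #include <stdio.h>

    static unsigned ffs_bit(unsigned v) { return v & -v; } /* lowest set bit */

    int main(void)
    {
        unsigned oldpl = 4, newpl = 3;

        if ((oldpl & newpl) == 0) {
            unsigned a = oldpl | ffs_bit(newpl); /* 5 (0b101) */
            unsigned b = newpl | ffs_bit(oldpl); /* 7 (0b111) */
            printf("interim pl = %u\n", a < b ? a : b); /* 5 */
        }
        return 0;
    }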
+static int
+gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
+{
+       struct gk20a_pll cur_pll;
+       int ret;
+
+       if (gk20a_pllg_is_enabled(&clk->base)) {
+               gk20a_pllg_read_mnp(&clk->base, &cur_pll);
+
+               /* just do NDIV slide if there is no change to M and PL */
+               if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
+                       return gm20b_pllg_slide(clk, pll->n);
+
+               /* slide down to current NDIV_LO */
+               cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
+               ret = gm20b_pllg_slide(clk, cur_pll.n);
+               if (ret)
+                       return ret;
+       }
+
+       /* program MNP with the new clock parameters and new NDIV_LO */
+       cur_pll = *pll;
+       cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
+       ret = gm20b_pllg_program_mnp(clk, &cur_pll);
+       if (ret)
+               return ret;
+
+       /* slide up to new NDIV */
+       return gm20b_pllg_slide(clk, pll->n);
+}
+
+static int
+gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
+{
+       struct gm20b_clk *clk = gm20b_clk(base);
+       struct nvkm_subdev *subdev = &base->subdev;
+       struct nvkm_volt *volt = base->subdev.device->volt;
+       int ret;
+
+       ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
+                                            GK20A_CLK_GPC_MDIV, &clk->new_pll);
+       if (ret)
+               return ret;
+
+       clk->new_uv = volt->vid[cstate->voltage].uv;
+       gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);
+
+       nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);
+
+       return 0;
+}
+
+/*
+ * Compute PLL parameters that are always safe for the current voltage
+ */
+static void
+gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
+{
+       u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
+       u32 parent_rate = clk->base.parent_rate / KHZ;
+       u32 nmin, nsafe;
+
+       /* remove a safe margin of 10% */
+       if (rate > clk->safe_fmax_vmin)
+               rate = rate * (100 - 10) / 100;
+
+       /* gpc2clk */
+       rate *= 2;
+
+       nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
+       nsafe = pll->m * rate / (clk->base.parent_rate);
+
+       if (nsafe < nmin) {
+               pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
+               nsafe = nmin;
+       }
+
+       pll->n = nsafe;
+}
+
+static void
+gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+
+       /* strobe to read external DFS coefficient */
+       nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
+
+       nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
+                 coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);
+
+       udelay(1);
+       nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
+}
+
+static void
+gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+       u32 val;
+
+       nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
+                 dfs_det_cal);
+       udelay(1);
+
+       val = nvkm_rd32(device, GPCPLL_DVFS1);
+       if (!(val & BIT(25))) {
+               /* Use external value to overwrite calibration value */
+               val |= BIT(25) | BIT(16);
+               nvkm_wr32(device, GPCPLL_DVFS1, val);
+       }
+}
+
+static void
+gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
+                                struct gm20b_clk_dvfs *dvfs)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+
+       /* strobe to read external DFS coefficient */
+       nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
+
+       nvkm_mask(device, GPCPLL_DVFS0,
+                 GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
+                 dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
+                 dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);
+
+       udelay(1);
+       nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
+
+       gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
+}
+
+static int
+gm20b_clk_prog(struct nvkm_clk *base)
+{
+       struct gm20b_clk *clk = gm20b_clk(base);
+       u32 cur_freq;
+       int ret;
+
+       /* No change in DVFS settings? */
+       if (clk->uv == clk->new_uv)
+               goto prog;
+
+       /*
+        * Interim step for changing DVFS detection settings: low enough
+        * frequency to be safe at DVFS coeff = 0.
+        *
+        * 1. If voltage is increasing:
+        * - safe frequency target matches the lowest - old - frequency
+        * - DVFS settings are still old
+        * - Voltage already increased to new level by volt, but maximum
+        *   detection limit assures PLL output remains under F/V curve
+        *
+        * 2. If voltage is decreasing:
+        * - safe frequency target matches the lowest - new - frequency
+        * - DVFS settings are still old
+        * - Voltage is also old, it will be lowered by volt afterwards
+        *
+        * Interim step can be skipped if old frequency is below safe minimum,
+        * i.e., it is low enough to be safe at any voltage in operating range
+        * with zero DVFS coefficient.
+        */
+       cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
+       if (cur_freq > clk->safe_fmax_vmin) {
+               struct gk20a_pll pll_safe;
+
+               if (clk->uv < clk->new_uv)
+                       /* voltage will rise: safe frequency is the current one */
+                       pll_safe = clk->base.pll;
+               else
+                       /* voltage will drop: safe frequency is new one */
+                       pll_safe = clk->new_pll;
+
+               gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
+               ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * DVFS detection settings transition:
+        * - Set DVFS coefficient zero
+        * - Set calibration level to new voltage
+        * - Set DVFS coefficient to match new voltage
+        */
+       gm20b_dvfs_program_coeff(clk, 0);
+       gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
+       gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
+       gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
+
+prog:
+       clk->uv = clk->new_uv;
+       clk->dvfs = clk->new_dvfs;
+       clk->base.pll = clk->new_pll;
+
+       return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
+}
+
 static struct nvkm_pstate
 gm20b_pstates[] = {
        {
@@ -133,9 +714,99 @@ gm20b_pstates[] = {
                        .voltage = 12,
                },
        },
-
 };
 
+static void
+gm20b_clk_fini(struct nvkm_clk *base)
+{
+       struct nvkm_device *device = base->subdev.device;
+       struct gm20b_clk *clk = gm20b_clk(base);
+
+       /* slide to VCO min */
+       if (gk20a_pllg_is_enabled(&clk->base)) {
+               struct gk20a_pll pll;
+               u32 n_lo;
+
+               gk20a_pllg_read_mnp(&clk->base, &pll);
+               n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
+               gm20b_pllg_slide(clk, n_lo);
+       }
+
+       gm20b_pllg_disable(clk);
+
+       /* set IDDQ */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
+}
+
+static int
+gm20b_clk_init_dvfs(struct gm20b_clk *clk)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       struct nvkm_device *device = subdev->device;
+       bool fused = clk->uvdet_offs && clk->uvdet_slope;
+       static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
+       u32 data;
+       int ret;
+
+       /* Enable NA DVFS */
+       nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
+                 GPCPLL_DVFS1_EN_DFS_BIT);
+
+       /* Set VCO_CTRL */
+       if (clk->dvfs_params->vco_ctrl)
+               nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
+                     clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);
+
+       if (fused) {
+               /* Start internal calibration, but ignore results */
+               nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
+                         GPCPLL_DVFS1_EN_DFS_CAL_BIT);
+
+               /* got uvdet parameters from fuse, skip calibration */
+               goto calibrated;
+       }
+
+       /*
+        * If calibration parameters are not fused, start internal calibration,
+        * wait for completion, and use results along with default slope to
+        * calculate ADC offset during boot.
+        */
+       nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
+                         GPCPLL_DVFS1_EN_DFS_CAL_BIT);
+
+       /* Wait for internal calibration done (spec < 2us). */
+       ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
+                            GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
+                            GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
+       if (ret < 0) {
+               nvkm_error(subdev, "GPCPLL calibration timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       data = nvkm_rd32(device, GPCPLL_CFG3) >>
+                        GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
+       data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);
+
+       clk->uvdet_slope = ADC_SLOPE_UV;
+       clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;
+
+       nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
+                  clk->uvdet_offs, clk->uvdet_slope);
+
+calibrated:
+       /* Compute and apply initial DVFS parameters */
+       gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
+       gm20b_dvfs_program_coeff(clk, 0);
+       gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
+       gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
+       gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
+
+       return 0;
+}
+
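Worked example of the boot-time calibration fallback above (hypothetical DFS_TESTOUT readout): with the default 10000 uV/step slope, the offset is simply the current voltage minus the detector readout times the slope.

    #include <stdio.h>

    int main(void)
    {
        int uv = 900000, readout = 30, slope = 10000;

        printf("uvdet_offs = %d uV\n", uv - readout * slope); /* 600000 */
        return 0;
    }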
+/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */
+static const struct nvkm_clk_func gm20b_clk;
+
 static int
 gm20b_clk_init(struct nvkm_clk *base)
 {
@@ -143,15 +814,56 @@ gm20b_clk_init(struct nvkm_clk *base)
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        int ret;
+       u32 data;
+
+       /* get out from IDDQ */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
+       nvkm_rd32(device, GPCPLL_CFG);
+       udelay(5);
+
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
+                 GPC2CLK_OUT_INIT_VAL);
 
        /* Set the global bypass control to VCO */
        nvkm_mask(device, BYPASSCTRL_SYS,
               MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
               0);
 
+       ret = gk20a_clk_setup_slide(clk);
+       if (ret)
+               return ret;
+
+       /* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
+       data = nvkm_rd32(device, 0x021944);
+       if (!(data & 0x3)) {
+               data |= 0x2;
+               nvkm_wr32(device, 0x021944, data);
+
+               data = nvkm_rd32(device, 0x021948);
+               data |=  0x1;
+               nvkm_wr32(device, 0x021948, data);
+       }
+
+       /* Disable idle slowdown */
+       nvkm_mask(device, 0x20160, 0x003f0000, 0x0);
+
+       /* speedo >= 1? */
+       if (clk->base.func == &gm20b_clk) {
+               struct gm20b_clk *_clk = gm20b_clk(base);
+               struct nvkm_volt *volt = device->volt;
+
+               /* Get current voltage */
+               _clk->uv = nvkm_volt_get(volt);
+
+               /* Initialize DVFS */
+               ret = gm20b_clk_init_dvfs(_clk);
+               if (ret)
+                       return ret;
+       }
+
        /* Start with lowest frequency */
        base->func->calc(base, &base->func->pstates[0].base);
-       ret = base->func->prog(&clk->base);
+       ret = base->func->prog(base);
        if (ret) {
                nvkm_error(subdev, "cannot initialize clock\n");
                return ret;
@@ -169,6 +881,7 @@ gm20b_clk_speedo0 = {
        .prog = gk20a_clk_prog,
        .tidy = gk20a_clk_tidy,
        .pstates = gm20b_pstates,
+       /* Speedo 0 does not support the last (highest) pstate */
        .nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
        .domains = {
                { nv_clk_src_crystal, 0xff },
@@ -177,8 +890,26 @@ gm20b_clk_speedo0 = {
        },
 };
 
-int
-gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+static const struct nvkm_clk_func
+gm20b_clk = {
+       .init = gm20b_clk_init,
+       .fini = gm20b_clk_fini,
+       .read = gk20a_clk_read,
+       .calc = gm20b_clk_calc,
+       .prog = gm20b_clk_prog,
+       .tidy = gk20a_clk_tidy,
+       .pstates = gm20b_pstates,
+       .nr_pstates = ARRAY_SIZE(gm20b_pstates),
+       .domains = {
+               { nv_clk_src_crystal, 0xff },
+               { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+               { nv_clk_src_max },
+       },
+};
+
+static int
+gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
+                     struct nvkm_clk **pclk)
 {
        struct gk20a_clk *clk;
        int ret;
@@ -188,11 +919,156 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
                return -ENOMEM;
        *pclk = &clk->base;
 
-       ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
-                             &gm20b_pllg_params, clk);
+       ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
+                            &gm20b_pllg_params, clk);
 
        clk->pl_to_div = pl_to_div;
        clk->div_to_pl = div_to_pl;
 
        return ret;
 }
+
+/* FUSE register */
+#define FUSE_RESERVED_CALIB0   0x204
+#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT      0
+#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH      4
+#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT       4
+#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH       10
+#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT          14
+#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH          10
+#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT           24
+#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH           6
+#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT            30
+#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH            2
+
+static int
+gm20b_clk_init_fused_params(struct gm20b_clk *clk)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       u32 val = 0;
+       u32 rev = 0;
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA)
+       tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
+       rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
+             MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
+#endif
+
+       /* No fused parameters; we will calibrate later */
+       if (rev == 0)
+               return -EINVAL;
+
+       /* Integer part in mV + fractional part in uV */
+       clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
+                       MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
+                       ((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
+                       MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));
+
+       /* Integer part in mV + fractional part in 100uV */
+       clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
+                       MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
+                       ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
+                        MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;
+
+       nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
+                  clk->uvdet_slope, clk->uvdet_offs);
+       return 0;
+}
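
To make the unit bookkeeping above concrete, here is a small standalone sketch of the same decode. The fuse word is made up for illustration, and MASK(w) is assumed to expand to ((1 << (w)) - 1) as elsewhere in this driver:

#include <stdio.h>
#include <stdint.h>

#define MASK(w) ((1 << (w)) - 1)

int main(void)
{
	/* hypothetical FUSE_RESERVED_CALIB0 word: rev=1, slope int=2,
	 * slope frac=500, intercept int=600, intercept frac=5, packed
	 * per the shift/width constants above */
	uint32_t val = (1u << 30) | (2u << 24) | (500u << 14) | (600u << 4) | 5u;

	uint32_t slope = ((val >> 24) & MASK(6)) * 1000 +	/* int part, mV -> uV */
			 ((val >> 14) & MASK(10));		/* frac part, uV */
	uint32_t offs  = ((val >> 4) & MASK(10)) * 1000 +	/* int part, mV -> uV */
			 ((val >> 0) & MASK(4)) * 100;		/* frac part, 100uV */

	printf("slope %u, offs %u\n", slope, offs);	/* slope 2500, offs 600500 */
	return 0;
}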
+
+static int
+gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       struct nvkm_volt *volt = subdev->device->volt;
+       struct nvkm_pstate *pstates = clk->base.base.func->pstates;
+       int nr_pstates = clk->base.base.func->nr_pstates;
+       int vmin, id = 0;
+       u32 fmax = 0;
+       int i;
+
+       /* find lowest voltage we can use */
+       vmin = volt->vid[0].uv;
+       for (i = 1; i < volt->vid_nr; i++) {
+               if (volt->vid[i].uv <= vmin) {
+                       vmin = volt->vid[i].uv;
+                       id = volt->vid[i].vid;
+               }
+       }
+
+       /* find max frequency at this voltage */
+       for (i = 0; i < nr_pstates; i++)
+               if (pstates[i].base.voltage == id)
+                       fmax = max(fmax,
+                                  pstates[i].base.domain[nv_clk_src_gpc]);
+
+       if (!fmax) {
+               nvkm_error(subdev, "failed to evaluate safe fmax\n");
+               return -EINVAL;
+       }
+
+       /* we are safe at 90% of the max frequency */
+       clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
+       nvkm_debug(subdev, "safe fmax @ vmin = %u kHz\n", clk->safe_fmax_vmin);
+
+       return 0;
+}
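
A standalone sketch of the scan and 10% derating above, using a made-up pstate table; the vid and frequency values are illustrative only:

#include <stdio.h>
#include <stdint.h>

struct pstate { int vid; uint32_t gpc_khz; };

int main(void)
{
	/* hypothetical table; vid 0 is assumed to be the lowest voltage */
	struct pstate pstates[] = { { 0, 76800 }, { 0, 153600 }, { 1, 230400 } };
	int vmin_id = 0;	/* lowest-voltage vid found by the scan above */
	uint32_t fmax = 0;
	unsigned i;

	for (i = 0; i < sizeof(pstates) / sizeof(pstates[0]); i++)
		if (pstates[i].vid == vmin_id && pstates[i].gpc_khz > fmax)
			fmax = pstates[i].gpc_khz;

	/* derate by 10%: the frequency considered safe at vmin */
	printf("safe fmax @ vmin = %u kHz\n", fmax * (100 - 10) / 100);
	return 0;
}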
+
+int
+gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+       struct nvkm_device_tegra *tdev = device->func->tegra(device);
+       struct gm20b_clk *clk;
+       struct nvkm_subdev *subdev;
+       struct gk20a_clk_pllg_params *clk_params;
+       int ret;
+
+       /* Speedo 0 GPUs cannot use noise-aware PLL */
+       if (tdev->gpu_speedo_id == 0)
+               return gm20b_clk_new_speedo0(device, index, pclk);
+
+       /* Speedo >= 1, use NAPLL */
+       clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
+       if (!clk)
+               return -ENOMEM;
+       *pclk = &clk->base.base;
+       subdev = &clk->base.base.subdev;
+
+       /* duplicate the clock parameters since we will patch them below */
+       clk_params = (void *) (clk + 1);
+       *clk_params = gm20b_pllg_params;
+       ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
+                            &clk->base);
+       if (ret)
+               return ret;
+
+       /*
+        * NAPLL can only work with max_u; clamp the m range so that
+        * gk20a_pllg_calc_mnp always uses it
+        */
+       clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
+                                               (clk->base.parent_rate / KHZ));
+       if (clk_params->max_m == 0) {
+               nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
+               kfree(clk);
+               return gm20b_clk_new_speedo0(device, index, pclk);
+       }
+
+       clk->base.pl_to_div = pl_to_div;
+       clk->base.div_to_pl = div_to_pl;
+
+       clk->dvfs_params = &gm20b_dvfs_params;
+
+       ret = gm20b_clk_init_fused_params(clk);
+       /*
+        * If we have no fused parameters, calibration happens during
+        * init instead; this should never be the case on production parts
+        */
+       if (ret)
+               nvkm_warn(subdev, "no fused calibration parameters\n");
+
+       ret = gm20b_clk_init_safe_fmax(clk);
+       if (ret)
+               return ret;
+
+       return 0;
+}
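
A worked example of the m-range clamp above, with hypothetical max_u and parent-rate figures; DIV_ROUND_UP is assumed to be the usual ((n) + (d) - 1) / (d) kernel macro:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define KHZ 1000

int main(void)
{
	/* hypothetical figures: 614.4 MHz max update rate, 26 MHz parent */
	unsigned long max_u = 614400;			/* KHz */
	unsigned long parent_rate = 26000000;		/* Hz */
	unsigned m = DIV_ROUND_UP(max_u, parent_rate / KHZ);

	/* m == 24 here; a result of 0 would trigger the legacy fallback */
	printf("min_m = max_m = %u\n", m);
	return 0;
}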
index 842d5de96d73169be05fc86d2462fb1e27ebdcf9..edcc157e6ac843d13f5ad9339c05b662c3f784ce 100644 (file)
@@ -24,6 +24,8 @@ nvkm-y += nvkm/subdev/fb/gk104.o
 nvkm-y += nvkm/subdev/fb/gk20a.o
 nvkm-y += nvkm/subdev/fb/gm107.o
 nvkm-y += nvkm/subdev/fb/gm200.o
+nvkm-y += nvkm/subdev/fb/gp100.o
+nvkm-y += nvkm/subdev/fb/gp104.o
 
 nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -41,6 +43,7 @@ nvkm-y += nvkm/subdev/fb/rammcp77.o
 nvkm-y += nvkm/subdev/fb/ramgf100.o
 nvkm-y += nvkm/subdev/fb/ramgk104.o
 nvkm-y += nvkm/subdev/fb/ramgm107.o
+nvkm-y += nvkm/subdev/fb/ramgp100.o
 nvkm-y += nvkm/subdev/fb/sddr2.o
 nvkm-y += nvkm/subdev/fb/sddr3.o
 nvkm-y += nvkm/subdev/fb/gddr3.o
index ce90242b8cceeca8718b2daa7c1c6e8553d77db5..a7049c041594b6511f52b33331a73e2bc8e3582d 100644 (file)
@@ -25,6 +25,7 @@
 #include "ram.h"
 
 #include <core/memory.h>
+#include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/M0203.h>
 #include <engine/gr.h>
@@ -134,6 +135,10 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
 
        if (fb->func->init)
                fb->func->init(fb);
+       if (fb->func->init_page)
+               fb->func->init_page(fb);
+       if (fb->func->init_unkn)
+               fb->func->init_unkn(fb);
        return 0;
 }
 
@@ -171,6 +176,7 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
        nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
        fb->func = func;
        fb->tile.regions = fb->func->tile.regions;
+       fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0);
 }
 
 int
index e649ead5ccfc74c7416e78f4294d7d1bcdbbf9b3..76433cc66fff2f1d96d1b4f81c5ab4a09b34e6e8 100644 (file)
@@ -71,6 +71,22 @@ gf100_fb_oneinit(struct nvkm_fb *fb)
        return 0;
 }
 
+void
+gf100_fb_init_page(struct nvkm_fb *fb)
+{
+       struct nvkm_device *device = fb->subdev.device;
+       switch (fb->page) {
+       case 16:
+               nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
+               break;
+       case 17:
+       default:
+               nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
+               fb->page = 17;
+               break;
+       }
+}
+
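
For reference, fb->page apparently holds log2 of the large-page size, seeded from the NvFbBigPage config option in nvkm_fb_ctor above, so 16 selects 64 KiB pages and 17 selects 128 KiB. A minimal standalone mirror of the selection logic; the bit-0 semantics are inferred from the switch above and the removed "128KiB lpg" comment:

#include <stdio.h>

/* mirror of gf100_fb_init_page(): bit 0 of 0x100c80 selects 64 KiB (1)
 * vs 128 KiB (0) large pages; anything else falls back to 128 KiB */
static unsigned lpg_bit(int *page)
{
	switch (*page) {
	case 16: return 0x00000001;	/* 2^16 = 64 KiB */
	case 17: return 0x00000000;	/* 2^17 = 128 KiB */
	default: *page = 17; return 0x00000000;
	}
}

int main(void)
{
	int page = 16;
	printf("page=%d -> 0x100c80 bit0=%u\n", page, lpg_bit(&page));
	return 0;
}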
 void
 gf100_fb_init(struct nvkm_fb *base)
 {
@@ -79,8 +95,6 @@ gf100_fb_init(struct nvkm_fb *base)
 
        if (fb->r100c10_page)
                nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
-
-       nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
 }
 
 void *
@@ -125,6 +139,7 @@ gf100_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gf100_fb_init,
+       .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gf100_ram_new,
        .memtype_valid = gf100_fb_memtype_valid,
index 2160e5a39c9a928a316049b5657bad2b0b4ebe60..449f431644b31ab90a0245feebf03d7023be88b3 100644 (file)
@@ -14,4 +14,6 @@ int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
 void *gf100_fb_dtor(struct nvkm_fb *);
 void gf100_fb_init(struct nvkm_fb *);
 void gf100_fb_intr(struct nvkm_fb *);
+
+void gp100_fb_init(struct nvkm_fb *);
 #endif
index b41f0f70038cb4c8a9d2d0ee464ed7a6fc768392..4245e2e6e604501fa9ead8ad0ed3582f9afee6cc 100644 (file)
@@ -29,6 +29,7 @@ gk104_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gf100_fb_init,
+       .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gk104_ram_new,
        .memtype_valid = gf100_fb_memtype_valid,
index 7306f7dfc3b93f4455942e9c2cb8387c51320669..f815fe2bbf08892dd52654552600c33843392f0b 100644 (file)
@@ -27,7 +27,6 @@ static void
 gk20a_fb_init(struct nvkm_fb *fb)
 {
        struct nvkm_device *device = fb->subdev.device;
-       nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
        nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
        nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
 }
@@ -36,6 +35,7 @@ static const struct nvkm_fb_func
 gk20a_fb = {
        .oneinit = gf100_fb_oneinit,
        .init = gk20a_fb_init,
+       .init_page = gf100_fb_init_page,
        .memtype_valid = gf100_fb_memtype_valid,
 };
 
index 4869fdb753c971f7cc35a880cd3bad06c3efa031..db699025f5464711c8b02972c32ea32ee5076a59 100644 (file)
@@ -29,6 +29,7 @@ gm107_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gf100_fb_init,
+       .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gm107_ram_new,
        .memtype_valid = gf100_fb_memtype_valid,
index 44f5716f64d88dfff577250b6a8be0b46350019c..62f653240be3103a76d58e506b8ab44acf2d8739 100644 (file)
 
 #include <core/memory.h>
 
+void
+gm200_fb_init_page(struct nvkm_fb *fb)
+{
+       struct nvkm_device *device = fb->subdev.device;
+       switch (fb->page) {
+       case 16:
+               nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001);
+               break;
+       case 17:
+               nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000);
+               break;
+       default:
+               nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800);
+               fb->page = 0;
+               break;
+       }
+}
+
 static void
 gm200_fb_init(struct nvkm_fb *base)
 {
@@ -48,6 +66,7 @@ gm200_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gm200_fb_init,
+       .init_page = gm200_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gm107_ram_new,
        .memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
new file mode 100644 (file)
index 0000000..98474ae
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static void
+gp100_fb_init_unkn(struct nvkm_fb *base)
+{
+       struct nvkm_device *device = gf100_fb(base)->base.subdev.device;
+       nvkm_wr32(device, 0x1fac80, nvkm_rd32(device, 0x100c80));
+       nvkm_wr32(device, 0x1facc4, nvkm_rd32(device, 0x100cc4));
+       nvkm_wr32(device, 0x1facc8, nvkm_rd32(device, 0x100cc8));
+       nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc));
+}
+
+void
+gp100_fb_init(struct nvkm_fb *base)
+{
+       struct gf100_fb *fb = gf100_fb(base);
+       struct nvkm_device *device = fb->base.subdev.device;
+
+       if (fb->r100c10_page)
+               nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+       nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
+       nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
+       nvkm_mask(device, 0x100cc4, 0x00060000,
+                 max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
+}
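
A standalone sketch of the 0x100cc4 field computation above, using hypothetical buffer sizes: it clamps size >> 16 to at least 2 and places the result in bits 17-18 via the 0x00060000 mask:

#include <stdio.h>
#include <stdint.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* hypothetical MMU read buffer sizes, in bytes */
	uint64_t sizes[] = { 0x8000, 0x30000 };
	unsigned i;

	for (i = 0; i < 2; i++) {
		uint64_t field = max(sizes[i] >> 16, (uint64_t)2) << 17;
		/* only bits 17-18 survive the 0x00060000 mask */
		printf("size %#llx -> 0x100cc4 field %#llx\n",
		       (unsigned long long)sizes[i],
		       (unsigned long long)(field & 0x00060000));
	}
	return 0;
}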
+
+static const struct nvkm_fb_func
+gp100_fb = {
+       .dtor = gf100_fb_dtor,
+       .oneinit = gf100_fb_oneinit,
+       .init = gp100_fb_init,
+       .init_page = gm200_fb_init_page,
+       .init_unkn = gp100_fb_init_unkn,
+       .ram_new = gp100_ram_new,
+       .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+       return gf100_fb_new_(&gp100_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
new file mode 100644 (file)
index 0000000..92cb718
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static const struct nvkm_fb_func
+gp104_fb = {
+       .dtor = gf100_fb_dtor,
+       .oneinit = gf100_fb_oneinit,
+       .init = gp100_fb_init,
+       .init_page = gm200_fb_init_page,
+       .ram_new = gp100_ram_new,
+       .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+       return gf100_fb_new_(&gp104_fb, device, index, pfb);
+}
index d97d640e60a0a32e5e92af83c116128ca8559dce..e905d44fa1d59a88bf349e8bbcd467f49d7a7a00 100644 (file)
@@ -8,6 +8,8 @@ struct nvkm_fb_func {
        void *(*dtor)(struct nvkm_fb *);
        int (*oneinit)(struct nvkm_fb *);
        void (*init)(struct nvkm_fb *);
+       void (*init_page)(struct nvkm_fb *);
+       void (*init_unkn)(struct nvkm_fb *);
        void (*intr)(struct nvkm_fb *);
 
        struct {
@@ -60,5 +62,8 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
                       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 
 int gf100_fb_oneinit(struct nvkm_fb *);
+void gf100_fb_init_page(struct nvkm_fb *);
 bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
+
+void gm200_fb_init_page(struct nvkm_fb *);
 #endif
index f816cbf2ced32f02a310b112cee757024acb1ab3..b9ec0ae6723ad1c71b93802c50236d27d0bc12bc 100644 (file)
@@ -47,4 +47,5 @@ int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
new file mode 100644 (file)
index 0000000..f3be408
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static int
+gp100_ram_init(struct nvkm_ram *ram)
+{
+       struct nvkm_subdev *subdev = &ram->fb->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_bios *bios = device->bios;
+       u8  ver, hdr, cnt, len, snr, ssz;
+       u32 data;
+       int i;
+
+       /* Run the scripts from the rammap table.  There are actually
+        * individual pointers for each rammap entry too, but NVIDIA
+        * appears to run only the last two entries' scripts early in
+        * init, and never again.  We'll just run them all once for now.
+        *
+        * Each script is most likely for a separate memory mode
+        * (probably selected by 0x9a065c's lower bits?), with the
+        * binary driver skipping the one that has already been set up
+        * by the init tables.
+        */
+       data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+       if (!data || hdr < 0x15)
+               return -EINVAL;
+
+       cnt  = nvbios_rd08(bios, data + 0x14); /* guess at count */
+       data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
+       if (cnt) {
+               u32 save = nvkm_rd32(device, 0x9a065c) & 0x000000f0;
+               for (i = 0; i < cnt; i++, data += 4) {
+                       if (i != save >> 4) {
+                               nvkm_mask(device, 0x9a065c, 0x000000f0, i << 4);
+                               nvbios_exec(&(struct nvbios_init) {
+                                               .subdev = subdev,
+                                               .bios = bios,
+                                               .offset = nvbios_rd32(bios, data),
+                                               .execute = 1,
+                                           });
+                       }
+               }
+               nvkm_mask(device, 0x9a065c, 0x000000f0, save);
+       }
+
+       nvkm_mask(device, 0x9a0584, 0x11000000, 0x00000000);
+       nvkm_wr32(device, 0x10ecc0, 0xffffffff);
+       nvkm_mask(device, 0x9a0160, 0x00000010, 0x00000010);
+       return 0;
+}
+
+static const struct nvkm_ram_func
+gp100_ram_func = {
+       .init = gp100_ram_init,
+       .get = gf100_ram_get,
+       .put = gf100_ram_put,
+};
+
+int
+gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+       struct nvkm_ram *ram;
+       struct nvkm_subdev *subdev = &fb->subdev;
+       struct nvkm_device *device = subdev->device;
+       enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
+       const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+       const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+       u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa;
+       u32 fbio_opt = nvkm_rd32(device, 0x021c14);
+       u64 part, size = 0, comm = ~0ULL;
+       bool mixed = false;
+       int ret;
+
+       nvkm_debug(subdev, "022438: %08x\n", fbpa_num);
+       nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
+       for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
+               if (!(fbio_opt & (1 << fbpa))) {
+                       part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
+                       nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part);
+                       part = part << 20;
+                       if (part != comm) {
+                               if (comm != ~0ULL)
+                                       mixed = true;
+                               comm = min(comm, part);
+                       }
+                       size = size + part;
+               }
+       }
+
+       ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram);
+       *pram = ram;
+       if (ret)
+               return ret;
+
+       nvkm_mm_fini(&ram->vram);
+
+       if (mixed) {
+               ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+                                  ((comm * fbpa_num) - rsvd_head) >>
+                                  NVKM_RAM_MM_SHIFT, 1);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >>
+                                  NVKM_RAM_MM_SHIFT,
+                                  (size - (comm * fbpa_num) - rsvd_tail) >>
+                                  NVKM_RAM_MM_SHIFT, 1);
+               if (ret)
+                       return ret;
+       } else {
+               ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+                                  (size - rsvd_head - rsvd_tail) >>
+                                  NVKM_RAM_MM_SHIFT, 1);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
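
A standalone sketch of the mixed-size layout arithmetic with made-up FBPA sizes: the slice common to every FBPA sits low (minus the VGA hole at the head), and the remainder is exposed in a second range starting at 0x1000000000 + comm, with the vbios tail reserved at the end:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical: four FBPAs, three of 1 GiB and one of 512 MiB */
	uint64_t part[] = { 1ULL << 30, 1ULL << 30, 1ULL << 30, 512ULL << 20 };
	const uint64_t rsvd_head = 256 << 10, rsvd_tail = 1024 << 10;
	uint64_t comm = ~0ULL, size = 0;
	unsigned i, n = 4;

	for (i = 0; i < n; i++) {
		if (part[i] < comm)
			comm = part[i];
		size += part[i];
	}

	/* low region: the slice common to every FBPA, minus the VGA hole */
	printf("low:  %#llx..%#llx\n", (unsigned long long)rsvd_head,
	       (unsigned long long)(comm * n));
	/* high region: the remainder, remapped above 64 GiB */
	printf("high: %#llx..%#llx\n",
	       (unsigned long long)(0x1000000000ULL + comm),
	       (unsigned long long)(0x1000000000ULL + comm +
				    (size - comm * n - rsvd_tail)));
	return 0;
}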
index 323c79abe46855f45ec5e91a722324f93d183a8a..41bd5d0f76927e1e2f38ffe7e1d3c228a4d05139 100644 (file)
@@ -276,6 +276,8 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
                struct pwr_rail_t *r = &stbl.rail[i];
                struct nvkm_iccsense_rail *rail;
                struct nvkm_iccsense_sensor *sensor;
+               int (*read)(struct nvkm_iccsense *,
+                           struct nvkm_iccsense_rail *);
 
                if (!r->mode || r->resistor_mohm == 0)
                        continue;
@@ -284,31 +286,31 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
                if (!sensor)
                        continue;
 
-               rail = kmalloc(sizeof(*rail), GFP_KERNEL);
-               if (!rail)
-                       return -ENOMEM;
-
                switch (sensor->type) {
                case NVBIOS_EXTDEV_INA209:
                        if (r->rail != 0)
                                continue;
-                       rail->read = nvkm_iccsense_ina209_read;
+                       read = nvkm_iccsense_ina209_read;
                        break;
                case NVBIOS_EXTDEV_INA219:
                        if (r->rail != 0)
                                continue;
-                       rail->read = nvkm_iccsense_ina219_read;
+                       read = nvkm_iccsense_ina219_read;
                        break;
                case NVBIOS_EXTDEV_INA3221:
                        if (r->rail >= 3)
                                continue;
-                       rail->read = nvkm_iccsense_ina3221_read;
+                       read = nvkm_iccsense_ina3221_read;
                        break;
                default:
                        continue;
                }
 
+               rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+               if (!rail)
+                       return -ENOMEM;
                sensor->rail_mask |= 1 << r->rail;
+               rail->read = read;
                rail->sensor = sensor;
                rail->idx = r->rail;
                rail->mohm = r->resistor_mohm;
index 932b366598aa1a215d6408e2b1c198ab82c91236..12d6f4f102cb6d3c65244dcd1729bae36f89d9de 100644 (file)
@@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ltc/gf100.o
 nvkm-y += nvkm/subdev/ltc/gk104.o
 nvkm-y += nvkm/subdev/ltc/gm107.o
 nvkm-y += nvkm/subdev/ltc/gm200.o
+nvkm-y += nvkm/subdev/ltc/gp100.o
index c9eb677967a84455d325f7eef9fc9f0128ca41b2..4a0fa0a9b80287b5f68b292764f985502a27277d 100644 (file)
@@ -23,7 +23,6 @@
  */
 #include "priv.h"
 
-#include <core/enum.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 
@@ -71,7 +70,7 @@ gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
        nvkm_wr32(device, 0x17ea58, depth);
 }
 
-static const struct nvkm_bitfield
+const struct nvkm_bitfield
 gf100_ltc_lts_intr_name[] = {
        { 0x00000001, "IDLE_ERROR_IQ" },
        { 0x00000002, "IDLE_ERROR_CBC" },
index e292f5679418ccfdf23e53e9dbbec7b92b231539..ec0a3844b2d14995e342e0fdaa9fa638bdc17de8 100644 (file)
@@ -68,18 +68,22 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
        nvkm_wr32(device, 0x17e34c, depth);
 }
 
-static void
-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
+void
+gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
 {
        struct nvkm_subdev *subdev = &ltc->subdev;
        struct nvkm_device *device = subdev->device;
-       u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
-       u32 stat = nvkm_rd32(device, base + 0x00c);
+       u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
+       u32 intr = nvkm_rd32(device, base + 0x00c);
+       u16 stat = intr & 0x0000ffff;
+       char msg[128];
 
        if (stat) {
-               nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
-               nvkm_wr32(device, base + 0x00c, stat);
+               nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
+               nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg);
        }
+
+       nvkm_wr32(device, base + 0x00c, intr);
 }
 
 void
@@ -92,7 +96,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc)
        while (mask) {
                u32 s, c = __ffs(mask);
                for (s = 0; s < ltc->lts_nr; s++)
-                       gm107_ltc_lts_isr(ltc, c, s);
+                       gm107_ltc_intr_lts(ltc, c, s);
                mask &= ~(1 << c);
        }
 }
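
The message now decodes the status bits by name. A standalone approximation of what nvkm_snprintbf produces for the low 16 bits, using an abbreviated copy of the name table above:

#include <stdio.h>
#include <stdint.h>

struct bitfield { uint32_t mask; const char *name; };

/* abbreviated mirror of gf100_ltc_lts_intr_name[] */
static const struct bitfield lts_intr[] = {
	{ 0x00000001, "IDLE_ERROR_IQ" },
	{ 0x00000002, "IDLE_ERROR_CBC" },
	{ 0 }
};

int main(void)
{
	uint16_t stat = 0x0003;	/* low 16 bits of the intr register */
	const struct bitfield *b;

	for (b = lts_intr; b->mask; b++)
		if (stat & b->mask)
			printf("%s ", b->name);
	printf("\n");	/* prints: IDLE_ERROR_IQ IDLE_ERROR_CBC */
	return 0;
}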
index 2a29bfd5125aef5c5f8aa97c7ce0bd91d61dfd94..e18e0dc19ec8b1f988c2ea30ec4e5d274af0b59f 100644 (file)
@@ -46,7 +46,7 @@ static const struct nvkm_ltc_func
 gm200_ltc = {
        .oneinit = gm200_ltc_oneinit,
        .init = gm200_ltc_init,
-       .intr = gm107_ltc_intr, /*XXX: not validated */
+       .intr = gm107_ltc_intr,
        .cbc_clear = gm107_ltc_cbc_clear,
        .cbc_wait = gm107_ltc_cbc_wait,
        .zbc = 16,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
new file mode 100644 (file)
index 0000000..0bdfb2f
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static void
+gp100_ltc_intr(struct nvkm_ltc *ltc)
+{
+       struct nvkm_device *device = ltc->subdev.device;
+       u32 mask;
+
+       mask = nvkm_rd32(device, 0x0001c0);
+       while (mask) {
+               u32 s, c = __ffs(mask);
+               for (s = 0; s < ltc->lts_nr; s++)
+                       gm107_ltc_intr_lts(ltc, c, s);
+               mask &= ~(1 << c);
+       }
+}
+
+static int
+gp100_ltc_oneinit(struct nvkm_ltc *ltc)
+{
+       struct nvkm_device *device = ltc->subdev.device;
+       ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
+       ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
+       /*XXX: tagram allocation - TBD */
+       return nvkm_mm_init(&ltc->tags, 0, 0, 1);
+}
+
+static void
+gp100_ltc_init(struct nvkm_ltc *ltc)
+{
+       /*XXX: PMU LS call to setup tagram address */
+}
+
+static const struct nvkm_ltc_func
+gp100_ltc = {
+       .oneinit = gp100_ltc_oneinit,
+       .init = gp100_ltc_init,
+       .intr = gp100_ltc_intr,
+       .cbc_clear = gm107_ltc_cbc_clear,
+       .cbc_wait = gm107_ltc_cbc_wait,
+       .zbc = 16,
+       .zbc_clear_color = gm107_ltc_zbc_clear_color,
+       .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+       .invalidate = gf100_ltc_invalidate,
+       .flush = gf100_ltc_flush,
+};
+
+int
+gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+       return nvkm_ltc_new_(&gp100_ltc, device, index, pltc);
+}
index 6d81c695ed0d9f5312f85be51b6f1829ccee093b..8b95f96e3ffa1fad8cd26cf87a8b9f58ef872192 100644 (file)
@@ -2,6 +2,7 @@
 #define __NVKM_LTC_PRIV_H__
 #define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
 #include <subdev/ltc.h>
+#include <core/enum.h>
 
 int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
                  int index, struct nvkm_ltc **);
@@ -31,8 +32,10 @@ void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
 void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
 void gf100_ltc_invalidate(struct nvkm_ltc *);
 void gf100_ltc_flush(struct nvkm_ltc *);
+extern const struct nvkm_bitfield gf100_ltc_lts_intr_name[];
 
 void gm107_ltc_intr(struct nvkm_ltc *);
+void gm107_ltc_intr_lts(struct nvkm_ltc *, int ltc, int lts);
 void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
 void gm107_ltc_cbc_wait(struct nvkm_ltc *);
 void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
index 49695ac7be2e66331a8b96fae726fff2e17c613f..12943f92c20608e64f8f4b5a90921c35506bcce2 100644 (file)
@@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/mc/gt215.o
 nvkm-y += nvkm/subdev/mc/gf100.o
 nvkm-y += nvkm/subdev/mc/gk104.o
 nvkm-y += nvkm/subdev/mc/gk20a.o
+nvkm-y += nvkm/subdev/mc/gp100.o
index 350a8caa84c8dbd9b565cb2ea4b71a2b428bb116..6b25e25f9ebaeef8f5609ff635fb41cb77f9b2df 100644 (file)
 #include <subdev/top.h>
 
 void
-nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
+nvkm_mc_unk260(struct nvkm_device *device, u32 data)
 {
-       if (mc->func->unk260)
+       struct nvkm_mc *mc = device->mc;
+       if (likely(mc) && mc->func->unk260)
                mc->func->unk260(mc, data);
 }
 
 void
-nvkm_mc_intr_unarm(struct nvkm_mc *mc)
+nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
 {
-       return mc->func->intr_unarm(mc);
+       struct nvkm_mc *mc = device->mc;
+       const struct nvkm_mc_map *map;
+       if (likely(mc) && mc->func->intr_mask) {
+               u32 mask = nvkm_top_intr_mask(device, devidx);
+               for (map = mc->func->intr; !mask && map->stat; map++) {
+                       if (map->unit == devidx)
+                               mask = map->stat;
+               }
+               mc->func->intr_mask(mc, mask, en ? mask : 0);
+       }
+}
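
A standalone sketch of the lookup order in nvkm_mc_intr_mask() above: the device topology table is consulted first, and only if it yields nothing does the static per-unit map apply. The table contents here are made up:

#include <stdio.h>
#include <stdint.h>

struct mc_map { uint32_t stat; uint32_t unit; };

/* made-up per-unit table standing in for mc->func->intr[] */
static const struct mc_map intr_map[] = {
	{ 0x04000000, 7 },	/* e.g. a display unit */
	{ 0x00000100, 3 },	/* e.g. a fifo unit */
	{ 0 }
};

static uint32_t intr_mask_for(uint32_t devidx)
{
	uint32_t mask = 0;	/* nvkm_top_intr_mask() found nothing */
	const struct mc_map *map;

	/* fall back to the static map, same loop shape as above */
	for (map = intr_map; !mask && map->stat; map++)
		if (map->unit == devidx)
			mask = map->stat;
	return mask;
}

int main(void)
{
	printf("mask for unit 3 = %#x\n", intr_mask_for(3)); /* 0x100 */
	return 0;
}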
+
+void
+nvkm_mc_intr_unarm(struct nvkm_device *device)
+{
+       struct nvkm_mc *mc = device->mc;
+       if (likely(mc))
+               mc->func->intr_unarm(mc);
 }
 
 void
-nvkm_mc_intr_rearm(struct nvkm_mc *mc)
+nvkm_mc_intr_rearm(struct nvkm_device *device)
 {
-       return mc->func->intr_rearm(mc);
+       struct nvkm_mc *mc = device->mc;
+       if (likely(mc))
+               mc->func->intr_rearm(mc);
 }
 
 static u32
-nvkm_mc_intr_mask(struct nvkm_mc *mc)
+nvkm_mc_intr_stat(struct nvkm_mc *mc)
 {
-       u32 intr = mc->func->intr_mask(mc);
+       u32 intr = mc->func->intr_stat(mc);
        if (WARN_ON_ONCE(intr == 0xffffffff))
                intr = 0; /* likely fallen off the bus */
        return intr;
 }
 
 void
-nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
+nvkm_mc_intr(struct nvkm_device *device, bool *handled)
 {
-       struct nvkm_device *device = mc->subdev.device;
+       struct nvkm_mc *mc = device->mc;
        struct nvkm_subdev *subdev;
-       const struct nvkm_mc_map *map = mc->func->intr;
-       u32 stat, intr = nvkm_mc_intr_mask(mc);
+       const struct nvkm_mc_map *map;
+       u32 stat, intr;
        u64 subdevs;
 
-       stat = nvkm_top_intr(device->top, intr, &subdevs);
+       if (unlikely(!mc))
+               return;
+
+       intr = nvkm_mc_intr_stat(mc);
+       stat = nvkm_top_intr(device, intr, &subdevs);
        while (subdevs) {
                enum nvkm_devidx subidx = __ffs64(subdevs);
                subdev = nvkm_device_subdev(device, subidx);
@@ -72,14 +96,13 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
                subdevs &= ~BIT_ULL(subidx);
        }
 
-       while (map->stat) {
+       for (map = mc->func->intr; map->stat; map++) {
                if (intr & map->stat) {
                        subdev = nvkm_device_subdev(device, map->unit);
                        if (subdev)
                                nvkm_subdev_intr(subdev);
                        stat &= ~map->stat;
                }
-               map++;
        }
 
        if (stat)
@@ -87,22 +110,32 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
        *handled = intr != 0;
 }
 
-static void
-nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+static u32
+nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
+                  enum nvkm_devidx devidx)
 {
-       struct nvkm_device *device = mc->subdev.device;
+       struct nvkm_mc *mc = device->mc;
        const struct nvkm_mc_map *map;
-       u64 pmc_enable;
-
-       if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) {
-               for (map = mc->func->reset; map && map->stat; map++) {
-                       if (map->unit == devidx) {
-                               pmc_enable = map->stat;
-                               break;
+       u64 pmc_enable = 0;
+       if (likely(mc)) {
+               if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
+                       for (map = mc->func->reset; map && map->stat; map++) {
+                               if (!isauto || !map->noauto) {
+                                       if (map->unit == devidx) {
+                                               pmc_enable = map->stat;
+                                               break;
+                                       }
+                               }
                        }
                }
        }
+       return pmc_enable;
+}
 
+void
+nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+       u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
        if (pmc_enable) {
                nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
                nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
@@ -111,17 +144,27 @@ nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
 }
 
 void
-nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
 {
-       if (likely(mc))
-               nvkm_mc_reset_(mc, devidx);
+       u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+       if (pmc_enable)
+               nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+}
+
+void
+nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+       u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+       if (pmc_enable) {
+               nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+               nvkm_rd32(device, 0x000200);
+       }
 }
 
 static int
 nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-       struct nvkm_mc *mc = nvkm_mc(subdev);
-       nvkm_mc_intr_unarm(mc);
+       nvkm_mc_intr_unarm(subdev->device);
        return 0;
 }
 
@@ -131,7 +174,7 @@ nvkm_mc_init(struct nvkm_subdev *subdev)
        struct nvkm_mc *mc = nvkm_mc(subdev);
        if (mc->func->init)
                mc->func->init(mc);
-       nvkm_mc_intr_rearm(mc);
+       nvkm_mc_intr_rearm(subdev->device);
        return 0;
 }
 
@@ -148,16 +191,21 @@ nvkm_mc = {
        .fini = nvkm_mc_fini,
 };
 
+void
+nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
+            int index, struct nvkm_mc *mc)
+{
+       nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
+       mc->func = func;
+}
+
 int
 nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
             int index, struct nvkm_mc **pmc)
 {
        struct nvkm_mc *mc;
-
        if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
                return -ENOMEM;
-
-       nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
-       mc->func = func;
+       nvkm_mc_ctor(func, device, index, *pmc);
        return 0;
 }
index 5c85b47f071d3a81645a5e3a24cfed5dd667947f..c3d66ef5dc129877607cbbbce62f02b39054cbf7 100644 (file)
@@ -57,7 +57,7 @@ g84_mc = {
        .intr = g84_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = g84_mc_reset,
 };
 
index 0280b43cc10c9eb2dfacbe31aee636488443fca1..93ad4982ce5f656544ffdf3ca16f886a32a8f8c2 100644 (file)
@@ -57,7 +57,7 @@ g98_mc = {
        .intr = g98_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = g98_mc_reset,
 };
 
index 8397e223bd4305295cf714463b099f5c63a8b5ac..d2c4d6033abb0a96ec3851e1fa13cd2a1a648aea 100644 (file)
@@ -76,7 +76,7 @@ gf100_mc_intr_rearm(struct nvkm_mc *mc)
 }
 
 u32
-gf100_mc_intr_mask(struct nvkm_mc *mc)
+gf100_mc_intr_stat(struct nvkm_mc *mc)
 {
        struct nvkm_device *device = mc->subdev.device;
        u32 intr0 = nvkm_rd32(device, 0x000100);
@@ -84,6 +84,14 @@ gf100_mc_intr_mask(struct nvkm_mc *mc)
        return intr0 | intr1;
 }
 
+void
+gf100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
+{
+       struct nvkm_device *device = mc->subdev.device;
+       nvkm_mask(device, 0x000640, mask, stat);
+       nvkm_mask(device, 0x000644, mask, stat);
+}
+
 void
 gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
 {
@@ -97,6 +105,7 @@ gf100_mc = {
        .intr_unarm = gf100_mc_intr_unarm,
        .intr_rearm = gf100_mc_intr_rearm,
        .intr_mask = gf100_mc_intr_mask,
+       .intr_stat = gf100_mc_intr_stat,
        .reset = gf100_mc_reset,
        .unk260 = gf100_mc_unk260,
 };
index 317464212c7d1eb75b28cf54dbb210912d2c9566..7b8c6ecad1a5d544d7012d1577cc7f8b03f8b06c 100644 (file)
@@ -26,6 +26,7 @@
 const struct nvkm_mc_map
 gk104_mc_reset[] = {
        { 0x00000100, NVKM_ENGINE_FIFO },
+       { 0x00002000, NVKM_SUBDEV_PMU, true },
        {}
 };
 
@@ -53,6 +54,7 @@ gk104_mc = {
        .intr_unarm = gf100_mc_intr_unarm,
        .intr_rearm = gf100_mc_intr_rearm,
        .intr_mask = gf100_mc_intr_mask,
+       .intr_stat = gf100_mc_intr_stat,
        .reset = gk104_mc_reset,
        .unk260 = gf100_mc_unk260,
 };
index 60b044f517ed5d70aec8d8f424322edf4afd821a..ca1bf3279dbe9a046c958cfe744e7f60fd07cfa8 100644 (file)
@@ -30,6 +30,7 @@ gk20a_mc = {
        .intr_unarm = gf100_mc_intr_unarm,
        .intr_rearm = gf100_mc_intr_rearm,
        .intr_mask = gf100_mc_intr_mask,
+       .intr_stat = gf100_mc_intr_stat,
        .reset = gk104_mc_reset,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
new file mode 100644 (file)
index 0000000..4d22f4a
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gp100_mc(p) container_of((p), struct gp100_mc, base)
+#include "priv.h"
+
+struct gp100_mc {
+       struct nvkm_mc base;
+       spinlock_t lock;
+       bool intr;
+       u32 mask;
+};
+
+static void
+gp100_mc_intr_update(struct gp100_mc *mc)
+{
+       struct nvkm_device *device = mc->base.subdev.device;
+       u32 mask = mc->intr ? mc->mask : 0, i;
+       for (i = 0; i < 2; i++) {
+               nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
+               nvkm_wr32(device, 0x000160 + (i * 0x04),  mask);
+       }
+}
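
A standalone sketch of the update above, assuming 0x000160/0x000164 set interrupt-enable bits and 0x000180/0x000184 clear them (inferred from the write pattern); the software mask value is illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* software state mirroring struct gp100_mc: armed flag + mask */
	int armed = 1;
	uint32_t sw_mask = 0x7fffffff & ~0x00000100;	/* one unit masked off */
	uint32_t mask = armed ? sw_mask : 0;
	unsigned i;

	for (i = 0; i < 2; i++) {
		/* 0x180/0x184 clear enable bits, 0x160/0x164 set them */
		printf("wr 0x%06x = %#010x\n", 0x000180 + i * 4, ~mask);
		printf("wr 0x%06x = %#010x\n", 0x000160 + i * 4, mask);
	}
	return 0;
}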
+
+static void
+gp100_mc_intr_unarm(struct nvkm_mc *base)
+{
+       struct gp100_mc *mc = gp100_mc(base);
+       unsigned long flags;
+       spin_lock_irqsave(&mc->lock, flags);
+       mc->intr = false;
+       gp100_mc_intr_update(mc);
+       spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+gp100_mc_intr_rearm(struct nvkm_mc *base)
+{
+       struct gp100_mc *mc = gp100_mc(base);
+       unsigned long flags;
+       spin_lock_irqsave(&mc->lock, flags);
+       mc->intr = true;
+       gp100_mc_intr_update(mc);
+       spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
+{
+       struct gp100_mc *mc = gp100_mc(base);
+       unsigned long flags;
+       spin_lock_irqsave(&mc->lock, flags);
+       mc->mask = (mc->mask & ~mask) | intr;
+       gp100_mc_intr_update(mc);
+       spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static const struct nvkm_mc_func
+gp100_mc = {
+       .init = nv50_mc_init,
+       .intr = gk104_mc_intr,
+       .intr_unarm = gp100_mc_intr_unarm,
+       .intr_rearm = gp100_mc_intr_rearm,
+       .intr_mask = gp100_mc_intr_mask,
+       .intr_stat = gf100_mc_intr_stat,
+       .reset = gk104_mc_reset,
+};
+
+int
+gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+       struct gp100_mc *mc;
+
+       if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+               return -ENOMEM;
+       nvkm_mc_ctor(&gp100_mc, device, index, &mc->base);
+       *pmc = &mc->base;
+
+       spin_lock_init(&mc->lock);
+       mc->intr = false;
+       mc->mask = 0x7fffffff;
+       return 0;
+}
index aad0ba95bf180118ea9c5430cd9be6abb9dde93d..99d50a3d956fc312b3d91ade85f3f36f2b944d22 100644 (file)
@@ -53,13 +53,20 @@ gt215_mc_intr[] = {
        {},
 };
 
+static void
+gt215_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
+{
+       nvkm_mask(mc->subdev.device, 0x000640, mask, stat);
+}
+
 static const struct nvkm_mc_func
 gt215_mc = {
        .init = nv50_mc_init,
        .intr = gt215_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_mask = gt215_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = gt215_mc_reset,
 };
 
index a062624e906b3396f4b5b74c3da704be4b1a9765..6509defd14609c1a9675424da7f45f3c6cf5ee8b 100644 (file)
@@ -56,7 +56,7 @@ nv04_mc_intr_rearm(struct nvkm_mc *mc)
 }
 
 u32
-nv04_mc_intr_mask(struct nvkm_mc *mc)
+nv04_mc_intr_stat(struct nvkm_mc *mc)
 {
        return nvkm_rd32(mc->subdev.device, 0x000100);
 }
@@ -75,7 +75,7 @@ nv04_mc = {
        .intr = nv04_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = nv04_mc_reset,
 };
 
index 55f0b9166b52cb207dc5973076b584630d8674b6..9213107901e6de32f51973f2d6d086e2eb6f2edc 100644 (file)
@@ -39,7 +39,7 @@ nv11_mc = {
        .intr = nv11_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = nv04_mc_reset,
 };
 
index c40fa67f79a51d3d122a77011824ad34e591e3f0..64bf5bbf8146cb80e6ace6f69bdca9da0e9c0d2a 100644 (file)
@@ -48,7 +48,7 @@ nv17_mc = {
        .intr = nv17_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = nv17_mc_reset,
 };
 
index cc56271db5643695add777bad34059c27d7f9b49..65fa44a64b985f12437fa3fa7473eaa704b98927 100644 (file)
@@ -43,7 +43,7 @@ nv44_mc = {
        .intr = nv17_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = nv17_mc_reset,
 };
 
index 343b6078580de8bee982c56c267db9d48a6858a0..fe93b4fd71002ccbfcab3505c0a6b13f4bf039c1 100644 (file)
@@ -50,7 +50,7 @@ nv50_mc = {
        .intr = nv50_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = nv17_mc_reset,
 };
 
index a1203811851238b462981a3d5e1a38717926ee33..4f0576a06d2454489ffdcce6d3b601a286653a98 100644 (file)
@@ -3,12 +3,15 @@
 #define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
 #include <subdev/mc.h>
 
+void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *,
+                 int index, struct nvkm_mc *);
 int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
                 int index, struct nvkm_mc **);
 
 struct nvkm_mc_map {
        u32 stat;
        u32 unit;
+       bool noauto;
 };
 
 struct nvkm_mc_func {
@@ -18,8 +21,10 @@ struct nvkm_mc_func {
        void (*intr_unarm)(struct nvkm_mc *);
        /* enable reporting of interrupts to host */
        void (*intr_rearm)(struct nvkm_mc *);
+       /* (un)mask delivery of specific interrupts */
+       void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
        /* retrieve pending interrupt mask (NV_PMC_INTR) */
-       u32 (*intr_mask)(struct nvkm_mc *);
+       u32 (*intr_stat)(struct nvkm_mc *);
        const struct nvkm_mc_map *reset;
        void (*unk260)(struct nvkm_mc *, u32);
 };
@@ -27,7 +32,7 @@ struct nvkm_mc_func {
 void nv04_mc_init(struct nvkm_mc *);
 void nv04_mc_intr_unarm(struct nvkm_mc *);
 void nv04_mc_intr_rearm(struct nvkm_mc *);
-u32 nv04_mc_intr_mask(struct nvkm_mc *);
+u32 nv04_mc_intr_stat(struct nvkm_mc *);
 extern const struct nvkm_mc_map nv04_mc_reset[];
 
 extern const struct nvkm_mc_map nv17_mc_intr[];
@@ -39,7 +44,8 @@ void nv50_mc_init(struct nvkm_mc *);
 
 void gf100_mc_intr_unarm(struct nvkm_mc *);
 void gf100_mc_intr_rearm(struct nvkm_mc *);
-u32 gf100_mc_intr_mask(struct nvkm_mc *);
+void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32);
+u32 gf100_mc_intr_stat(struct nvkm_mc *);
 void gf100_mc_unk260(struct nvkm_mc *, u32);
 
 extern const struct nvkm_mc_map gk104_mc_intr[];
index 3c2519fdeb813afcdab2a20c32bf92bf1add6f67..2a31b7d66a6ddc852aee66cdaae4b196e9a86419 100644 (file)
@@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/pci/g94.o
 nvkm-y += nvkm/subdev/pci/gf100.o
 nvkm-y += nvkm/subdev/pci/gf106.o
 nvkm-y += nvkm/subdev/pci/gk104.o
+nvkm-y += nvkm/subdev/pci/gp100.o
index 6b0328bd7eede2ee2d43d55152f0b6022218f76b..eb9b278198b229672df91bf6426432ee7d6f9f65 100644 (file)
@@ -69,15 +69,13 @@ static irqreturn_t
 nvkm_pci_intr(int irq, void *arg)
 {
        struct nvkm_pci *pci = arg;
-       struct nvkm_mc *mc = pci->subdev.device->mc;
+       struct nvkm_device *device = pci->subdev.device;
        bool handled = false;
-       if (likely(mc)) {
-               nvkm_mc_intr_unarm(mc);
-               if (pci->msi)
-                       pci->func->msi_rearm(pci);
-               nvkm_mc_intr(mc, &handled);
-               nvkm_mc_intr_rearm(mc);
-       }
+       nvkm_mc_intr_unarm(device);
+       if (pci->msi)
+               pci->func->msi_rearm(pci);
+       nvkm_mc_intr(device, &handled);
+       nvkm_mc_intr_rearm(device);
        return handled ? IRQ_HANDLED : IRQ_NONE;
 }
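
A minimal sketch of the handler ordering after this change, with stand-in stubs for the nvkm_mc_* helpers: the master interrupt is disarmed before the MSI rearm and dispatch, then re-armed, so no edge is lost while the pending sources are walked:

#include <stdio.h>
#include <stdbool.h>

static void intr_unarm(void) { printf("master intr off\n"); }
static void msi_rearm(void)  { printf("MSI rearmed\n"); }
static void intr_dispatch(bool *handled) { *handled = true; }
static void intr_rearm(void) { printf("master intr on\n"); }

int main(void)
{
	bool handled = false;

	intr_unarm();		/* quiesce before touching MSI state */
	msi_rearm();
	intr_dispatch(&handled);
	intr_rearm();
	printf(handled ? "IRQ_HANDLED\n" : "IRQ_NONE\n");
	return 0;
}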
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
new file mode 100644 (file)
index 0000000..82c5234
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gp100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+       nvkm_pci_wr32(pci, 0x0704, 0x00000000);
+}
+
+static const struct nvkm_pci_func
+gp100_pci_func = {
+       .rd32 = nv40_pci_rd32,
+       .wr08 = nv40_pci_wr08,
+       .wr32 = nv40_pci_wr32,
+       .msi_rearm = gp100_pci_msi_rearm,
+};
+
+int
+gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+       return nvkm_pci_new_(&gp100_pci_func, device, index, ppci);
+}
index 213fdba6cfa0c8813cdf7276ed3c9dc72e2c40d6..314be2192b7df0451e1f566d3f6897682b3c5573 100644 (file)
@@ -19,8 +19,9 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
 #include "priv.h"
+
+#include <subdev/mc.h>
 #include <subdev/timer.h>
 
 static const char *
@@ -70,12 +71,11 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
        int ret;
 
        /* enable engine */
-       nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask);
-       nvkm_rd32(device, 0x200);
+       nvkm_mc_enable(device, sb->devidx);
        ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
        if (ret < 0) {
-               nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
                nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
+               nvkm_mc_disable(device, sb->devidx);
                return ret;
        }
 
@@ -85,8 +85,7 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
 
        /* enable IRQs */
        nvkm_wr32(device, sb->base + 0x010, 0xff);
-       nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask);
-       nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask);
+       nvkm_mc_intr_mask(device, sb->devidx, true);
 
        return 0;
 }
@@ -97,14 +96,13 @@ nvkm_secboot_falcon_disable(struct nvkm_secboot *sb)
        struct nvkm_device *device = sb->subdev.device;
 
        /* disable IRQs and wait for any previous code to complete */
-       nvkm_mask(device, 0x644, sb->irq_mask, 0x0);
-       nvkm_mask(device, 0x640, sb->irq_mask, 0x0);
+       nvkm_mc_intr_mask(device, sb->devidx, false);
        nvkm_wr32(device, sb->base + 0x014, 0xff);
 
        falcon_wait_idle(device, sb->base);
 
        /* disable engine */
-       nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
+       nvkm_mc_disable(device, sb->devidx);
 
        return 0;
 }
@@ -216,14 +214,7 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
                return ret;
        }
 
-       /*
-        * Build all blobs - the same blobs can be used to perform secure boot
-        * multiple times
-        */
-       if (sb->func->prepare_blobs)
-               ret = sb->func->prepare_blobs(sb);
-
-       return ret;
+       return 0;
 }
 
 static int
@@ -270,9 +261,8 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
        /* setup the performing falcon's base address and masks */
        switch (func->boot_falcon) {
        case NVKM_SECBOOT_FALCON_PMU:
+               sb->devidx = NVKM_SUBDEV_PMU;
                sb->base = 0x10a000;
-               sb->irq_mask = 0x1000000;
-               sb->enable_mask = 0x2000;
                break;
        default:
                nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
index cc100dc940eaa091bc964a52b9031c5b3c75b6ff..f1e2dc914366d6bf2d10b5c18eb530ac6c4124ce 100644 (file)
@@ -860,6 +860,8 @@ gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb)
 
        /* Write LS blob */
        ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob);
+       if (ret)
+               nvkm_gpuobj_del(&gsb->ls_blob);
 
 cleanup:
        ls_ucode_mgr_cleanup(&mgr);
@@ -1023,29 +1025,34 @@ gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb)
        int ret;
 
        /* Load and prepare the managed falcon's firmwares */
-       ret = gm200_secboot_prepare_ls_blob(gsb);
-       if (ret)
-               return ret;
+       if (!gsb->ls_blob) {
+               ret = gm200_secboot_prepare_ls_blob(gsb);
+               if (ret)
+                       return ret;
+       }
 
        /* Load the HS firmware that will load the LS firmwares */
-       ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
-                                           &gsb->acr_load_blob,
-                                           &gsb->acr_load_bl_desc, true);
-       if (ret)
-               return ret;
+       if (!gsb->acr_load_blob) {
+               ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
+                                               &gsb->acr_load_blob,
+                                               &gsb->acr_load_bl_desc, true);
+               if (ret)
+                       return ret;
+       }
 
        /* Load the HS firmware bootloader */
-       ret = gm200_secboot_prepare_hsbl_blob(gsb);
-       if (ret)
-               return ret;
+       if (!gsb->hsbl_blob) {
+               ret = gm200_secboot_prepare_hsbl_blob(gsb);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
 
 static int
-gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
+gm200_secboot_prepare_blobs(struct gm200_secboot *gsb)
 {
-       struct gm200_secboot *gsb = gm200_secboot(sb);
        int ret;
 
        ret = gm20x_secboot_prepare_blobs(gsb);
@@ -1053,15 +1060,37 @@ gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
                return ret;
 
        /* dGPU only: load the HS firmware that unprotects the WPR region */
-       ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
-                                           &gsb->acr_unload_blob,
-                                           &gsb->acr_unload_bl_desc, false);
-       if (ret)
-               return ret;
+       if (!gsb->acr_unload_blob) {
+               ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
+                                              &gsb->acr_unload_blob,
+                                              &gsb->acr_unload_bl_desc, false);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
 
+static int
+gm200_secboot_blobs_ready(struct gm200_secboot *gsb)
+{
+       struct nvkm_subdev *subdev = &gsb->base.subdev;
+       int ret;
+
+       /* firmware already loaded, nothing to do... */
+       if (gsb->firmware_ok)
+               return 0;
+
+       ret = gsb->func->prepare_blobs(gsb);
+       if (ret) {
+               nvkm_error(subdev, "failed to load secure firmware\n");
+               return ret;
+       }
+
+       gsb->firmware_ok = true;
+
+       return 0;
+}
 
 
 /*
@@ -1234,6 +1263,11 @@ gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
        struct gm200_secboot *gsb = gm200_secboot(sb);
        int ret;
 
+       /* Make sure all blobs are ready */
+       ret = gm200_secboot_blobs_ready(gsb);
+       if (ret)
+               return ret;
+
        /*
         * Dummy GM200 implementation: perform secure boot each time we are
         * called on FECS. Since only FECS and GPCCS are managed and started
@@ -1373,7 +1407,6 @@ gm200_secboot = {
        .dtor = gm200_secboot_dtor,
        .init = gm200_secboot_init,
        .fini = gm200_secboot_fini,
-       .prepare_blobs = gm200_secboot_prepare_blobs,
        .reset = gm200_secboot_reset,
        .start = gm200_secboot_start,
        .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) |
@@ -1415,6 +1448,7 @@ gm200_secboot_func = {
        .bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
        .fixup_bl_desc = gm200_secboot_fixup_bl_desc,
        .fixup_hs_desc = gm200_secboot_fixup_hs_desc,
+       .prepare_blobs = gm200_secboot_prepare_blobs,
 };
 
 int
@@ -1487,3 +1521,19 @@ MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
 MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
 MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
 MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
index 684320484b70186bb361e4357f2d458c0a2562d9..d5395ebfe8d345e1b1114d7f12caaa360973c108 100644 (file)
@@ -42,6 +42,32 @@ struct gm20b_flcn_bl_desc {
        u32 data_size;
 };
 
+static int
+gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb)
+{
+       struct nvkm_subdev *subdev = &gsb->base.subdev;
+       int acr_size;
+       int ret;
+
+       ret = gm20x_secboot_prepare_blobs(gsb);
+       if (ret)
+               return ret;
+
+       acr_size = gsb->acr_load_blob->size;
+       /*
+        * On Tegra the WPR region is set by the bootloader. It is illegal for
+        * the HS blob to be larger than this region.
+        */
+       if (acr_size > gsb->wpr_size) {
+               nvkm_error(subdev, "WPR region too small for FW blob!\n");
+               nvkm_error(subdev, "required: %dB\n", acr_size);
+               nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size);
+               return -ENOSPC;
+       }
+
+       return 0;
+}
+
 /**
  * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
  *
@@ -88,6 +114,7 @@ gm20b_secboot_func = {
        .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
        .fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
        .fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
+       .prepare_blobs = gm20b_secboot_prepare_blobs,
 };
 
 
@@ -146,32 +173,6 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
 }
 #endif
 
-static int
-gm20b_secboot_prepare_blobs(struct nvkm_secboot *sb)
-{
-       struct gm200_secboot *gsb = gm200_secboot(sb);
-       int acr_size;
-       int ret;
-
-       ret = gm20x_secboot_prepare_blobs(gsb);
-       if (ret)
-               return ret;
-
-       acr_size = gsb->acr_load_blob->size;
-       /*
-        * On Tegra the WPR region is set by the bootloader. It is illegal for
-        * the HS blob to be larger than this region.
-        */
-       if (acr_size > gsb->wpr_size) {
-               nvkm_error(&sb->subdev, "WPR region too small for FW blob!\n");
-               nvkm_error(&sb->subdev, "required: %dB\n", acr_size);
-               nvkm_error(&sb->subdev, "WPR size: %dB\n", gsb->wpr_size);
-               return -ENOSPC;
-       }
-
-       return 0;
-}
-
 static int
 gm20b_secboot_init(struct nvkm_secboot *sb)
 {
@@ -189,7 +190,6 @@ static const struct nvkm_secboot_func
 gm20b_secboot = {
        .dtor = gm200_secboot_dtor,
        .init = gm20b_secboot_init,
-       .prepare_blobs = gm20b_secboot_prepare_blobs,
        .reset = gm200_secboot_reset,
        .start = gm200_secboot_start,
        .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
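With prepare_blobs moved out of the generic nvkm_secboot_func (see the priv.h hunk below) into gm200_secboot_func, each implementation now receives the concrete type and drops its downcast; the removed gm20b variant above opened with

    struct gm200_secboot *gsb = gm200_secboot(sb);

whereas its replacement takes struct gm200_secboot *gsb directly. Narrowing the hook to the layer that owns it is what lets gm200_secboot_blobs_ready() drive it lazily from reset().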
index f2b09dee7c5d1eead4d037b62443f3c2ede2bc93..a9a8a0e1017e8921187e5b70074ea794e5e93222 100644 (file)
@@ -30,7 +30,6 @@ struct nvkm_secboot_func {
        int (*init)(struct nvkm_secboot *);
        int (*fini)(struct nvkm_secboot *, bool suspend);
        void *(*dtor)(struct nvkm_secboot *);
-       int (*prepare_blobs)(struct nvkm_secboot *);
        int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
        int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
 
@@ -147,10 +146,8 @@ struct hsflcn_acr_desc {
  * @inst:              instance block for HS falcon
  * @pgd:               page directory for the HS falcon
  * @vm:                        address space used by the HS falcon
- * @bl_desc_size:      size of the BL descriptor used by this chip.
- * @fixup_bl_desc:     hook that generates the proper BL descriptor format from
- *                     the generic GM200 format into a data array of size
- *                     bl_desc_size
+ * @falcon_state:      current state of the managed falcons
+ * @firmware_ok:       whether the firmware blobs have been created
  */
 struct gm200_secboot {
        struct nvkm_secboot base;
@@ -196,9 +193,19 @@ struct gm200_secboot {
                RUNNING,
        } falcon_state[NVKM_SECBOOT_FALCON_END];
 
+       bool firmware_ok;
 };
 #define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
 
+/**
+ * struct gm200_secboot_func - hooks abstracted between GM200-like implementations
+ * @bl_desc_size:      size of the BL descriptor used by this chip.
+ * @fixup_bl_desc:     hook that generates the proper BL descriptor format from
+ *                     the generic GM200 format into a data array of size
+ *                     bl_desc_size
+ * @fixup_hs_desc:     hook that twiddles the HS descriptor before it is used
+ * @prepare_blobs:     prepares the various blobs needed for secure booting
+ */
 struct gm200_secboot_func {
        /*
         * Size of the bootloader descriptor for this chip. A block of this
@@ -214,6 +221,7 @@ struct gm200_secboot_func {
         * we want the HS FW to set up.
         */
        void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *);
+       int (*prepare_blobs)(struct gm200_secboot *);
 };
 
 int gm200_secboot_init(struct nvkm_secboot *);
index a1b264664aad9cb2e4785a873cb984b647c27c50..fe063d5728e21b600556cfb6724cde8d979800c9 100644 (file)
@@ -41,8 +41,9 @@ nvkm_top_device_new(struct nvkm_top *top)
 }
 
 u32
-nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
+nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
 {
+       struct nvkm_top *top = device->top;
        struct nvkm_top_device *info;
 
        if (top) {
@@ -56,8 +57,25 @@ nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
 }
 
 u32
-nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
+nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
 {
+       struct nvkm_top *top = device->top;
+       struct nvkm_top_device *info;
+
+       if (top) {
+               list_for_each_entry(info, &top->device, head) {
+                       if (info->index == devidx && info->intr >= 0)
+                               return BIT(info->intr);
+               }
+       }
+
+       return 0;
+}
+
+u32
+nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
+{
+       struct nvkm_top *top = device->top;
        struct nvkm_top_device *info;
        u64 subdevs = 0;
        u32 handled = 0;
@@ -78,8 +96,9 @@ nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
 }
 
 enum nvkm_devidx
-nvkm_top_fault(struct nvkm_top *top, int fault)
+nvkm_top_fault(struct nvkm_device *device, int fault)
 {
+       struct nvkm_top *top = device->top;
        struct nvkm_top_device *info;
 
        list_for_each_entry(info, &top->device, head) {
@@ -91,8 +110,9 @@ nvkm_top_fault(struct nvkm_top *top, int fault)
 }
 
 enum nvkm_devidx
-nvkm_top_engine(struct nvkm_top *top, int index, int *runl, int *engn)
+nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
 {
+       struct nvkm_top *top = device->top;
        struct nvkm_top_device *info;
        int n = 0;
 
index e06acc340e99b8c5720fc379808e3f364d9c216a..efac3402f9ddb0663391197dc4cdfea879931c60 100644 (file)
@@ -29,7 +29,7 @@ gk104_top_oneinit(struct nvkm_top *top)
        struct nvkm_subdev *subdev = &top->subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_top_device *info = NULL;
-       u32 data, type;
+       u32 data, type, inst;
        int i;
 
        for (i = 0; i < 64; i++) {
@@ -37,6 +37,7 @@ gk104_top_oneinit(struct nvkm_top *top)
                        if (!(info = nvkm_top_device_new(top)))
                                return -ENOMEM;
                        type = ~0;
+                       inst = 0;
                }
 
                data = nvkm_rd32(device, 0x022700 + (i * 0x04));
@@ -45,6 +46,7 @@ gk104_top_oneinit(struct nvkm_top *top)
                case 0x00000000: /* NOT_VALID */
                        continue;
                case 0x00000001: /* DATA */
+                       inst        = (data & 0x3c000000) >> 26;
                        info->addr  = (data & 0x00fff000);
                        info->fault = (data & 0x000000f8) >> 3;
                        break;
@@ -67,27 +69,32 @@ gk104_top_oneinit(struct nvkm_top *top)
                        continue;
 
                /* Translate engine type to NVKM engine identifier. */
+#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
+#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1)      \
+               info->index = NVKM_ENGINE_##A##0 + inst
                switch (type) {
-               case 0x00000000: info->index = NVKM_ENGINE_GR; break;
-               case 0x00000001: info->index = NVKM_ENGINE_CE0; break;
-               case 0x00000002: info->index = NVKM_ENGINE_CE1; break;
-               case 0x00000003: info->index = NVKM_ENGINE_CE2; break;
-               case 0x00000008: info->index = NVKM_ENGINE_MSPDEC; break;
-               case 0x00000009: info->index = NVKM_ENGINE_MSPPP; break;
-               case 0x0000000a: info->index = NVKM_ENGINE_MSVLD; break;
-               case 0x0000000b: info->index = NVKM_ENGINE_MSENC; break;
-               case 0x0000000c: info->index = NVKM_ENGINE_VIC; break;
-               case 0x0000000d: info->index = NVKM_ENGINE_SEC; break;
-               case 0x0000000e: info->index = NVKM_ENGINE_NVENC0; break;
-               case 0x0000000f: info->index = NVKM_ENGINE_NVENC1; break;
-               case 0x00000010: info->index = NVKM_ENGINE_NVDEC; break;
+               case 0x00000000: A_(GR    ); break;
+               case 0x00000001: A_(CE0   ); break;
+               case 0x00000002: A_(CE1   ); break;
+               case 0x00000003: A_(CE2   ); break;
+               case 0x00000008: A_(MSPDEC); break;
+               case 0x00000009: A_(MSPPP ); break;
+               case 0x0000000a: A_(MSVLD ); break;
+               case 0x0000000b: A_(MSENC ); break;
+               case 0x0000000c: A_(VIC   ); break;
+               case 0x0000000d: A_(SEC   ); break;
+               case 0x0000000e: B_(NVENC ); break;
+               case 0x0000000f: A_(NVENC1); break;
+               case 0x00000010: A_(NVDEC ); break;
+               case 0x00000013: B_(CE    ); break;
                        break;
                default:
                        break;
                }
 
-               nvkm_debug(subdev, "%02x (%8s): addr %06x fault %2d engine %2d "
-                                  "runlist %2d intr %2d reset %2d\n", type,
+               nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
+                                  "engine %2d runlist %2d intr %2d "
+                                  "reset %2d\n", type, inst,
                           info->index == NVKM_SUBDEV_NR ? NULL :
                                          nvkm_subdev_name[info->index],
                           info->addr, info->fault, info->engine, info->runlist,
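The A_()/B_() helpers compress the old one-case-per-instance switch: A_(X) accepts only inst == 0, while B_(X) maps instance n onto the contiguous NVKM_ENGINE_X0..NVKM_ENGINE_X_LAST range. Expanding B_(CE) for type 0x00000013 by hand gives

    if (inst + NVKM_ENGINE_CE0 < NVKM_ENGINE_CE_LAST + 1)
            info->index = NVKM_ENGINE_CE0 + inst;

so inst 0/1/2 select NVKM_ENGINE_CE0/CE1/CE2 and out-of-range instances are skipped, where the removed table needed one case per copy engine.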
index 6b2d7531a7ff1beff1ee943745a0d5328e108940..1c3d23b0e84ad063c0c11310828327e232ac03bd 100644 (file)
@@ -120,6 +120,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
 
        data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
        if (data && info.vidmask && info.base && info.step) {
+               volt->min_uv = info.min;
+               volt->max_uv = info.max;
                for (i = 0; i < info.vidmask + 1; i++) {
                        if (info.base >= info.min &&
                                info.base <= info.max) {
@@ -131,6 +133,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
                }
                volt->vid_mask = info.vidmask;
        } else if (data && info.vidmask) {
+               volt->min_uv = 0xffffffff;
+               volt->max_uv = 0;
                for (i = 0; i < cnt; i++) {
                        data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
                                                       &ivid);
@@ -138,9 +142,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
                                volt->vid[volt->vid_nr].uv = ivid.voltage;
                                volt->vid[volt->vid_nr].vid = ivid.vid;
                                volt->vid_nr++;
+                               volt->min_uv = min(volt->min_uv, ivid.voltage);
+                               volt->max_uv = max(volt->max_uv, ivid.voltage);
                        }
                }
                volt->vid_mask = info.vidmask;
+       } else if (data && info.type == NVBIOS_VOLT_PWM) {
+               volt->min_uv = info.base;
+               volt->max_uv = info.base + info.pwm_range;
        }
 }
 
@@ -181,8 +190,11 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
        volt->func = func;
 
        /* Assuming the non-bios device should build the voltage table later */
-       if (bios)
+       if (bios) {
                nvkm_volt_parse_bios(bios, volt);
+               nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
+                          volt->min_uv, volt->max_uv);
+       }
 
        if (volt->vid_nr) {
                for (i = 0; i < volt->vid_nr; i++) {
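For the new NVBIOS_VOLT_PWM branch the range comes straight from two BIOS fields: with illustrative values info.base = 800000 uV and info.pwm_range = 250000 uV, min_uv = 800000 and max_uv = 800000 + 250000 = 1050000 uV. The VID-entry branch instead starts from (0xffffffff, 0) and folds min()/max() over every parsed entry.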
index d554455326da65c0c879b36525f7ed40072fae1d..ce5d83cdc7cf7900cf86060750b9398ef24063d9 100644 (file)
@@ -77,18 +77,19 @@ gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
        return mv;
 }
 
-int
+static int
 gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
 {
+       static const int v_scale = 1000;
        int mv;
 
        mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
-       mv = DIV_ROUND_UP(mv, 1000);
+       mv = DIV_ROUND_UP(mv, v_scale);
 
        return mv * 1000;
 }
 
-int
+static int
 gk20a_volt_vid_get(struct nvkm_volt *base)
 {
        struct gk20a_volt *volt = gk20a_volt(base);
@@ -103,7 +104,7 @@ gk20a_volt_vid_get(struct nvkm_volt *base)
        return -EINVAL;
 }
 
-int
+static int
 gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
 {
        struct gk20a_volt *volt = gk20a_volt(base);
@@ -113,7 +114,7 @@ gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
        return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
 }
 
-int
+static int
 gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
 {
        struct gk20a_volt *volt = gk20a_volt(base);
@@ -143,9 +144,9 @@ gk20a_volt = {
 };
 
 int
-_gk20a_volt_ctor(struct nvkm_device *device, int index,
-                const struct cvb_coef *coefs, int nb_coefs,
-                struct gk20a_volt *volt)
+gk20a_volt_ctor(struct nvkm_device *device, int index,
+               const struct cvb_coef *coefs, int nb_coefs,
+               int vmin, struct gk20a_volt *volt)
 {
        struct nvkm_device_tegra *tdev = device->func->tegra(device);
        int i, uv;
@@ -160,9 +161,9 @@ _gk20a_volt_ctor(struct nvkm_device *device, int index,
        volt->base.vid_nr = nb_coefs;
        for (i = 0; i < volt->base.vid_nr; i++) {
                volt->base.vid[i].vid = i;
-               volt->base.vid[i].uv =
-                       gk20a_volt_calc_voltage(&coefs[i],
-                                               tdev->gpu_speedo);
+               volt->base.vid[i].uv = max(
+                       gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo),
+                       vmin);
                nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
                           volt->base.vid[i].vid, volt->base.vid[i].uv);
        }
@@ -180,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
                return -ENOMEM;
        *pvolt = &volt->base;
 
-       return _gk20a_volt_ctor(device, index, gk20a_cvb_coef,
-                               ARRAY_SIZE(gk20a_cvb_coef), volt);
+       return gk20a_volt_ctor(device, index, gk20a_cvb_coef,
+                              ARRAY_SIZE(gk20a_cvb_coef), 0, volt);
 }
index 0fa3b502bcf802c995070bf3a150821c501bd047..6a6c97f9684e0c1e560d03209fbed77755ac4dac 100644 (file)
@@ -37,13 +37,8 @@ struct gk20a_volt {
        struct regulator *vdd;
 };
 
-int _gk20a_volt_ctor(struct nvkm_device *device, int index,
-                    const struct cvb_coef *coefs, int nb_coefs,
-                    struct gk20a_volt *volt);
-
-int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo);
-int gk20a_volt_vid_get(struct nvkm_volt *volt);
-int gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid);
-int gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition);
+int gk20a_volt_ctor(struct nvkm_device *device, int index,
+                   const struct cvb_coef *coefs, int nb_coefs,
+                   int vmin, struct gk20a_volt *volt);
 
 #endif
index 49b5ecb701e4c1ba91a1edc20a59e37c8d3344f5..74db4d28930fd410994041a2e927c34d6876cbac 100644 (file)
@@ -41,16 +41,52 @@ const struct cvb_coef gm20b_cvb_coef[] = {
        /* 921600 */ { 2647676, -106455, 1632 },
 };
 
+static const struct cvb_coef gm20b_na_cvb_coef[] = {
+       /* KHz,         c0,     c1,   c2,    c3,     c4,   c5 */
+       /*  76800 */ {  814294, 8144, -940, 808, -21583, 226 },
+       /* 153600 */ {  856185, 8144, -940, 808, -21583, 226 },
+       /* 230400 */ {  898077, 8144, -940, 808, -21583, 226 },
+       /* 307200 */ {  939968, 8144, -940, 808, -21583, 226 },
+       /* 384000 */ {  981860, 8144, -940, 808, -21583, 226 },
+       /* 460800 */ { 1023751, 8144, -940, 808, -21583, 226 },
+       /* 537600 */ { 1065642, 8144, -940, 808, -21583, 226 },
+       /* 614400 */ { 1107534, 8144, -940, 808, -21583, 226 },
+       /* 691200 */ { 1149425, 8144, -940, 808, -21583, 226 },
+       /* 768000 */ { 1191317, 8144, -940, 808, -21583, 226 },
+       /* 844800 */ { 1233208, 8144, -940, 808, -21583, 226 },
+       /* 921600 */ { 1275100, 8144, -940, 808, -21583, 226 },
+       /* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
+};
+
+const u32 speedo_to_vmin[] = {
+       /*   0,      1,      2,      3,      4, */
+       950000, 840000, 818750, 840000, 810000,
+};
+
 int
 gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 {
+       struct nvkm_device_tegra *tdev = device->func->tegra(device);
        struct gk20a_volt *volt;
+       u32 vmin;
+
+       if (tdev->gpu_speedo_id >= ARRAY_SIZE(speedo_to_vmin)) {
+               nvdev_error(device, "unsupported speedo %d\n",
+                           tdev->gpu_speedo_id);
+               return -EINVAL;
+       }
 
        volt = kzalloc(sizeof(*volt), GFP_KERNEL);
        if (!volt)
                return -ENOMEM;
        *pvolt = &volt->base;
 
-       return _gk20a_volt_ctor(device, index, gm20b_cvb_coef,
-                               ARRAY_SIZE(gm20b_cvb_coef), volt);
+       vmin = speedo_to_vmin[tdev->gpu_speedo_id];
+
+       if (tdev->gpu_speedo_id >= 1)
+               return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef,
+                                    ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
+       else
+               return gk20a_volt_ctor(device, index, gm20b_cvb_coef,
+                                       ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
 }
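gm20b_volt_new() also shows the guard-then-index idiom for the new table:

    if (tdev->gpu_speedo_id >= ARRAY_SIZE(speedo_to_vmin))
            return -EINVAL;         /* fail loudly, no out-of-bounds read */
    vmin = speedo_to_vmin[tdev->gpu_speedo_id];

and speedo IDs >= 1 additionally select gm20b_na_cvb_coef (presumably the noise-aware DVFS coefficients) instead of the plain CVB table.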
index 6f45e9d00b41ae523100e41ea17a21585e10808f..e1be5e795cd807aadb0cff143ca526fb8612440b 100644 (file)
@@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct regulator *vdds_dsi;
-       int r;
 
        if (dsi->vdds_dsi_reg != NULL)
                return 0;
index 061f9bab4c9b8098190f43fbc06d5bd26631b157..0c0a5139a30103dd6686e2964102d83eab08fb00 100644 (file)
@@ -120,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
 
 static int hdmi_init_regulator(void)
 {
-       int r;
        struct regulator *reg;
 
        if (hdmi.vdda_reg != NULL)
index 3a7bdf1c842b8f2b3ee9215a5a542dc7f76e3ab5..85143d1b9b3168062aef554e7041956cf4f62ec0 100644 (file)
@@ -168,6 +168,7 @@ static int panel_simple_disable(struct drm_panel *panel)
 
        if (p->backlight) {
                p->backlight->props.power = FB_BLANK_POWERDOWN;
+               p->backlight->props.state |= BL_CORE_FBBLANK;
                backlight_update_status(p->backlight);
        }
 
@@ -235,6 +236,7 @@ static int panel_simple_enable(struct drm_panel *panel)
                msleep(p->desc->delay.enable);
 
        if (p->backlight) {
+               p->backlight->props.state &= ~BL_CORE_FBBLANK;
                p->backlight->props.power = FB_BLANK_UNBLANK;
                backlight_update_status(p->backlight);
        }
@@ -964,8 +966,8 @@ static const struct panel_desc innolux_zj070na_01p = {
        .num_modes = 1,
        .bpc = 6,
        .size = {
-               .width = 1024,
-               .height = 600,
+               .width = 154,
+               .height = 90,
        },
 };
 
@@ -1017,6 +1019,51 @@ static const struct panel_desc lg_lb070wv8 = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
 };
 
+static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
+       .clock = 200000,
+       .hdisplay = 1536,
+       .hsync_start = 1536 + 12,
+       .hsync_end = 1536 + 12 + 16,
+       .htotal = 1536 + 12 + 16 + 48,
+       .vdisplay = 2048,
+       .vsync_start = 2048 + 8,
+       .vsync_end = 2048 + 8 + 4,
+       .vtotal = 2048 + 8 + 4 + 8,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc lg_lp079qx1_sp0v = {
+       .modes = &lg_lp079qx1_sp0v_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 129,
+               .height = 171,
+       },
+};
+
+static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
+       .clock = 205210,
+       .hdisplay = 2048,
+       .hsync_start = 2048 + 150,
+       .hsync_end = 2048 + 150 + 5,
+       .htotal = 2048 + 150 + 5 + 5,
+       .vdisplay = 1536,
+       .vsync_start = 1536 + 3,
+       .vsync_end = 1536 + 3 + 1,
+       .vtotal = 1536 + 3 + 1 + 9,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc lg_lp097qx1_spa1 = {
+       .modes = &lg_lp097qx1_spa1_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 208,
+               .height = 147,
+       },
+};
+
 static const struct drm_display_mode lg_lp120up1_mode = {
        .clock = 162300,
        .hdisplay = 1920,
@@ -1224,6 +1271,28 @@ static const struct panel_desc qd43003c0_40 = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
+       .clock = 271560,
+       .hdisplay = 2560,
+       .hsync_start = 2560 + 48,
+       .hsync_end = 2560 + 48 + 32,
+       .htotal = 2560 + 48 + 32 + 80,
+       .vdisplay = 1600,
+       .vsync_start = 1600 + 2,
+       .vsync_end = 1600 + 2 + 5,
+       .vtotal = 1600 + 2 + 5 + 57,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_lsn122dl01_c01 = {
+       .modes = &samsung_lsn122dl01_c01_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 263,
+               .height = 164,
+       },
+};
+
 static const struct drm_display_mode samsung_ltn101nt05_mode = {
        .clock = 54030,
        .hdisplay = 1024,
@@ -1242,8 +1311,8 @@ static const struct panel_desc samsung_ltn101nt05 = {
        .num_modes = 1,
        .bpc = 6,
        .size = {
-               .width = 1024,
-               .height = 600,
+               .width = 223,
+               .height = 125,
        },
 };
 
@@ -1270,6 +1339,53 @@ static const struct panel_desc samsung_ltn140at29_301 = {
        },
 };
 
+static const struct display_timing sharp_lq101k1ly04_timing = {
+       .pixelclock = { 60000000, 65000000, 80000000 },
+       .hactive = { 1280, 1280, 1280 },
+       .hfront_porch = { 20, 20, 20 },
+       .hback_porch = { 20, 20, 20 },
+       .hsync_len = { 10, 10, 10 },
+       .vactive = { 800, 800, 800 },
+       .vfront_porch = { 4, 4, 4 },
+       .vback_porch = { 4, 4, 4 },
+       .vsync_len = { 4, 4, 4 },
+       .flags = DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static const struct panel_desc sharp_lq101k1ly04 = {
+       .timings = &sharp_lq101k1ly04_timing,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 217,
+               .height = 136,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+};
+
+static const struct drm_display_mode sharp_lq123p1jx31_mode = {
+       .clock = 252750,
+       .hdisplay = 2400,
+       .hsync_start = 2400 + 48,
+       .hsync_end = 2400 + 48 + 32,
+       .htotal = 2400 + 48 + 32 + 80,
+       .vdisplay = 1600,
+       .vsync_start = 1600 + 3,
+       .vsync_end = 1600 + 3 + 10,
+       .vtotal = 1600 + 3 + 10 + 33,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc sharp_lq123p1jx31 = {
+       .modes = &sharp_lq123p1jx31_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 259,
+               .height = 173,
+       },
+};
+
 static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
        .clock = 33300,
        .hdisplay = 800,
@@ -1293,6 +1409,29 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
        .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct drm_display_mode starry_kr122ea0sra_mode = {
+       .clock = 147000,
+       .hdisplay = 1920,
+       .hsync_start = 1920 + 16,
+       .hsync_end = 1920 + 16 + 16,
+       .htotal = 1920 + 16 + 16 + 32,
+       .vdisplay = 1200,
+       .vsync_start = 1200 + 15,
+       .vsync_end = 1200 + 15 + 2,
+       .vtotal = 1200 + 15 + 2 + 18,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc starry_kr122ea0sra = {
+       .modes = &starry_kr122ea0sra_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 263,
+               .height = 164,
+       },
+};
+
 static const struct drm_display_mode tpk_f07a_0102_mode = {
        .clock = 33260,
        .hdisplay = 800,
@@ -1456,6 +1595,12 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "lg,lb070wv8",
                .data = &lg_lb070wv8,
+       }, {
+               .compatible = "lg,lp079qx1-sp0v",
+               .data = &lg_lp079qx1_sp0v,
+       }, {
+               .compatible = "lg,lp097qx1-spa1",
+               .data = &lg_lp097qx1_spa1,
        }, {
                .compatible = "lg,lp120up1",
                .data = &lg_lp120up1,
@@ -1480,15 +1625,27 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "qiaodian,qd43003c0-40",
                .data = &qd43003c0_40,
+       }, {
+               .compatible = "samsung,lsn122dl01-c01",
+               .data = &samsung_lsn122dl01_c01,
        }, {
                .compatible = "samsung,ltn101nt05",
                .data = &samsung_ltn101nt05,
        }, {
                .compatible = "samsung,ltn140at29-301",
                .data = &samsung_ltn140at29_301,
+       }, {
+               .compatible = "sharp,lq101k1ly04",
+               .data = &sharp_lq101k1ly04,
+       }, {
+               .compatible = "sharp,lq123p1jx31",
+               .data = &sharp_lq123p1jx31,
        }, {
                .compatible = "shelly,sca07010-bfn-lnn",
                .data = &shelly_sca07010_bfn_lnn,
+       }, {
+               .compatible = "starry,kr122ea0sra",
+               .data = &starry_kr122ea0sra,
        }, {
                .compatible = "tpk,f07a-0102",
                .data = &tpk_f07a_0102,
@@ -1701,7 +1858,6 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
        .lanes = 4,
 };
 
-
 static const struct of_device_id dsi_of_match[] = {
        {
                .compatible = "auo,b080uan01",
index 0738d74c8d049faf18f0153378de60a1978a49e7..d50c9679e631a8e8fc226c382cdb9f88fee64fc9 100644 (file)
@@ -350,11 +350,19 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
                       struct ttm_mem_reg *new_mem)
 {
        struct ttm_mem_reg *old_mem = &bo->mem;
+       int ret;
+
+       ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+       if (ret)
+               return ret;
+
+
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                qxl_move_null(bo, new_mem);
                return 0;
        }
-       return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+       return ttm_bo_move_memcpy(bo, evict, interruptible,
+                                 no_wait_gpu, new_mem);
 }
 
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
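qxl_bo_move() now waits for the BO to go idle before choosing a move strategy, and radeon_bo_move() further down gains the identical hunk; both ttm_bo_move_memcpy() callers also pass the new interruptible flag through. Condensed, the pattern is:

    ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
    if (ret)
            return ret;     /* e.g. -ERESTARTSYS or -EBUSY */
    /* GPU no longer touching the buffer; safe to migrate */

so the memcpy path never races with in-flight GPU access to the old placement.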
index e91763d5d800ab57eb7422b247ebc853b972bb4c..a97abc8af657e879dc0f7ee3b29334b1352cffb0 100644 (file)
@@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                /* use frac fb div on RS780/RS880 */
-               if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+               if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+                   && !radeon_crtc->ss_enabled)
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
@@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                        if (radeon_crtc->ss.refdiv) {
                                radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
                                radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
-                               if (ASIC_IS_AVIVO(rdev))
+                               if (rdev->family >= CHIP_RV770)
                                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                        }
                }
index ba192a35c60771cbcd9461b6d127379ca17295b2..0c1b9ff433af706c15347467a756a7f71e36746a 100644 (file)
@@ -53,6 +53,7 @@ MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
 MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
 MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
 MODULE_FIRMWARE("radeon/HAWAII_me.bin");
@@ -72,6 +73,7 @@ MODULE_FIRMWARE("radeon/hawaii_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
 MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
 MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
 MODULE_FIRMWARE("radeon/KAVERI_me.bin");
@@ -1990,12 +1992,17 @@ static int cik_init_microcode(struct radeon_device *rdev)
        int new_fw = 0;
        int err;
        int num_fw;
+       bool new_smc = false;
 
        DRM_DEBUG("\n");
 
        switch (rdev->family) {
        case CHIP_BONAIRE:
                chip_name = "BONAIRE";
+               if ((rdev->pdev->revision == 0x80) ||
+                   (rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->device == 0x665f))
+                       new_smc = true;
                new_chip_name = "bonaire";
                pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
                me_req_size = CIK_ME_UCODE_SIZE * 4;
@@ -2010,6 +2017,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_HAWAII:
                chip_name = "HAWAII";
+               if (rdev->pdev->revision == 0x80)
+                       new_smc = true;
                new_chip_name = "hawaii";
                pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
                me_req_size = CIK_ME_UCODE_SIZE * 4;
@@ -2259,7 +2268,10 @@ static int cik_init_microcode(struct radeon_device *rdev)
                        }
                }
 
-               snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
+               if (new_smc)
+                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
+               else
+                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
                err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
                if (err) {
                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
@@ -8354,7 +8366,8 @@ static int cik_startup(struct radeon_device *rdev)
                }
        }
        rdev->rlc.cs_data = ci_cs_data;
-       rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+       rdev->rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+       rdev->rlc.cp_table_size += 64 * 1024; /* GDS */
        r = sumo_rlc_init(rdev);
        if (r) {
                DRM_ERROR("Failed to init rlc BOs!\n");
index 0d3f744de35aa4c7fb5a8ad3389d896a719b2a60..d960d3915408963569e6acbfe6612b70af631209 100644 (file)
@@ -2209,6 +2209,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                }
                break;
        }
+       case PACKET3_PFP_SYNC_ME:
+               if (pkt->count) {
+                       DRM_ERROR("bad PFP_SYNC_ME\n");
+                       return -EINVAL;
+               }
+               break;
        case PACKET3_SURFACE_SYNC:
                if (pkt->count != 3) {
                        DRM_ERROR("bad SURFACE_SYNC\n");
@@ -3381,6 +3387,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
        case PACKET3_MPEG_INDEX:
        case PACKET3_WAIT_REG_MEM:
        case PACKET3_MEM_WRITE:
+       case PACKET3_PFP_SYNC_ME:
        case PACKET3_SURFACE_SYNC:
        case PACKET3_EVENT_WRITE:
        case PACKET3_EVENT_WRITE_EOP:
index 0b174e14e9a688a84da9fbf575253fb72a868a99..c8e3d394cde70033798b696305da88c38a9c0c9c 100644 (file)
                 */
 #              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
 #              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#define        PACKET3_PFP_SYNC_ME                             0x42
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
index 95f4fea893021cb8f233244404fed6878742dd3b..86dcdf38b732176ec94865d213648953e344cf04 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <linux/pci.h>
+#include <linux/delay.h>
 
 #include "radeon_acpi.h"
 
@@ -27,6 +28,7 @@ struct radeon_atpx_functions {
 struct radeon_atpx {
        acpi_handle handle;
        struct radeon_atpx_functions functions;
+       bool is_hybrid;
 };
 
 static struct radeon_atpx_priv {
@@ -62,6 +64,14 @@ bool radeon_has_atpx(void) {
        return radeon_atpx_priv.atpx_detected;
 }
 
+bool radeon_has_atpx_dgpu_power_cntl(void) {
+       return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool radeon_is_atpx_hybrid(void) {
+       return radeon_atpx_priv.atpx.is_hybrid;
+}
+
 /**
  * radeon_atpx_call - call an ATPX method
  *
@@ -141,18 +151,12 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
  */
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
-       /* make sure required functions are enabled */
-       /* dGPU power control is required */
-       if (atpx->functions.power_cntl == false) {
-               printk("ATPX dGPU power cntl not present, forcing\n");
-               atpx->functions.power_cntl = true;
-       }
+       u32 valid_bits = 0;
 
        if (atpx->functions.px_params) {
                union acpi_object *info;
                struct atpx_px_params output;
                size_t size;
-               u32 valid_bits;
 
                info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
                if (!info)
@@ -171,19 +175,42 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
                memcpy(&output, info->buffer.pointer, size);
 
                valid_bits = output.flags & output.valid_flags;
-               /* if separate mux flag is set, mux controls are required */
-               if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
-                       atpx->functions.i2c_mux_cntl = true;
-                       atpx->functions.disp_mux_cntl = true;
-               }
-               /* if any outputs are muxed, mux controls are required */
-               if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
-                                 ATPX_TV_SIGNAL_MUXED |
-                                 ATPX_DFP_SIGNAL_MUXED))
-                       atpx->functions.disp_mux_cntl = true;
 
                kfree(info);
        }
+
+       /* if separate mux flag is set, mux controls are required */
+       if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+               atpx->functions.i2c_mux_cntl = true;
+               atpx->functions.disp_mux_cntl = true;
+       }
+       /* if any outputs are muxed, mux controls are required */
+       if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+                         ATPX_TV_SIGNAL_MUXED |
+                         ATPX_DFP_SIGNAL_MUXED))
+               atpx->functions.disp_mux_cntl = true;
+
+       /* some bioses set these bits rather than flagging power_cntl as supported */
+       if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+                         ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+               atpx->functions.power_cntl = true;
+
+       atpx->is_hybrid = false;
+       if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+               printk("ATPX Hybrid Graphics\n");
+#if 1
+               /* This is a temporary hack until the D3 cold support
+                * makes it upstream.  The ATPX power_control method seems
+                * to still work even when the system should be using
+                * the new standardized hybrid D3 cold ACPI interface.
+                */
+               atpx->functions.power_cntl = true;
+#else
+               atpx->functions.power_cntl = false;
+#endif
+               atpx->is_hybrid = true;
+       }
+
        return 0;
 }
 
@@ -258,6 +285,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
                if (!info)
                        return -EIO;
                kfree(info);
+
+               /* 200ms delay is required after off */
+               if (state == 0)
+                       msleep(200);
        }
        return 0;
 }
index 81a63d7f5cd9df6e41ef7748d4635a2cddf5b508..b79f3b002471a5c45b9ecbb109bafb075c973d50 100644 (file)
@@ -2064,7 +2064,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                           RADEON_OUTPUT_CSC_BYPASS);
                        /* no HPD on analog connectors */
                        radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
                        connector->interlace_allowed = true;
                        connector->doublescan_allowed = true;
                        break;
@@ -2314,8 +2313,10 @@ radeon_add_atom_connector(struct drm_device *dev,
        }
 
        if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
-               if (i2c_bus->valid)
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+               if (i2c_bus->valid) {
+                       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                                           DRM_CONNECTOR_POLL_DISCONNECT;
+               }
        } else
                connector->polled = DRM_CONNECTOR_POLL_HPD;
 
@@ -2391,7 +2392,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
                                              1);
                /* no HPD on analog connectors */
                radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-               connector->polled = DRM_CONNECTOR_POLL_CONNECT;
                connector->interlace_allowed = true;
                connector->doublescan_allowed = true;
                break;
@@ -2476,10 +2476,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
        }
 
        if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
-               if (i2c_bus->valid)
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+               if (i2c_bus->valid) {
+                       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                                           DRM_CONNECTOR_POLL_DISCONNECT;
+               }
        } else
                connector->polled = DRM_CONNECTOR_POLL_HPD;
+
        connector->display_info.subpixel_order = subpixel_order;
        drm_connector_register(connector);
 }
index e0bf77843d790f0450dac098128fdc431d7c0003..a00dd2f745271ec760bff14d5455571388313ebd 100644 (file)
@@ -631,6 +631,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 /*
  * GPU helpers function.
  */
+
+/**
+ * radeon_device_is_virtual - check if we are running in a virtual environment
+ *
+ * Check if the asic has been passed through to a VM (all asics).
+ * Used at driver startup.
+ * Returns true if virtual or false if not.
+ */
+static bool radeon_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+       return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+       return false;
+#endif
+}
+
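boot_cpu_has(X86_FEATURE_HYPERVISOR) tests the CPUID "hypervisor present" bit (leaf 1, ECX bit 31), which hypervisors set for their guests. A userspace analogue of the same check, for illustration only:

    #include <cpuid.h>
    #include <stdbool.h>

    static bool running_virtualized(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return false;           /* CPUID leaf unavailable */
            return ecx & (1u << 31);        /* hypervisor present bit */
    }

Making radeon_card_posted() return false in that case forces asic_init on passed-through GPUs, whose POST state cannot be trusted across guest reboots.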
 /**
  * radeon_card_posted - check if the hw has already been initialized
  *
@@ -644,6 +661,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
 {
        uint32_t reg;
 
+       /* for pass through, always force asic_init */
+       if (radeon_device_is_virtual())
+               return false;
+
        /* required for EFI mode on macbook2,1 which uses an r5xx asic */
        if (efi_enabled(EFI_BOOT) &&
            (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
@@ -1635,7 +1656,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
        radeon_agp_suspend(rdev);
 
        pci_save_state(dev->pdev);
-       if (freeze && rdev->family >= CHIP_R600) {
+       if (freeze && rdev->family >= CHIP_CEDAR) {
                rdev->asic->asic_reset(rdev, true);
                pci_restore_state(dev->pdev);
        } else if (suspend) {
index a455dc7d4aa1a146ab3a59e4bbd6f0b9e2244159..c01a7c6abb491c4227ad02fd6525813a7a9d3d55 100644 (file)
  *   2.43.0 - RADEON_INFO_GPU_RESET_COUNTER
  *   2.44.0 - SET_APPEND_CNT packet3 support
  *   2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
+ *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       45
+#define KMS_DRIVER_MINOR       46
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -162,9 +163,13 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
 #if defined(CONFIG_VGA_SWITCHEROO)
 void radeon_register_atpx_handler(void);
 void radeon_unregister_atpx_handler(void);
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
 #else
 static inline void radeon_register_atpx_handler(void) {}
 static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
 #endif
 
 int radeon_no_wb;
@@ -404,7 +409,10 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_ignore_hotplug(pdev);
-       pci_set_power_state(pdev, PCI_D3cold);
+       if (radeon_is_atpx_hybrid())
+               pci_set_power_state(pdev, PCI_D3cold);
+       else if (!radeon_has_atpx_dgpu_power_cntl())
+               pci_set_power_state(pdev, PCI_D3hot);
        drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 
        return 0;
@@ -421,7 +429,9 @@ static int radeon_pmops_runtime_resume(struct device *dev)
 
        drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-       pci_set_power_state(pdev, PCI_D0);
+       if (radeon_is_atpx_hybrid() ||
+           !radeon_has_atpx_dgpu_power_cntl())
+               pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        ret = pci_enable_device(pdev);
        if (ret)
index 590b0377fbe267f859b7cc7649b309b9c34d4436..ffdad81ef9647cbc96612e3d0dc4d41b85009dff 100644 (file)
@@ -300,8 +300,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
        if (IS_ERR(fence))
                return PTR_ERR(fence);
 
-       r = ttm_bo_move_accel_cleanup(bo, &fence->base,
-                                     evict, no_wait_gpu, new_mem);
+       r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
        radeon_fence_unref(&fence);
        return r;
 }
@@ -403,6 +402,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;
 
+       r = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+       if (r)
+               return r;
+
        /* Can't move a pinned BO */
        rbo = container_of(bo, struct radeon_bo, tbo);
        if (WARN_ON_ONCE(rbo->pin_count > 0))
@@ -441,7 +444,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 
        if (r) {
 memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+               r = ttm_bo_move_memcpy(bo, evict, interruptible,
+                                      no_wait_gpu, new_mem);
                if (r) {
                        return r;
                }
index b30e719dd56d87f1a195c443de85b8fc48a462b7..2523ca96c6c7d5e36fd9633cab723f2656d241fb 100644 (file)
@@ -50,6 +50,7 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
 MODULE_FIRMWARE("radeon/tahiti_mc.bin");
 MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
 MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
 MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -65,6 +66,7 @@ MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
 MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
 MODULE_FIRMWARE("radeon/VERDE_me.bin");
@@ -80,6 +82,7 @@ MODULE_FIRMWARE("radeon/verde_ce.bin");
 MODULE_FIRMWARE("radeon/verde_mc.bin");
 MODULE_FIRMWARE("radeon/verde_rlc.bin");
 MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
 MODULE_FIRMWARE("radeon/OLAND_me.bin");
@@ -95,6 +98,7 @@ MODULE_FIRMWARE("radeon/oland_ce.bin");
 MODULE_FIRMWARE("radeon/oland_mc.bin");
 MODULE_FIRMWARE("radeon/oland_rlc.bin");
 MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
 MODULE_FIRMWARE("radeon/HAINAN_me.bin");
@@ -110,6 +114,7 @@ MODULE_FIRMWARE("radeon/hainan_ce.bin");
 MODULE_FIRMWARE("radeon/hainan_mc.bin");
 MODULE_FIRMWARE("radeon/hainan_rlc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
 
 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
 static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1653,12 +1658,16 @@ static int si_init_microcode(struct radeon_device *rdev)
        char fw_name[30];
        int err;
        int new_fw = 0;
+       bool new_smc = false;
 
        DRM_DEBUG("\n");
 
        switch (rdev->family) {
        case CHIP_TAHITI:
                chip_name = "TAHITI";
+               /* XXX: figure out which Tahitis need the new ucode */
+               if (0)
+                       new_smc = true;
                new_chip_name = "tahiti";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1670,6 +1679,13 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_PITCAIRN:
                chip_name = "PITCAIRN";
+               if ((rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->device == 0x6810) ||
+                   (rdev->pdev->device == 0x6811) ||
+                   (rdev->pdev->device == 0x6816) ||
+                   (rdev->pdev->device == 0x6817) ||
+                   (rdev->pdev->device == 0x6806))
+                       new_smc = true;
                new_chip_name = "pitcairn";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1681,6 +1697,16 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_VERDE:
                chip_name = "VERDE";
+               if ((rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->revision == 0x87) ||
+                   (rdev->pdev->device == 0x6820) ||
+                   (rdev->pdev->device == 0x6821) ||
+                   (rdev->pdev->device == 0x6822) ||
+                   (rdev->pdev->device == 0x6823) ||
+                   (rdev->pdev->device == 0x682A) ||
+                   (rdev->pdev->device == 0x682B))
+                       new_smc = true;
                new_chip_name = "verde";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1692,6 +1718,13 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_OLAND:
                chip_name = "OLAND";
+               if ((rdev->pdev->revision == 0xC7) ||
+                   (rdev->pdev->revision == 0x80) ||
+                   (rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->device == 0x6604) ||
+                   (rdev->pdev->device == 0x6605))
+                       new_smc = true;
                new_chip_name = "oland";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1702,6 +1735,13 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_HAINAN:
                chip_name = "HAINAN";
+               if ((rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->revision == 0xC3) ||
+                   (rdev->pdev->device == 0x6664) ||
+                   (rdev->pdev->device == 0x6665) ||
+                   (rdev->pdev->device == 0x6667))
+                       new_smc = true;
                new_chip_name = "hainan";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1847,7 +1887,10 @@ static int si_init_microcode(struct radeon_device *rdev)
                }
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
+       if (new_smc)
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
        err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
        if (err) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
index c120172add5c04423038adf11bccb4027c16fe3d..e81e19a660adcbd10736a01ab89934c4b06e0bae 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/component.h>
 #include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
 
+#define RK3288_GRF_SOC_CON6            0x25c
+#define RK3288_EDP_LCDC_SEL            BIT(5)
+#define RK3399_GRF_SOC_CON20           0x6250
+#define RK3399_EDP_LCDC_SEL            BIT(5)
+
+#define HIWORD_UPDATE(val, mask)       (val | (mask) << 16)
+
 #define to_dp(nm)      container_of(nm, struct rockchip_dp_device, nm)
 
-/* dp grf register offset */
-#define GRF_SOC_CON6                            0x025c
-#define GRF_EDP_LCD_SEL_MASK                    BIT(5)
-#define GRF_EDP_SEL_VOP_LIT                     BIT(5)
-#define GRF_EDP_SEL_VOP_BIG                     0
+/**
+ * struct rockchip_dp_chip_data - per-chip GRF settings
+ * @lcdsel_grf_reg: GRF register offset of the LCDC select field
+ * @lcdsel_big: register value selecting the big VOP for eDP
+ * @lcdsel_lit: register value selecting the little VOP for eDP
+ * @chip_type: specific chip type
+ */
+struct rockchip_dp_chip_data {
+       u32     lcdsel_grf_reg;
+       u32     lcdsel_big;
+       u32     lcdsel_lit;
+       u32     chip_type;
+};
 
 struct rockchip_dp_device {
        struct drm_device        *drm_dev;
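HIWORD_UPDATE() encodes the Rockchip GRF write-mask convention: the high 16 bits of a write select which of the low 16 bits take effect, so the GRF can be updated without a read-modify-write. Expanding it for the eDP mux bit:

    HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL)
        = BIT(5) | BIT(5) << 16 = 0x00200020    /* set bit 5 */
    HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL)
        = BIT(5) << 16          = 0x00200000    /* clear bit 5 */

The lcdsel_big/lcdsel_lit values that rockchip_dp_drm_encoder_enable() writes with regmap_write() below are presumably built this way, which is why no lock or read-back appears around the GRF update.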
@@ -48,9 +64,12 @@ struct rockchip_dp_device {
        struct drm_display_mode  mode;
 
        struct clk               *pclk;
+       struct clk               *grfclk;
        struct regmap            *grf;
        struct reset_control     *rst;
 
+       const struct rockchip_dp_chip_data *data;
+
        struct analogix_dp_plat_data plat_data;
 };
 
@@ -92,6 +111,23 @@ static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
        return 0;
 }
 
+static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data,
+                                struct drm_connector *connector)
+{
+       struct drm_display_info *di = &connector->display_info;
+       /* The VOP cannot output YUV video formats to eDP correctly */
+       u32 mask = DRM_COLOR_FORMAT_YCRCB444 | DRM_COLOR_FORMAT_YCRCB422;
+
+       if ((di->color_formats & mask)) {
+               DRM_DEBUG_KMS("Swapping display color format from YUV to RGB\n");
+               di->color_formats &= ~mask;
+               di->color_formats |= DRM_COLOR_FORMAT_RGB444;
+               di->bpc = 8;
+       }
+
+       return 0;
+}
+
 static bool
 rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
                                   const struct drm_display_mode *mode,
@@ -119,17 +155,23 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
                return;
 
        if (ret)
-               val = GRF_EDP_SEL_VOP_LIT | (GRF_EDP_LCD_SEL_MASK << 16);
+               val = dp->data->lcdsel_lit;
        else
-               val = GRF_EDP_SEL_VOP_BIG | (GRF_EDP_LCD_SEL_MASK << 16);
+               val = dp->data->lcdsel_big;
 
        dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
 
-       ret = regmap_write(dp->grf, GRF_SOC_CON6, val);
-       if (ret != 0) {
-               dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+       ret = clk_prepare_enable(dp->grfclk);
+       if (ret < 0) {
+               dev_err(dp->dev, "failed to enable grfclk %d\n", ret);
                return;
        }
+
+       ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
+       if (ret != 0)
+               dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+
+       clk_disable_unprepare(dp->grfclk);
 }
 
 static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
@@ -143,22 +185,29 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
                                      struct drm_connector_state *conn_state)
 {
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+       struct rockchip_dp_device *dp = to_dp(encoder);
+       int ret;
 
        /*
-        * FIXME(Yakir): driver should configure the CRTC output video
-        * mode with the display information which indicated the monitor
-        * support colorimetry.
-        *
-        * But don't know why the CRTC driver seems could only output the
-        * RGBaaa rightly. For example, if connect the "innolux,n116bge"
-        * eDP screen, EDID would indicated that screen only accepted the
-        * 6bpc mode. But if I configure CRTC to RGB666 output, then eDP
-        * screen would show a blue picture (RGB888 show a green picture).
-        * But if I configure CTRC to RGBaaa, and eDP driver still keep
-        * RGB666 input video mode, then screen would works prefect.
+        * The hardware IC is designed so that the VOP must output the
+        * RGB10 video format to the eDP controller; if the eDP panel only
+        * supports RGB8, the eDP controller (not the VOP) cuts down the
+        * video data. That is why the VOP output mode is hardcoded to
+        * RGB10 here.
         */
+
        s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
        s->output_type = DRM_MODE_CONNECTOR_eDP;
+       if (dp->data->chip_type == RK3399_EDP) {
+               /*
+                * On RK3399, VOP Little must set its output mode to
+                * RGB888, while VOP Big must set it to RGB10.
+                */
+               ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
+                                                       encoder);
+               if (ret > 0)
+                       s->output_mode = ROCKCHIP_OUT_MODE_P888;
+       }
 
        return 0;
 }
@@ -192,6 +241,16 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
                return PTR_ERR(dp->grf);
        }
 
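+       /*
+        * The "grf" clock is optional: when it is absent (-ENOENT) the
+        * handle is left NULL and GRF accesses proceed without it.
+        */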
+       dp->grfclk = devm_clk_get(dev, "grf");
+       if (PTR_ERR(dp->grfclk) == -ENOENT) {
+               dp->grfclk = NULL;
+       } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
+               return -EPROBE_DEFER;
+       } else if (IS_ERR(dp->grfclk)) {
+               dev_err(dev, "failed to get grf clock\n");
+               return PTR_ERR(dp->grfclk);
+       }
+
        dp->pclk = devm_clk_get(dev, "pclk");
        if (IS_ERR(dp->pclk)) {
                dev_err(dev, "failed to get pclk property\n");
@@ -246,6 +305,7 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
                            void *data)
 {
        struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+       const struct rockchip_dp_chip_data *dp_data;
        struct drm_device *drm_dev = data;
        int ret;
 
@@ -256,10 +316,15 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
         */
        dev_set_drvdata(dev, NULL);
 
+       dp_data = of_device_get_match_data(dev);
+       if (!dp_data)
+               return -ENODEV;
+
        ret = rockchip_dp_init(dp);
        if (ret < 0)
                return ret;
 
+       dp->data = dp_data;
        dp->drm_dev = drm_dev;
 
        ret = rockchip_dp_drm_create_encoder(dp);
@@ -270,9 +335,10 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
 
        dp->plat_data.encoder = &dp->encoder;
 
-       dp->plat_data.dev_type = RK3288_DP;
+       dp->plat_data.dev_type = dp->data->chip_type;
        dp->plat_data.power_on = rockchip_dp_poweron;
        dp->plat_data.power_off = rockchip_dp_powerdown;
+       dp->plat_data.get_modes = rockchip_dp_get_modes;
 
        return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
 }
@@ -292,38 +358,33 @@ static int rockchip_dp_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct device_node *panel_node, *port, *endpoint;
+       struct drm_panel *panel = NULL;
        struct rockchip_dp_device *dp;
-       struct drm_panel *panel;
 
        port = of_graph_get_port_by_id(dev->of_node, 1);
-       if (!port) {
-               dev_err(dev, "can't find output port\n");
-               return -EINVAL;
-       }
-
-       endpoint = of_get_child_by_name(port, "endpoint");
-       of_node_put(port);
-       if (!endpoint) {
-               dev_err(dev, "no output endpoint found\n");
-               return -EINVAL;
-       }
-
-       panel_node = of_graph_get_remote_port_parent(endpoint);
-       of_node_put(endpoint);
-       if (!panel_node) {
-               dev_err(dev, "no output node found\n");
-               return -EINVAL;
-       }
-
-       panel = of_drm_find_panel(panel_node);
-       if (!panel) {
-               DRM_ERROR("failed to find panel\n");
+       if (port) {
+               endpoint = of_get_child_by_name(port, "endpoint");
+               of_node_put(port);
+               if (!endpoint) {
+                       dev_err(dev, "no output endpoint found\n");
+                       return -EINVAL;
+               }
+
+               panel_node = of_graph_get_remote_port_parent(endpoint);
+               of_node_put(endpoint);
+               if (!panel_node) {
+                       dev_err(dev, "no output node found\n");
+                       return -EINVAL;
+               }
+
+               panel = of_drm_find_panel(panel_node);
                of_node_put(panel_node);
-               return -EPROBE_DEFER;
+               if (!panel) {
+                       DRM_ERROR("failed to find panel\n");
+                       return -EPROBE_DEFER;
+               }
        }
 
-       of_node_put(panel_node);
-
        dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
        if (!dp)
                return -ENOMEM;
@@ -356,8 +417,23 @@ static const struct dev_pm_ops rockchip_dp_pm_ops = {
 #endif
 };
 
+static const struct rockchip_dp_chip_data rk3399_edp = {
+       .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+       .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL),
+       .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL),
+       .chip_type = RK3399_EDP,
+};
+
+static const struct rockchip_dp_chip_data rk3288_dp = {
+       .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+       .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL),
+       .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL),
+       .chip_type = RK3288_DP,
+};
+
 static const struct of_device_id rockchip_dp_dt_ids[] = {
-       {.compatible = "rockchip,rk3288-dp",},
+       {.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
+       {.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
        {}
 };
 MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
index 5ad43a1bb260a828649dacb3f095c3619b2c9e0d..494ab257f77c3917ebdeb8cd9aeaa940aac4876b 100644 (file)
@@ -7,5 +7,6 @@ config DRM_STI
        select DRM_KMS_CMA_HELPER
        select DRM_PANEL
        select FW_LOADER
+       select SND_SOC_HDMI_CODEC if SND_SOC
        help
          Choose this option to enable DRM on STM stiH41x chipset
index a516eb869f6fe81c71b8b77106a9b6a13777105b..2da7d6866d5de3d566bf8b40211730d3f3fc8f19 100644 (file)
@@ -6,6 +6,8 @@
 
 #include "sti_awg_utils.h"
 
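+/* delay (in number of pixels) introduced by the AWG processing */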
+#define AWG_DELAY (-5)
+
 #define AWG_OPCODE_OFFSET 10
 #define AWG_MAX_ARG       0x3ff
 
@@ -125,7 +127,7 @@ static int awg_generate_line_signal(
                val = timing->blanking_level;
                ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
 
-               val = timing->trailing_pixels - 1;
+               val = timing->trailing_pixels - 1 + AWG_DELAY;
                ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
        }
 
index 7fab3af7473b26883db2ce980efadf0271c4735a..c7d734dc3cf405a1799accc0216efd54e1dac3d9 100644 (file)
 static void sti_crtc_enable(struct drm_crtc *crtc)
 {
        struct sti_mixer *mixer = to_sti_mixer(crtc);
-       struct device *dev = mixer->dev;
-       struct sti_compositor *compo = dev_get_drvdata(dev);
 
        DRM_DEBUG_DRIVER("\n");
 
        mixer->status = STI_MIXER_READY;
 
-       /* Prepare and enable the compo IP clock */
-       if (mixer->id == STI_MIXER_MAIN) {
-               if (clk_prepare_enable(compo->clk_compo_main))
-                       DRM_INFO("Failed to prepare/enable compo_main clk\n");
-       } else {
-               if (clk_prepare_enable(compo->clk_compo_aux))
-                       DRM_INFO("Failed to prepare/enable compo_aux clk\n");
-       }
-
        drm_crtc_vblank_on(crtc);
 }
 
@@ -57,9 +46,8 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
        struct sti_mixer *mixer = to_sti_mixer(crtc);
        struct device *dev = mixer->dev;
        struct sti_compositor *compo = dev_get_drvdata(dev);
-       struct clk *clk;
+       struct clk *compo_clk, *pix_clk;
        int rate = mode->clock * 1000;
-       int res;
 
        DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
                      crtc->base.id, sti_mixer_to_str(mixer),
@@ -74,32 +62,46 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
                      mode->vsync_start, mode->vsync_end,
                      mode->vtotal, mode->type, mode->flags);
 
-       /* Set rate and prepare/enable pixel clock */
-       if (mixer->id == STI_MIXER_MAIN)
-               clk = compo->clk_pix_main;
-       else
-               clk = compo->clk_pix_aux;
+       if (mixer->id == STI_MIXER_MAIN) {
+               compo_clk = compo->clk_compo_main;
+               pix_clk = compo->clk_pix_main;
+       } else {
+               compo_clk = compo->clk_compo_aux;
+               pix_clk = compo->clk_pix_aux;
+       }
+
+       /* Prepare and enable the compo IP clock */
+       if (clk_prepare_enable(compo_clk)) {
+               DRM_INFO("Failed to prepare/enable compositor clk\n");
+               goto compo_error;
+       }
 
-       res = clk_set_rate(clk, rate);
-       if (res < 0) {
+       /* Set rate and prepare/enable pixel clock */
+       if (clk_set_rate(pix_clk, rate) < 0) {
                DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
-               return -EINVAL;
+               goto pix_error;
        }
-       if (clk_prepare_enable(clk)) {
+       if (clk_prepare_enable(pix_clk)) {
                DRM_ERROR("Failed to prepare/enable pix clk\n");
-               return -EINVAL;
+               goto pix_error;
        }
 
        sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
                        compo->vtg_main : compo->vtg_aux, &crtc->mode);
 
-       res = sti_mixer_active_video_area(mixer, &crtc->mode);
-       if (res) {
+       if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
                DRM_ERROR("Can't set active video area\n");
-               return -EINVAL;
+               goto mixer_error;
        }
 
-       return res;
+       return 0;
+
+mixer_error:
+       clk_disable_unprepare(pix_clk);
+pix_error:
+       clk_disable_unprepare(compo_clk);
+compo_error:
+       return -EINVAL;
 }
 
 static void sti_crtc_disable(struct drm_crtc *crtc)
@@ -130,7 +132,6 @@ static void sti_crtc_disable(struct drm_crtc *crtc)
 static void
 sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
-       sti_crtc_enable(crtc);
        sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
 }
 
@@ -221,9 +222,7 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
 static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
        .enable = sti_crtc_enable,
        .disable = sti_crtc_disabling,
-       .mode_set = drm_helper_crtc_mode_set,
        .mode_set_nofb = sti_crtc_mode_set_nofb,
-       .mode_set_base = drm_helper_crtc_mode_set_base,
        .atomic_begin = sti_crtc_atomic_begin,
        .atomic_flush = sti_crtc_atomic_flush,
 };
index 8d1402b245bf32efd66726408136d51b6c73e443..fedc17f98d9b029c835a7791e023e127c30e72a7 100644 (file)
@@ -18,6 +18,8 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
+#include <sound/hdmi-codec.h>
+
 #include "sti_hdmi.h"
 #include "sti_hdmi_tx3g4c28phy.h"
 #include "sti_hdmi_tx3g0c55phy.h"
@@ -35,6 +37,8 @@
 #define HDMI_DFLT_CHL0_DAT              0x0110
 #define HDMI_DFLT_CHL1_DAT              0x0114
 #define HDMI_DFLT_CHL2_DAT              0x0118
+#define HDMI_AUDIO_CFG                  0x0200
+#define HDMI_SPDIF_FIFO_STATUS          0x0204
 #define HDMI_SW_DI_1_HEAD_WORD          0x0210
 #define HDMI_SW_DI_1_PKT_WORD0          0x0214
 #define HDMI_SW_DI_1_PKT_WORD1          0x0218
@@ -44,6 +48,9 @@
 #define HDMI_SW_DI_1_PKT_WORD5          0x0228
 #define HDMI_SW_DI_1_PKT_WORD6          0x022C
 #define HDMI_SW_DI_CFG                  0x0230
+#define HDMI_SAMPLE_FLAT_MASK           0x0244
+#define HDMI_AUDN                       0x0400
+#define HDMI_AUD_CTS                    0x0404
 #define HDMI_SW_DI_2_HEAD_WORD          0x0600
 #define HDMI_SW_DI_2_PKT_WORD0          0x0604
 #define HDMI_SW_DI_2_PKT_WORD1          0x0608
 #define HDMI_INT_DLL_LCK                BIT(5)
 #define HDMI_INT_NEW_FRAME              BIT(6)
 #define HDMI_INT_GENCTRL_PKT            BIT(7)
+#define HDMI_INT_AUDIO_FIFO_XRUN        BIT(8)
 #define HDMI_INT_SINK_TERM_PRESENT      BIT(11)
 
 #define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
                        | HDMI_INT_GLOBAL)
 
 #define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
+                       | HDMI_INT_AUDIO_FIFO_XRUN \
                        | HDMI_INT_GENCTRL_PKT \
                        | HDMI_INT_NEW_FRAME \
                        | HDMI_INT_DLL_LCK \
 
 #define HDMI_STA_SW_RST                 BIT(1)
 
+#define HDMI_AUD_CFG_8CH               BIT(0)
+#define HDMI_AUD_CFG_SPDIF_DIV_2       BIT(1)
+#define HDMI_AUD_CFG_SPDIF_DIV_3       BIT(2)
+#define HDMI_AUD_CFG_SPDIF_CLK_DIV_4   (BIT(1) | BIT(2))
+#define HDMI_AUD_CFG_CTS_CLK_256FS     BIT(12)
+#define HDMI_AUD_CFG_DTS_INVALID       BIT(16)
+#define HDMI_AUD_CFG_ONE_BIT_INVALID   (BIT(18) | BIT(19) | BIT(20) | BIT(21))
+#define HDMI_AUD_CFG_CH12_VALID        BIT(28)
+#define HDMI_AUD_CFG_CH34_VALID        BIT(29)
+#define HDMI_AUD_CFG_CH56_VALID        BIT(30)
+#define HDMI_AUD_CFG_CH78_VALID        BIT(31)
+
+/* sample flat mask */
+#define HDMI_SAMPLE_FLAT_NO     0
+#define HDMI_SAMPLE_FLAT_SP0 BIT(0)
+#define HDMI_SAMPLE_FLAT_SP1 BIT(1)
+#define HDMI_SAMPLE_FLAT_SP2 BIT(2)
+#define HDMI_SAMPLE_FLAT_SP3 BIT(3)
+#define HDMI_SAMPLE_FLAT_ALL (HDMI_SAMPLE_FLAT_SP0 | HDMI_SAMPLE_FLAT_SP1 |\
+                             HDMI_SAMPLE_FLAT_SP2 | HDMI_SAMPLE_FLAT_SP3)
+
 #define HDMI_INFOFRAME_HEADER_TYPE(x)    (((x) & 0xff) <<  0)
 #define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) <<  8)
 #define HDMI_INFOFRAME_HEADER_LEN(x)     (((x) & 0x0f) << 16)
@@ -171,6 +201,10 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
                wake_up_interruptible(&hdmi->wait_event);
        }
 
+       /* Audio FIFO underrun IRQ */
+       if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
+               DRM_INFO("Warning: audio FIFO underrun occurred!\n");
+
        return IRQ_HANDLED;
 }
 
@@ -441,26 +475,29 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
  */
 static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
 {
-       struct hdmi_audio_infoframe infofame;
+       struct hdmi_audio_params *audio = &hdmi->audio;
        u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
-       int ret;
-
-       ret = hdmi_audio_infoframe_init(&infofame);
-       if (ret < 0) {
-               DRM_ERROR("failed to setup audio infoframe: %d\n", ret);
-               return ret;
-       }
-
-       infofame.channels = 2;
-
-       ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer));
-       if (ret < 0) {
-               DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
-               return ret;
+       int ret, val;
+
+       DRM_DEBUG_DRIVER("enter %s, AIF %s\n", __func__,
+                        audio->enabled ? "enable" : "disable");
+       if (audio->enabled) {
+               /* pack the stored audio parameters */
+               ret = hdmi_audio_infoframe_pack(&audio->cea, buffer,
+                                               sizeof(buffer));
+               if (ret < 0) {
+                       DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
+                       return ret;
+               }
+               hdmi_infoframe_write_infopack(hdmi, buffer, ret);
+       } else {
+               /* disable audio infoframe transmission */
+               val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+               val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK,
+                                            HDMI_IFRAME_SLOT_AUDIO);
+               hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
        }
 
-       hdmi_infoframe_write_infopack(hdmi, buffer, ret);
-
        return 0;
 }
 
@@ -650,6 +687,10 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
        DBGFS_DUMP("", HDMI_SW_DI_CFG);
        hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
 
+       DBGFS_DUMP("\n", HDMI_AUDIO_CFG);
+       DBGFS_DUMP("\n", HDMI_SPDIF_FIFO_STATUS);
+       DBGFS_DUMP("\n", HDMI_AUDN);
+
        seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
                   HDMI_IFRAME_SLOT_AVI);
        DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
@@ -854,6 +895,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
 
        count = drm_add_edid_modes(connector, edid);
        drm_mode_connector_update_edid_property(connector, edid);
+       drm_edid_to_eld(connector, edid);
 
        kfree(edid);
        return count;
@@ -1036,6 +1078,207 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
        return NULL;
 }
 
+/**
+ * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
+ * clocks. Non-coherent means that the audio and TMDS clocks do not share
+ * the same source and may drift relative to each other. In that case the
+ * assumption is that CTS is calculated automatically by the hardware.
+ *
+ * @audio_fs: audio frame clock frequency in Hz
+ *
+ * The values computed are based on the table in the HDMI 1.4b specification.
+ *
+ * Return: the N value.
+ */
+static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
+{
+       unsigned int n;
+
+       switch (audio_fs) {
+       case 32000:
+               n = 4096;
+               break;
+       case 44100:
+               n = 6272;
+               break;
+       case 48000:
+               n = 6144;
+               break;
+       case 88200:
+               n = 6272 * 2;
+               break;
+       case 96000:
+               n = 6144 * 2;
+               break;
+       case 176400:
+               n = 6272 * 4;
+               break;
+       case 192000:
+               n = 6144 * 4;
+               break;
+       default:
+               /* Not pre-defined, recommended value: 128 * fs / 1000 */
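+               /* e.g. audio_fs = 64000 gives n = 128 * 64000 / 1000 = 8192 */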
+               n = (audio_fs * 128) / 1000;
+       }
+
+       return n;
+}
+
+static int hdmi_audio_configure(struct sti_hdmi *hdmi,
+                               struct hdmi_audio_params *params)
+{
+       int audio_cfg, n;
+       struct hdmi_audio_infoframe *info = &params->cea;
+
+       DRM_DEBUG_DRIVER("\n");
+
+       if (!hdmi->enabled)
+               return 0;
+
+       /* update N parameter */
+       n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
+
+       DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
+                        params->sample_rate, hdmi->mode.clock * 1000, n);
+       hdmi_write(hdmi, n, HDMI_AUDN);
+
+       /* update HDMI registers according to configuration */
+       audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+                   HDMI_AUD_CFG_ONE_BIT_INVALID;
+
+       switch (info->channels) {
+       case 8:
+               audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
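+               /* fall through */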
+       case 6:
+               audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
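+               /* fall through */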
+       case 4:
+               audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
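+               /* fall through */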
+       case 2:
+               audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
+               break;
+       default:
+               DRM_ERROR("Unsupported number of channels (%d)!\n",
+                         info->channels);
+               return -EINVAL;
+       }
+
+       hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+       hdmi->audio = *params;
+
+       return hdmi_audio_infoframe_config(hdmi);
+}
+
+static void hdmi_audio_shutdown(struct device *dev, void *data)
+{
+       struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+       int audio_cfg;
+
+       DRM_DEBUG_DRIVER("\n");
+
+       /* disable audio */
+       audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+                   HDMI_AUD_CFG_ONE_BIT_INVALID;
+       hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+       hdmi->audio.enabled = 0;
+       hdmi_audio_infoframe_config(hdmi);
+}
+
+static int hdmi_audio_hw_params(struct device *dev,
+                               void *data,
+                               struct hdmi_codec_daifmt *daifmt,
+                               struct hdmi_codec_params *params)
+{
+       struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+       int ret;
+       struct hdmi_audio_params audio = {
+               .sample_width = params->sample_width,
+               .sample_rate = params->sample_rate,
+               .cea = params->cea,
+       };
+
+       DRM_DEBUG_DRIVER("\n");
+
+       if (!hdmi->enabled)
+               return 0;
+
+       if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
+           daifmt->frame_clk_inv || daifmt->bit_clk_master ||
+           daifmt->frame_clk_master) {
+               dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+                       daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+                       daifmt->bit_clk_master,
+                       daifmt->frame_clk_master);
+               return -EINVAL;
+       }
+
+       audio.enabled = 1;
+
+       ret = hdmi_audio_configure(hdmi, &audio);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+       struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+
+       DRM_DEBUG_DRIVER("%s\n", enable ? "enable" : "disable");
+
+       if (enable)
+               hdmi_write(hdmi, HDMI_SAMPLE_FLAT_ALL, HDMI_SAMPLE_FLAT_MASK);
+       else
+               hdmi_write(hdmi, HDMI_SAMPLE_FLAT_NO, HDMI_SAMPLE_FLAT_MASK);
+
+       return 0;
+}
+
+static int hdmi_audio_get_eld(struct device *dev, void *data,
+                             uint8_t *buf, size_t len)
+{
+       struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+       struct drm_connector *connector = hdmi->drm_connector;
+
+       DRM_DEBUG_DRIVER("\n");
+       memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+
+       return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+       .hw_params = hdmi_audio_hw_params,
+       .audio_shutdown = hdmi_audio_shutdown,
+       .digital_mute = hdmi_audio_digital_mute,
+       .get_eld = hdmi_audio_get_eld,
+};
+
+static int sti_hdmi_register_audio_driver(struct device *dev,
+                                         struct sti_hdmi *hdmi)
+{
+       struct hdmi_codec_pdata codec_data = {
+               .ops = &audio_codec_ops,
+               .max_i2s_channels = 8,
+               .i2s = 1,
+       };
+
+       DRM_DEBUG_DRIVER("\n");
+
+       hdmi->audio.enabled = 0;
+
+       hdmi->audio_pdev = platform_device_register_data(
+               dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+               &codec_data, sizeof(codec_data));
+
+       if (IS_ERR(hdmi->audio_pdev))
+               return PTR_ERR(hdmi->audio_pdev);
+
+       DRM_INFO("%s driver bound to %s\n", HDMI_CODEC_DRV_NAME, dev_name(dev));
+
+       return 0;
+}
+
 static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        struct sti_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1082,12 +1325,27 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
        /* initialise property */
        sti_hdmi_connector_init_property(drm_dev, drm_connector);
 
+       hdmi->drm_connector = drm_connector;
+
        err = drm_mode_connector_attach_encoder(drm_connector, encoder);
        if (err) {
                DRM_ERROR("Failed to attach a connector to a encoder\n");
                goto err_sysfs;
        }
 
+       err = sti_hdmi_register_audio_driver(dev, hdmi);
+       if (err) {
+               DRM_ERROR("Failed to attach an audio codec\n");
+               goto err_sysfs;
+       }
+
+       /* Initialize audio infoframe */
+       err = hdmi_audio_infoframe_init(&hdmi->audio.cea);
+       if (err) {
+               DRM_ERROR("Failed to init audio infoframe\n");
+               goto err_sysfs;
+       }
+
        /* Enable default interrupts */
        hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
 
@@ -1095,6 +1353,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 
 err_sysfs:
        drm_bridge_remove(bridge);
+       hdmi->drm_connector = NULL;
        return -EINVAL;
 }
 
@@ -1244,6 +1503,8 @@ static int sti_hdmi_remove(struct platform_device *pdev)
        struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
 
        i2c_put_adapter(hdmi->ddc_adapt);
+       if (hdmi->audio_pdev)
+               platform_device_unregister(hdmi->audio_pdev);
        component_del(&pdev->dev, &sti_hdmi_ops);
 
        return 0;
index ef3a94583bbdc3857eb48b413fa961675c5cba33..119bc3582ac78fda5c9547e1855c65502557ffd5 100644 (file)
@@ -23,6 +23,13 @@ struct hdmi_phy_ops {
        void (*stop)(struct sti_hdmi *hdmi);
 };
 
+struct hdmi_audio_params {
+       bool enabled;
+       unsigned int sample_width;
+       unsigned int sample_rate;
+       struct hdmi_audio_infoframe cea;
+};
+
 /* values for the framing mode property */
 enum sti_hdmi_modes {
        HDMI_MODE_HDMI,
@@ -67,6 +74,9 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = {
  * @ddc_adapt: i2c ddc adapter
  * @colorspace: current colorspace selected
  * @hdmi_mode: select framing for HDMI or DVI
+ * @audio_pdev: ASoC hdmi-codec platform device
+ * @audio: hdmi audio parameters.
+ * @drm_connector: hdmi connector
  */
 struct sti_hdmi {
        struct device dev;
@@ -89,6 +99,9 @@ struct sti_hdmi {
        struct i2c_adapter *ddc_adapt;
        enum hdmi_colorspace colorspace;
        enum sti_hdmi_modes hdmi_mode;
+       struct platform_device *audio_pdev;
+       struct hdmi_audio_params audio;
+       struct drm_connector *drm_connector;
 };
 
 u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
index 85cee9098439dc62cf80ed4a993cefde20498981..0cf3335ef37c98bae8dac14be1156e284bf10f32 100644 (file)
@@ -45,25 +45,15 @@ const char *sti_plane_to_str(struct sti_plane *plane)
 
 #define STI_FPS_INTERVAL_MS     3000
 
-static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs)
-{
-       struct timespec tmp_ts = timespec_sub(lhs, rhs);
-       u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
-
-       do_div(tmp_ns, NSEC_PER_MSEC);
-
-       return (u32)tmp_ns;
-}
-
 void sti_plane_update_fps(struct sti_plane *plane,
                          bool new_frame,
                          bool new_field)
 {
-       struct timespec now;
+       ktime_t now;
        struct sti_fps_info *fps;
        int fpks, fipks, ms_since_last, num_frames, num_fields;
 
-       getrawmonotonic(&now);
+       now = ktime_get();
 
        /* Compute number of frame updates */
        fps = &plane->fps_info;
@@ -76,7 +66,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
                return;
 
        fps->curr_frame_counter++;
-       ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp);
+       ms_since_last = ktime_to_ms(ktime_sub(now, fps->last_timestamp));
        num_frames = fps->curr_frame_counter - fps->last_frame_counter;
 
        if (num_frames <= 0  || ms_since_last < STI_FPS_INTERVAL_MS)
index 39d39f5b7dd9ade89b2c7cf0c1ccc0c9a2979608..e0ea1dd3bb88984c8880eb9afb7f9e867de9398e 100644 (file)
@@ -55,7 +55,7 @@ struct sti_fps_info {
        unsigned int last_frame_counter;
        unsigned int curr_field_counter;
        unsigned int last_field_counter;
-       struct timespec last_timestamp;
+       ktime_t      last_timestamp;
        char fps_str[FPS_LENGTH];
        char fips_str[FPS_LENGTH];
 };
index 0100c7c668b7e167bc2fef0b7806541d69da9c58..0bdc385eec178663b823df0b83c5ed2a320965e8 100644 (file)
@@ -65,7 +65,7 @@
 #define HDMI_DELAY          (5)
 
 /* Delay introduced by the DVO in nb of pixel */
-#define DVO_DELAY           (2)
+#define DVO_DELAY           (7)
 
 /* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
 #define AWG_DELAY_HD        (-9)
index 99510e64e91aeece76c5cb0abe40764f138ca911..a4b357db8856f436b843811f18d05ebcfde8911a 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_SUN4I
        tristate "DRM Support for Allwinner A10 Display Engine"
-       depends on DRM && ARM
+       depends on DRM && ARM && COMMON_CLK
        depends on ARCH_SUNXI || COMPILE_TEST
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
index f7a15c1a93bf1a8a71eadeda0082dc309ef64a99..3ab560450a82e666795eddc285214391ee763e71 100644 (file)
@@ -190,7 +190,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
        /* Get the physical address of the buffer in memory */
        gem = drm_fb_cma_get_gem_obj(fb, 0);
 
-       DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr);
+       DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
 
        /* Compute the start of the displayed memory */
        bpp = drm_format_plane_cpp(fb->pixel_format, 0);
@@ -198,7 +198,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
        paddr += (state->src_x >> 16) * bpp;
        paddr += (state->src_y >> 16) * fb->pitches[0];
 
-       DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr);
+       DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
 
        /* Write the 32 lower bits of the address (in bits) */
        lo_paddr = paddr << 3;
index 3ff668cb463c2af3f3c1f754d58b00e65fa08d4b..5b3463197c488ae3eac98500eeda121443406304 100644 (file)
@@ -72,14 +72,40 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
 static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long *parent_rate)
 {
-       return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate);
+       unsigned long best_parent = 0;
+       u8 best_div = 1;
+       int i;
+
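+       /*
+        * Scan the 7-bit divider range for a parent rate that divides the
+        * ideal rate exactly; failing that, keep the closest parent rate
+        * below it. Dividers under 6 are assumed unusable on this hardware.
+        */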
+       for (i = 6; i < 127; i++) {
+               unsigned long ideal = rate * i;
+               unsigned long rounded;
+
+               rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
+                                           ideal);
+
+               if (rounded == ideal) {
+                       best_parent = rounded;
+                       best_div = i;
+                       goto out;
+               }
+
+               if ((rounded < ideal) && (rounded > best_parent)) {
+                       best_parent = rounded;
+                       best_div = i;
+               }
+       }
+
+out:
+       *parent_rate = best_parent;
+
+       return best_parent / best_div;
 }
 
 static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long parent_rate)
 {
        struct sun4i_dclk *dclk = hw_to_dclk(hw);
-       int div = DIV_ROUND_CLOSEST(parent_rate, rate);
+       u8 div = parent_rate / rate;
 
        return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
                                  GENMASK(6, 0), div);
@@ -127,10 +153,14 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
        const char *clk_name, *parent_name;
        struct clk_init_data init;
        struct sun4i_dclk *dclk;
+       int ret;
 
        parent_name = __clk_get_name(tcon->sclk0);
-       of_property_read_string_index(dev->of_node, "clock-output-names", 0,
-                                     &clk_name);
+       ret = of_property_read_string_index(dev->of_node,
+                                           "clock-output-names", 0,
+                                           &clk_name);
+       if (ret)
+               return ret;
 
        dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
        if (!dclk)
@@ -140,6 +170,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
        init.ops = &sun4i_dclk_ops;
        init.parent_names = &parent_name;
        init.num_parents = 1;
+       init.flags = CLK_SET_RATE_PARENT;
 
        dclk->regmap = tcon->regs;
        dclk->hw.init = &init;
index 9a67f927a53e34bdc4f2f4e762118bc19818d5dc..5b89940edcb1c58140b30af8876cb9bef65cb7bf 100644 (file)
@@ -97,6 +97,22 @@ static struct drm_driver sun4i_drv_driver = {
        .disable_vblank         = sun4i_drv_disable_vblank,
 };
 
+static void sun4i_remove_framebuffers(void)
+{
+       struct apertures_struct *ap;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       /* The framebuffer can be located anywhere in RAM */
+       ap->ranges[0].base = 0;
+       ap->ranges[0].size = ~0;
+
+       remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
+       kfree(ap);
+}
+
 static int sun4i_drv_bind(struct device *dev)
 {
        struct drm_device *drm;
@@ -140,6 +156,9 @@ static int sun4i_drv_bind(struct device *dev)
        }
        drm->irq_enabled = true;
 
+       /* Remove early framebuffers (ie. simplefb) */
+       sun4i_remove_framebuffers();
+
        /* Create our framebuffer */
        drv->fbdev = sun4i_framebuffer_init(drm);
        if (IS_ERR(drv->fbdev)) {
@@ -166,6 +185,7 @@ static void sun4i_drv_unbind(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
 
+       drm_connector_unregister_all(drm);
        drm_dev_unregister(drm);
        drm_kms_helper_poll_fini(drm);
        sun4i_framebuffer_free(drm);
index 442cfe27168805217fbb53d4fcd47c1cb12d6306..f5bbac6efb4c8c01e5f7cd709de037f168a92eb6 100644 (file)
@@ -54,8 +54,13 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
 static int sun4i_rgb_mode_valid(struct drm_connector *connector,
                                struct drm_display_mode *mode)
 {
+       struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
+       struct sun4i_drv *drv = rgb->drv;
+       struct sun4i_tcon *tcon = drv->tcon;
        u32 hsync = mode->hsync_end - mode->hsync_start;
        u32 vsync = mode->vsync_end - mode->vsync_start;
+       unsigned long rate = mode->clock * 1000;
+       long rounded_rate;
 
        DRM_DEBUG_DRIVER("Validating modes...\n");
 
@@ -87,6 +92,15 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
 
        DRM_DEBUG_DRIVER("Vertical parameters OK\n");
 
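+       /* the dot clock must be matched exactly, otherwise reject the mode */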
+       rounded_rate = clk_round_rate(tcon->dclk, rate);
+       if (rounded_rate < rate)
+               return MODE_CLOCK_LOW;
+
+       if (rounded_rate > rate)
+               return MODE_CLOCK_HIGH;
+
+       DRM_DEBUG_DRIVER("Clock rate OK\n");
+
        return MODE_OK;
 }
 
@@ -193,7 +207,7 @@ int sun4i_rgb_init(struct drm_device *drm)
        int ret;
 
        /* If we don't have a panel, there's no point in going on */
-       if (!tcon->panel)
+       if (IS_ERR(tcon->panel))
                return -ENODEV;
 
        rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
index 9f19b0e08560e681f50bfe00635ecefef18ef885..652385f09735c3f4bb11b713809cc6b67034d459 100644 (file)
@@ -425,11 +425,11 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
 
        remote = of_graph_get_remote_port_parent(end_node);
        if (!remote) {
-               DRM_DEBUG_DRIVER("Enable to parse remote node\n");
+               DRM_DEBUG_DRIVER("Unable to parse remote node\n");
                return ERR_PTR(-EINVAL);
        }
 
-       return of_drm_find_panel(remote);
+       return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
 }
 
 static int sun4i_tcon_bind(struct device *dev, struct device *master,
@@ -490,7 +490,11 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
                return 0;
        }
 
-       return sun4i_rgb_init(drm);
+       ret = sun4i_rgb_init(drm);
+       if (ret < 0)
+               goto err_free_clocks;
+
+       return 0;
 
 err_free_clocks:
        sun4i_tcon_free_clocks(tcon);
@@ -522,12 +526,13 @@ static int sun4i_tcon_probe(struct platform_device *pdev)
         * Defer the probe.
         */
        panel = sun4i_tcon_find_panel(node);
-       if (IS_ERR(panel)) {
-               /*
-                * If we don't have a panel endpoint, just go on
-                */
-               if (PTR_ERR(panel) != -ENODEV)
-                       return -EPROBE_DEFER;
+
+       /*
+        * If we don't have a panel endpoint, just go on
+        */
+       if (PTR_ERR(panel) == -EPROBE_DEFER) {
+               DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
+               return -EPROBE_DEFER;
        }
 
        return component_add(&pdev->dev, &sun4i_tcon_ops);
index 39940f5b7c91416e08cb61797798f9b2e0838ff7..8495bd01b5449e886c723b285199a37cf8e8a520 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/debugfs.h>
 #include <linux/iommu.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
 #include <soc/tegra/pmc.h>
@@ -1216,6 +1217,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
 
        tegra_dc_stats_reset(&dc->stats);
        drm_crtc_vblank_off(crtc);
+
+       pm_runtime_put_sync(dc->dev);
 }
 
 static void tegra_crtc_enable(struct drm_crtc *crtc)
@@ -1225,6 +1228,48 @@ static void tegra_crtc_enable(struct drm_crtc *crtc)
        struct tegra_dc *dc = to_tegra_dc(crtc);
        u32 value;
 
+       pm_runtime_get_sync(dc->dev);
+
+       /* initialize display controller */
+       if (dc->syncpt) {
+               u32 syncpt = host1x_syncpt_id(dc->syncpt);
+
+               value = SYNCPT_CNTRL_NO_STALL;
+               tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+
+               value = SYNCPT_VSYNC_ENABLE | syncpt;
+               tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
+       }
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+       /* initialize timer */
+       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+               WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+               WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+       value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+       if (dc->soc->supports_border_color)
+               tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
+       /* apply PLL and pixel clock changes */
        tegra_dc_commit_state(dc, state);
 
        /* program display mode */
@@ -1685,7 +1730,6 @@ static int tegra_dc_init(struct host1x_client *client)
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_plane *primary = NULL;
        struct drm_plane *cursor = NULL;
-       u32 value;
        int err;
 
        dc->syncpt = host1x_syncpt_request(dc->dev, flags);
@@ -1755,47 +1799,6 @@ static int tegra_dc_init(struct host1x_client *client)
                goto cleanup;
        }
 
-       /* initialize display controller */
-       if (dc->syncpt) {
-               u32 syncpt = host1x_syncpt_id(dc->syncpt);
-
-               value = SYNCPT_CNTRL_NO_STALL;
-               tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
-
-               value = SYNCPT_VSYNC_ENABLE | syncpt;
-               tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
-       }
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
-
-       /* initialize timer */
-       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
-               WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
-       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
-
-       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
-               WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
-       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
-
-       value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
-
-       if (dc->soc->supports_border_color)
-               tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
-
-       tegra_dc_stats_reset(&dc->stats);
-
        return 0;
 
 cleanup:
@@ -1987,33 +1990,15 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return PTR_ERR(dc->rst);
        }
 
+       reset_control_assert(dc->rst);
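+       /*
+        * The controller is left in reset and powered down here; runtime
+        * resume powers the partition back up and deasserts the reset.
+        */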
+
        if (dc->soc->has_powergate) {
                if (dc->pipe == 0)
                        dc->powergate = TEGRA_POWERGATE_DIS;
                else
                        dc->powergate = TEGRA_POWERGATE_DISB;
 
-               err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
-                                                       dc->rst);
-               if (err < 0) {
-                       dev_err(&pdev->dev, "failed to power partition: %d\n",
-                               err);
-                       return err;
-               }
-       } else {
-               err = clk_prepare_enable(dc->clk);
-               if (err < 0) {
-                       dev_err(&pdev->dev, "failed to enable clock: %d\n",
-                               err);
-                       return err;
-               }
-
-               err = reset_control_deassert(dc->rst);
-               if (err < 0) {
-                       dev_err(&pdev->dev, "failed to deassert reset: %d\n",
-                               err);
-                       return err;
-               }
+               tegra_powergate_power_off(dc->powergate);
        }
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2027,16 +2012,19 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
-       INIT_LIST_HEAD(&dc->client.list);
-       dc->client.ops = &dc_client_ops;
-       dc->client.dev = &pdev->dev;
-
        err = tegra_dc_rgb_probe(dc);
        if (err < 0 && err != -ENODEV) {
                dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
                return err;
        }
 
+       platform_set_drvdata(pdev, dc);
+       pm_runtime_enable(&pdev->dev);
+
+       INIT_LIST_HEAD(&dc->client.list);
+       dc->client.ops = &dc_client_ops;
+       dc->client.dev = &pdev->dev;
+
        err = host1x_client_register(&dc->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register host1x client: %d\n",
@@ -2044,8 +2032,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return err;
        }
 
-       platform_set_drvdata(pdev, dc);
-
        return 0;
 }
 
@@ -2067,7 +2053,22 @@ static int tegra_dc_remove(struct platform_device *pdev)
                return err;
        }
 
-       reset_control_assert(dc->rst);
+       pm_runtime_disable(&pdev->dev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dc_suspend(struct device *dev)
+{
+       struct tegra_dc *dc = dev_get_drvdata(dev);
+       int err;
+
+       err = reset_control_assert(dc->rst);
+       if (err < 0) {
+               dev_err(dev, "failed to assert reset: %d\n", err);
+               return err;
+       }
 
        if (dc->soc->has_powergate)
                tegra_powergate_power_off(dc->powergate);
@@ -2077,10 +2078,45 @@ static int tegra_dc_remove(struct platform_device *pdev)
        return 0;
 }
 
+static int tegra_dc_resume(struct device *dev)
+{
+       struct tegra_dc *dc = dev_get_drvdata(dev);
+       int err;
+
+       if (dc->soc->has_powergate) {
+               err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+                                                       dc->rst);
+               if (err < 0) {
+                       dev_err(dev, "failed to power partition: %d\n", err);
+                       return err;
+               }
+       } else {
+               err = clk_prepare_enable(dc->clk);
+               if (err < 0) {
+                       dev_err(dev, "failed to enable clock: %d\n", err);
+                       return err;
+               }
+
+               err = reset_control_deassert(dc->rst);
+               if (err < 0) {
+                       dev_err(dev, "failed to deassert reset: %d\n", err);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_dc_pm_ops = {
+       SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL)
+};
+
 struct platform_driver tegra_dc_driver = {
        .driver = {
                .name = "tegra-dc",
                .of_match_table = tegra_dc_of_match,
+               .pm = &tegra_dc_pm_ops,
        },
        .probe = tegra_dc_probe,
        .remove = tegra_dc_remove,
index b24a0f14821a53dc65d70c46440a9b1feff92824..059f409556d55dd1c617917b60ce728d7d3a67a4 100644 (file)
@@ -12,6 +12,9 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/of_gpio.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/regulator/consumer.h>
@@ -44,6 +47,11 @@ struct tegra_dpaux {
        struct completion complete;
        struct work_struct work;
        struct list_head list;
+
+#ifdef CONFIG_GENERIC_PINCONF
+       struct pinctrl_dev *pinctrl;
+       struct pinctrl_desc desc;
+#endif
 };
 
 static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
@@ -267,6 +275,148 @@ static irqreturn_t tegra_dpaux_irq(int irq, void *data)
        return ret;
 }
 
+enum tegra_dpaux_functions {
+       DPAUX_PADCTL_FUNC_AUX,
+       DPAUX_PADCTL_FUNC_I2C,
+       DPAUX_PADCTL_FUNC_OFF,
+};
+
+static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux)
+{
+       u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+       value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static void tegra_dpaux_pad_power_up(struct tegra_dpaux *dpaux)
+{
+       u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+       value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
+{
+       u32 value;
+
+       switch (function) {
+       case DPAUX_PADCTL_FUNC_AUX:
+               value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
+                       DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
+                       DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+                       DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
+                       DPAUX_HYBRID_PADCTL_MODE_AUX;
+               break;
+
+       case DPAUX_PADCTL_FUNC_I2C:
+               value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+                       DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+                       DPAUX_HYBRID_PADCTL_MODE_I2C;
+               break;
+
+       case DPAUX_PADCTL_FUNC_OFF:
+               tegra_dpaux_pad_power_down(dpaux);
+               return 0;
+
+       default:
+               return -ENOTSUPP;
+       }
+
+       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+       tegra_dpaux_pad_power_up(dpaux);
+
+       return 0;
+}
+
+#ifdef CONFIG_GENERIC_PINCONF
+static const struct pinctrl_pin_desc tegra_dpaux_pins[] = {
+       PINCTRL_PIN(0, "DP_AUX_CHx_P"),
+       PINCTRL_PIN(1, "DP_AUX_CHx_N"),
+};
+
+static const unsigned tegra_dpaux_pin_numbers[] = { 0, 1 };
+
+static const char * const tegra_dpaux_groups[] = {
+       "dpaux-io",
+};
+
+static const char * const tegra_dpaux_functions[] = {
+       "aux",
+       "i2c",
+       "off",
+};
+
+static int tegra_dpaux_get_groups_count(struct pinctrl_dev *pinctrl)
+{
+       return ARRAY_SIZE(tegra_dpaux_groups);
+}
+
+static const char *tegra_dpaux_get_group_name(struct pinctrl_dev *pinctrl,
+                                             unsigned int group)
+{
+       return tegra_dpaux_groups[group];
+}
+
+static int tegra_dpaux_get_group_pins(struct pinctrl_dev *pinctrl,
+                                     unsigned group, const unsigned **pins,
+                                     unsigned *num_pins)
+{
+       *pins = tegra_dpaux_pin_numbers;
+       *num_pins = ARRAY_SIZE(tegra_dpaux_pin_numbers);
+
+       return 0;
+}
+
+static const struct pinctrl_ops tegra_dpaux_pinctrl_ops = {
+       .get_groups_count = tegra_dpaux_get_groups_count,
+       .get_group_name = tegra_dpaux_get_group_name,
+       .get_group_pins = tegra_dpaux_get_group_pins,
+       .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+       .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int tegra_dpaux_get_functions_count(struct pinctrl_dev *pinctrl)
+{
+       return ARRAY_SIZE(tegra_dpaux_functions);
+}
+
+static const char *tegra_dpaux_get_function_name(struct pinctrl_dev *pinctrl,
+                                                unsigned int function)
+{
+       return tegra_dpaux_functions[function];
+}
+
+static int tegra_dpaux_get_function_groups(struct pinctrl_dev *pinctrl,
+                                          unsigned int function,
+                                          const char * const **groups,
+                                          unsigned * const num_groups)
+{
+       *num_groups = ARRAY_SIZE(tegra_dpaux_groups);
+       *groups = tegra_dpaux_groups;
+
+       return 0;
+}
+
+static int tegra_dpaux_set_mux(struct pinctrl_dev *pinctrl,
+                              unsigned int function, unsigned int group)
+{
+       struct tegra_dpaux *dpaux = pinctrl_dev_get_drvdata(pinctrl);
+
+       return tegra_dpaux_pad_config(dpaux, function);
+}
+
+static const struct pinmux_ops tegra_dpaux_pinmux_ops = {
+       .get_functions_count = tegra_dpaux_get_functions_count,
+       .get_function_name = tegra_dpaux_get_function_name,
+       .get_function_groups = tegra_dpaux_get_function_groups,
+       .set_mux = tegra_dpaux_set_mux,
+};
+#endif
+
 static int tegra_dpaux_probe(struct platform_device *pdev)
 {
        struct tegra_dpaux *dpaux;
@@ -294,11 +444,14 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
-       dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
-       if (IS_ERR(dpaux->rst)) {
-               dev_err(&pdev->dev, "failed to get reset control: %ld\n",
-                       PTR_ERR(dpaux->rst));
-               return PTR_ERR(dpaux->rst);
+       if (!pdev->dev.pm_domain) {
+               dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
+               if (IS_ERR(dpaux->rst)) {
+                       dev_err(&pdev->dev,
+                               "failed to get reset control: %ld\n",
+                               PTR_ERR(dpaux->rst));
+                       return PTR_ERR(dpaux->rst);
+               }
        }
 
        dpaux->clk = devm_clk_get(&pdev->dev, NULL);
@@ -315,34 +468,37 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
                return err;
        }
 
-       reset_control_deassert(dpaux->rst);
+       if (dpaux->rst)
+               reset_control_deassert(dpaux->rst);
 
        dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
        if (IS_ERR(dpaux->clk_parent)) {
                dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
                        PTR_ERR(dpaux->clk_parent));
-               return PTR_ERR(dpaux->clk_parent);
+               err = PTR_ERR(dpaux->clk_parent);
+               goto assert_reset;
        }
 
        err = clk_prepare_enable(dpaux->clk_parent);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
                        err);
-               return err;
+               goto assert_reset;
        }
 
        err = clk_set_rate(dpaux->clk_parent, 270000000);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
                        err);
-               return err;
+               goto disable_parent_clk;
        }
 
        dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
        if (IS_ERR(dpaux->vdd)) {
                dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
                        PTR_ERR(dpaux->vdd));
-               return PTR_ERR(dpaux->vdd);
+               err = PTR_ERR(dpaux->vdd);
+               goto disable_parent_clk;
        }
 
        err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
@@ -350,7 +506,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
        if (err < 0) {
                dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
                        dpaux->irq, err);
-               return err;
+               goto disable_parent_clk;
        }
 
        disable_irq(dpaux->irq);
@@ -360,7 +516,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 
        err = drm_dp_aux_register(&dpaux->aux);
        if (err < 0)
-               return err;
+               goto disable_parent_clk;
 
        /*
         * Assume that by default the DPAUX/I2C pads will be used for HDMI,
@@ -370,16 +526,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
         * is no possibility to perform the I2C mode configuration in the
         * HDMI path.
         */
-       value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
-       value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
-
-       value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
-       value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
-               DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
-               DPAUX_HYBRID_PADCTL_MODE_I2C;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+       err = tegra_dpaux_pad_config(dpaux, DPAUX_HYBRID_PADCTL_MODE_I2C);
+       if (err < 0)
+               return err;
 
+#ifdef CONFIG_GENERIC_PINCONF
+       dpaux->desc.name = dev_name(&pdev->dev);
+       dpaux->desc.pins = tegra_dpaux_pins;
+       dpaux->desc.npins = ARRAY_SIZE(tegra_dpaux_pins);
+       dpaux->desc.pctlops = &tegra_dpaux_pinctrl_ops;
+       dpaux->desc.pmxops = &tegra_dpaux_pinmux_ops;
+       dpaux->desc.owner = THIS_MODULE;
+
+       dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
+       if (IS_ERR(dpaux->pinctrl)) {
+               dev_err(&pdev->dev, "failed to register pinctrl\n");
+               return PTR_ERR(dpaux->pinctrl);
+       }
+#endif
        /* enable and clear all interrupts */
        value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
                DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -393,17 +557,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, dpaux);
 
        return 0;
+
+disable_parent_clk:
+       clk_disable_unprepare(dpaux->clk_parent);
+assert_reset:
+       if (dpaux->rst)
+               reset_control_assert(dpaux->rst);
+
+       clk_disable_unprepare(dpaux->clk);
+
+       return err;
 }
 
 static int tegra_dpaux_remove(struct platform_device *pdev)
 {
        struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
-       u32 value;
 
        /* make sure pads are powered down when not in use */
-       value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
-       value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+       tegra_dpaux_pad_power_down(dpaux);
 
        drm_dp_aux_unregister(&dpaux->aux);
 
@@ -414,7 +585,10 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
        cancel_work_sync(&dpaux->work);
 
        clk_disable_unprepare(dpaux->clk_parent);
-       reset_control_assert(dpaux->rst);
+
+       if (dpaux->rst)
+               reset_control_assert(dpaux->rst);
+
        clk_disable_unprepare(dpaux->clk);
 
        return 0;
@@ -528,30 +702,15 @@ enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
 int drm_dp_aux_enable(struct drm_dp_aux *aux)
 {
        struct tegra_dpaux *dpaux = to_dpaux(aux);
-       u32 value;
-
-       value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
-               DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
-               DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
-               DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
-               DPAUX_HYBRID_PADCTL_MODE_AUX;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
-
-       value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
-       value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
 
-       return 0;
+       return tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_AUX);
 }
 
 int drm_dp_aux_disable(struct drm_dp_aux *aux)
 {
        struct tegra_dpaux *dpaux = to_dpaux(aux);
-       u32 value;
 
-       value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
-       value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+       tegra_dpaux_pad_power_down(dpaux);
 
        return 0;
 }
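
The tegra_dpaux_pad_config() and tegra_dpaux_pad_power_down() helpers called above are added earlier in this patch, in hunks not shown here. A minimal sketch of the power-down helper, reconstructed from the open-coded register accesses it replaces (the actual implementation in the patch may differ):

/* Sketch only: mirrors the removed read-modify-write of DPAUX_HYBRID_SPARE. */
static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux)
{
        u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);

        value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;

        tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
}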
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index a177a42a98491a8baee7894852686f29e6c2fdd7..755264d9db229be535a05654250cd69e6876a722 100644
@@ -56,8 +56,8 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
         */
 
        drm_atomic_helper_commit_modeset_disables(drm, state);
-       drm_atomic_helper_commit_planes(drm, state, false);
        drm_atomic_helper_commit_modeset_enables(drm, state);
+       drm_atomic_helper_commit_planes(drm, state, true);
 
        drm_atomic_helper_wait_for_vblanks(drm, state);
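
Swapping the two commit calls above defers plane programming until the CRTCs and encoders are enabled, and flipping the third argument (active_only) to true makes the helper skip plane updates on CRTCs that are not active. As a sketch, the tail of the commit function then follows the usual atomic-helper sequence (the cleanup call is assumed from that pattern, not shown in this hunk):

/* disable first, then enable, then touch planes on active CRTCs only */
drm_atomic_helper_commit_modeset_disables(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
drm_atomic_helper_commit_planes(drm, state, true);

drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);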
 
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 099cccb2fbcb5becd8dafe4f4afe23d5cd8b54b9..3d228ad90e0f14a9c8ad5656738afd0c8cc33cca 100644
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
 #include <linux/regulator/consumer.h>
@@ -677,6 +678,45 @@ static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
        tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
 }
 
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+       u32 value;
+
+       value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
+
+       return 0;
+}
+
+static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
+{
+       u32 value;
+
+       /*
+        * XXX Is this still needed? The module reset is deasserted right
+        * before this function is called.
+        */
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+       /* start calibration */
+       tegra_dsi_pad_enable(dsi);
+
+       value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+               DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+               DSI_PAD_OUT_CLK(0x0);
+       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+       value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+               DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
+       return tegra_mipi_calibrate(dsi->mipi);
+}
+
 static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
                                  unsigned int vrefresh)
 {
@@ -836,7 +876,7 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 
        tegra_dsi_disable(dsi);
 
-       return;
+       pm_runtime_put(dsi->dev);
 }
 
 static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -847,6 +887,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
        struct tegra_dsi *dsi = to_dsi(output);
        struct tegra_dsi_state *state;
        u32 value;
+       int err;
+
+       pm_runtime_get_sync(dsi->dev);
+
+       err = tegra_dsi_pad_calibrate(dsi);
+       if (err < 0)
+               dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
 
        state = tegra_dsi_get_state(dsi);
 
@@ -875,8 +922,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
 
        if (output->panel)
                drm_panel_enable(output->panel);
-
-       return;
 }
 
 static int
@@ -966,55 +1011,12 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
        .atomic_check = tegra_dsi_encoder_atomic_check,
 };
 
-static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
-{
-       u32 value;
-
-       value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
-       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
-
-       return 0;
-}
-
-static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
-{
-       u32 value;
-
-       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
-       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
-       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
-       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
-       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
-
-       /* start calibration */
-       tegra_dsi_pad_enable(dsi);
-
-       value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
-               DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
-               DSI_PAD_OUT_CLK(0x0);
-       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
-
-       value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
-               DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
-       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
-
-       return tegra_mipi_calibrate(dsi->mipi);
-}
-
 static int tegra_dsi_init(struct host1x_client *client)
 {
        struct drm_device *drm = dev_get_drvdata(client->parent);
        struct tegra_dsi *dsi = host1x_client_to_dsi(client);
        int err;
 
-       reset_control_deassert(dsi->rst);
-
-       err = tegra_dsi_pad_calibrate(dsi);
-       if (err < 0) {
-               dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
-               goto reset;
-       }
-
        /* Gangsters must not register their own outputs. */
        if (!dsi->master) {
                dsi->output.dev = client->dev;
@@ -1037,12 +1039,9 @@ static int tegra_dsi_init(struct host1x_client *client)
                drm_connector_register(&dsi->output.connector);
 
                err = tegra_output_init(drm, &dsi->output);
-               if (err < 0) {
-                       dev_err(client->dev,
-                               "failed to initialize output: %d\n",
+               if (err < 0)
+                       dev_err(dsi->dev, "failed to initialize output: %d\n",
                                err);
-                       goto reset;
-               }
 
                dsi->output.encoder.possible_crtcs = 0x3;
        }
@@ -1054,10 +1053,6 @@ static int tegra_dsi_init(struct host1x_client *client)
        }
 
        return 0;
-
-reset:
-       reset_control_assert(dsi->rst);
-       return err;
 }
 
 static int tegra_dsi_exit(struct host1x_client *client)
@@ -1069,7 +1064,7 @@ static int tegra_dsi_exit(struct host1x_client *client)
        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_dsi_debugfs_exit(dsi);
 
-       reset_control_assert(dsi->rst);
+       regulator_disable(dsi->vdd);
 
        return 0;
 }
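
The reset handling above follows from the PM-domain check introduced in probe: on SoCs where the DSI module sits in a power partition managed through a PM domain, the domain code owns the module reset, so the driver only claims the "dsi" reset line when no domain is attached and guards every reset operation on dsi->rst. The pattern, as a sketch consolidated from the hunks in this file:

/* only control the reset directly when no PM domain owns it */
if (!pdev->dev.pm_domain) {
        dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
        if (IS_ERR(dsi->rst))
                return PTR_ERR(dsi->rst);
}

/* ... anywhere the reset is toggled ... */
if (dsi->rst)
        reset_control_assert(dsi->rst);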
@@ -1493,74 +1488,50 @@ static int tegra_dsi_probe(struct platform_device *pdev)
        dsi->format = MIPI_DSI_FMT_RGB888;
        dsi->lanes = 4;
 
-       dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
-       if (IS_ERR(dsi->rst))
-               return PTR_ERR(dsi->rst);
+       if (!pdev->dev.pm_domain) {
+               dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+               if (IS_ERR(dsi->rst))
+                       return PTR_ERR(dsi->rst);
+       }
 
        dsi->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(dsi->clk)) {
                dev_err(&pdev->dev, "cannot get DSI clock\n");
-               err = PTR_ERR(dsi->clk);
-               goto reset;
-       }
-
-       err = clk_prepare_enable(dsi->clk);
-       if (err < 0) {
-               dev_err(&pdev->dev, "cannot enable DSI clock\n");
-               goto reset;
+               return PTR_ERR(dsi->clk);
        }
 
        dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
        if (IS_ERR(dsi->clk_lp)) {
                dev_err(&pdev->dev, "cannot get low-power clock\n");
-               err = PTR_ERR(dsi->clk_lp);
-               goto disable_clk;
-       }
-
-       err = clk_prepare_enable(dsi->clk_lp);
-       if (err < 0) {
-               dev_err(&pdev->dev, "cannot enable low-power clock\n");
-               goto disable_clk;
+               return PTR_ERR(dsi->clk_lp);
        }
 
        dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
        if (IS_ERR(dsi->clk_parent)) {
                dev_err(&pdev->dev, "cannot get parent clock\n");
-               err = PTR_ERR(dsi->clk_parent);
-               goto disable_clk_lp;
+               return PTR_ERR(dsi->clk_parent);
        }
 
        dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
        if (IS_ERR(dsi->vdd)) {
                dev_err(&pdev->dev, "cannot get VDD supply\n");
-               err = PTR_ERR(dsi->vdd);
-               goto disable_clk_lp;
-       }
-
-       err = regulator_enable(dsi->vdd);
-       if (err < 0) {
-               dev_err(&pdev->dev, "cannot enable VDD supply\n");
-               goto disable_clk_lp;
+               return PTR_ERR(dsi->vdd);
        }
 
        err = tegra_dsi_setup_clocks(dsi);
        if (err < 0) {
                dev_err(&pdev->dev, "cannot setup clocks\n");
-               goto disable_vdd;
+               return err;
        }
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
-       if (IS_ERR(dsi->regs)) {
-               err = PTR_ERR(dsi->regs);
-               goto disable_vdd;
-       }
+       if (IS_ERR(dsi->regs))
+               return PTR_ERR(dsi->regs);
 
        dsi->mipi = tegra_mipi_request(&pdev->dev);
-       if (IS_ERR(dsi->mipi)) {
-               err = PTR_ERR(dsi->mipi);
-               goto disable_vdd;
-       }
+       if (IS_ERR(dsi->mipi))
+               return PTR_ERR(dsi->mipi);
 
        dsi->host.ops = &tegra_dsi_host_ops;
        dsi->host.dev = &pdev->dev;
@@ -1571,6 +1542,9 @@ static int tegra_dsi_probe(struct platform_device *pdev)
                goto mipi_free;
        }
 
+       platform_set_drvdata(pdev, dsi);
+       pm_runtime_enable(&pdev->dev);
+
        INIT_LIST_HEAD(&dsi->client.list);
        dsi->client.ops = &dsi_client_ops;
        dsi->client.dev = &pdev->dev;
@@ -1582,22 +1556,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
                goto unregister;
        }
 
-       platform_set_drvdata(pdev, dsi);
-
        return 0;
 
 unregister:
        mipi_dsi_host_unregister(&dsi->host);
 mipi_free:
        tegra_mipi_free(dsi->mipi);
-disable_vdd:
-       regulator_disable(dsi->vdd);
-disable_clk_lp:
-       clk_disable_unprepare(dsi->clk_lp);
-disable_clk:
-       clk_disable_unprepare(dsi->clk);
-reset:
-       reset_control_assert(dsi->rst);
        return err;
 }
 
@@ -1606,6 +1570,8 @@ static int tegra_dsi_remove(struct platform_device *pdev)
        struct tegra_dsi *dsi = platform_get_drvdata(pdev);
        int err;
 
+       pm_runtime_disable(&pdev->dev);
+
        err = host1x_client_unregister(&dsi->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -1618,14 +1584,82 @@ static int tegra_dsi_remove(struct platform_device *pdev)
        mipi_dsi_host_unregister(&dsi->host);
        tegra_mipi_free(dsi->mipi);
 
-       regulator_disable(dsi->vdd);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dsi_suspend(struct device *dev)
+{
+       struct tegra_dsi *dsi = dev_get_drvdata(dev);
+       int err;
+
+       if (dsi->rst) {
+               err = reset_control_assert(dsi->rst);
+               if (err < 0) {
+                       dev_err(dev, "failed to assert reset: %d\n", err);
+                       return err;
+               }
+       }
+
+       usleep_range(1000, 2000);
+
        clk_disable_unprepare(dsi->clk_lp);
        clk_disable_unprepare(dsi->clk);
-       reset_control_assert(dsi->rst);
+
+       regulator_disable(dsi->vdd);
 
        return 0;
 }
 
+static int tegra_dsi_resume(struct device *dev)
+{
+       struct tegra_dsi *dsi = dev_get_drvdata(dev);
+       int err;
+
+       err = regulator_enable(dsi->vdd);
+       if (err < 0) {
+               dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err);
+               return err;
+       }
+
+       err = clk_prepare_enable(dsi->clk);
+       if (err < 0) {
+               dev_err(dev, "cannot enable DSI clock: %d\n", err);
+               goto disable_vdd;
+       }
+
+       err = clk_prepare_enable(dsi->clk_lp);
+       if (err < 0) {
+               dev_err(dev, "cannot enable low-power clock: %d\n", err);
+               goto disable_clk;
+       }
+
+       usleep_range(1000, 2000);
+
+       if (dsi->rst) {
+               err = reset_control_deassert(dsi->rst);
+               if (err < 0) {
+                       dev_err(dev, "cannot deassert reset: %d\n", err);
+                       goto disable_clk_lp;
+               }
+       }
+
+       return 0;
+
+disable_clk_lp:
+       clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+       clk_disable_unprepare(dsi->clk);
+disable_vdd:
+       regulator_disable(dsi->vdd);
+       return err;
+}
+#endif
+
+static const struct dev_pm_ops tegra_dsi_pm_ops = {
+       SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL)
+};
+
 static const struct of_device_id tegra_dsi_of_match[] = {
        { .compatible = "nvidia,tegra210-dsi", },
        { .compatible = "nvidia,tegra132-dsi", },
@@ -1639,6 +1673,7 @@ struct platform_driver tegra_dsi_driver = {
        .driver = {
                .name = "tegra-dsi",
                .of_match_table = tegra_dsi_of_match,
+               .pm = &tegra_dsi_pm_ops,
        },
        .probe = tegra_dsi_probe,
        .remove = tegra_dsi_remove,
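
With this change the clocks, the VDD supply and the module reset are no longer brought up in probe and torn down in remove; they move into the runtime-PM callbacks. The pm_runtime_get_sync() in tegra_dsi_encoder_enable() synchronously invokes tegra_dsi_resume(), and the pm_runtime_put() in tegra_dsi_encoder_disable() lets tegra_dsi_suspend() run once the device goes idle. A minimal sketch of that consumer pattern (the function names here are illustrative, not part of the patch):

static void example_encoder_enable(struct tegra_dsi *dsi)
{
        /* powers up VDD, clocks and reset via tegra_dsi_resume() */
        pm_runtime_get_sync(dsi->dev);

        /* ... program the now-powered hardware ... */
}

static void example_encoder_disable(struct tegra_dsi *dsi)
{
        /* ... quiesce the hardware ... */

        /* drops the reference; tegra_dsi_suspend() runs when idle */
        pm_runtime_put(dsi->dev);
}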
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 2fdb8796443e70374fb42825e9dbbe18d1605c21..cda0491ed6bf80f2e77fd668adaff5d1233d7e94 100644
@@ -11,6 +11,7 @@
 #include <linux/debugfs.h>
 #include <linux/gpio.h>
 #include <linux/hdmi.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 
+#include <sound/hda_verbs.h>
+
 #include "hdmi.h"
 #include "drm.h"
 #include "dc.h"
 
+#define HDMI_ELD_BUFFER_SIZE 96
+
 struct tmds_config {
        unsigned int pclk;
        u32 pll0;
@@ -39,6 +44,8 @@ struct tegra_hdmi_config {
        u32 fuse_override_value;
 
        bool has_sor_io_peak_current;
+       bool has_hda;
+       bool has_hbr;
 };
 
 struct tegra_hdmi {
@@ -60,7 +67,10 @@ struct tegra_hdmi {
        const struct tegra_hdmi_config *config;
 
        unsigned int audio_source;
-       unsigned int audio_freq;
+       unsigned int audio_sample_rate;
+       unsigned int audio_channels;
+
+       unsigned int pixel_clock;
        bool stereo;
        bool dvi;
 
@@ -402,11 +412,11 @@ static const struct tmds_config tegra124_tmds_config[] = {
 };
 
 static const struct tegra_hdmi_audio_config *
-tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+tegra_hdmi_get_audio_config(unsigned int sample_rate, unsigned int pclk)
 {
        const struct tegra_hdmi_audio_config *table;
 
-       switch (audio_freq) {
+       switch (sample_rate) {
        case 32000:
                table = tegra_hdmi_audio_32k;
                break;
@@ -476,44 +486,114 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
        }
 }
 
-static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value)
+{
+       static const struct {
+               unsigned int sample_rate;
+               unsigned int offset;
+       } regs[] = {
+               {  32000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 },
+               {  44100, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 },
+               {  48000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 },
+               {  88200, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 },
+               {  96000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 },
+               { 176400, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 },
+               { 192000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 },
+       };
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(regs); i++) {
+               if (regs[i].sample_rate == hdmi->audio_sample_rate) {
+                       tegra_hdmi_writel(hdmi, value, regs[i].offset);
+                       break;
+               }
+       }
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
 {
-       struct device_node *node = hdmi->dev->of_node;
        const struct tegra_hdmi_audio_config *config;
-       unsigned int offset = 0;
-       u32 value;
+       u32 source, value;
 
        switch (hdmi->audio_source) {
        case HDA:
-               value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+               if (hdmi->config->has_hda)
+                       source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+               else
+                       return -EINVAL;
+
                break;
 
        case SPDIF:
-               value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+               if (hdmi->config->has_hda)
+                       source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+               else
+                       source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
                break;
 
        default:
-               value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+               if (hdmi->config->has_hda)
+                       source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+               else
+                       source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
                break;
        }
 
-       if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
-               value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
-                        AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
-               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
-       } else {
-               value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+       /*
+        * Tegra30 and later use a slightly modified version of the register
+        * layout to accommodate changes related to supporting HDA as the
+        * audio input source for HDMI. The source select field has moved to
+        * the SOR_AUDIO_CNTRL0 register, but the error tolerance and frames
+        * per block fields remain in the AUDIO_CNTRL0 register.
+        */
+       if (hdmi->config->has_hda) {
+               /*
+                * Inject null samples into the audio FIFO for every frame in
+                * which the codec did not receive any samples. This applies
+                * to stereo LPCM only.
+                *
+                * XXX: This seems to be a remnant of MCP days when this was
+                * used to work around issues with monitors not being able to
+                * play back system startup sounds early. It is possibly not
+                * needed on Linux at all.
+                */
+               if (hdmi->audio_channels == 2)
+                       value = SOR_AUDIO_CNTRL0_INJECT_NULLSMPL;
+               else
+                       value = 0;
+
+               value |= source;
+
                tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+       }
 
-               value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
-                       AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
-               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+       /*
+        * On Tegra20, HDA is not a supported audio source and the source
+        * select field is part of the AUDIO_CNTRL0 register.
+        */
+       value = AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
+               AUDIO_CNTRL0_ERROR_TOLERANCE(6);
+
+       if (!hdmi->config->has_hda)
+               value |= source;
+
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+
+       /*
+        * Advertise support for High Bit-Rate on Tegra114 and later.
+        */
+       if (hdmi->config->has_hbr) {
+               value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+               value |= SOR_AUDIO_SPARE0_HBR_ENABLE;
+               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
        }
 
-       config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+       config = tegra_hdmi_get_audio_config(hdmi->audio_sample_rate,
+                                            hdmi->pixel_clock);
        if (!config) {
-               dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
-                       hdmi->audio_freq, pclk);
+               dev_err(hdmi->dev,
+                       "cannot set audio to %u Hz at %u Hz pixel clock\n",
+                       hdmi->audio_sample_rate, hdmi->pixel_clock);
                return -EINVAL;
        }
 
@@ -526,8 +606,8 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
        tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
                          HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
 
-       value = ACR_SUBPACK_CTS(config->cts);
-       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+       tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
+                         HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
 
        value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
@@ -536,43 +616,53 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
        value &= ~AUDIO_N_RESETF;
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
 
-       if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
-               switch (hdmi->audio_freq) {
-               case 32000:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
-                       break;
+       if (hdmi->config->has_hda)
+               tegra_hdmi_write_aval(hdmi, config->aval);
 
-               case 44100:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
-                       break;
+       tegra_hdmi_setup_audio_fs_tables(hdmi);
 
-               case 48000:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
-                       break;
+       return 0;
+}
 
-               case 88200:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
-                       break;
+static void tegra_hdmi_disable_audio(struct tegra_hdmi *hdmi)
+{
+       u32 value;
 
-               case 96000:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
-                       break;
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       value &= ~GENERIC_CTRL_AUDIO;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
 
-               case 176400:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
-                       break;
+static void tegra_hdmi_enable_audio(struct tegra_hdmi *hdmi)
+{
+       u32 value;
 
-               case 192000:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
-                       break;
-               }
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       value |= GENERIC_CTRL_AUDIO;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
 
-               tegra_hdmi_writel(hdmi, config->aval, offset);
-       }
+static void tegra_hdmi_write_eld(struct tegra_hdmi *hdmi)
+{
+       size_t length = drm_eld_size(hdmi->output.connector.eld), i;
+       u32 value;
 
-       tegra_hdmi_setup_audio_fs_tables(hdmi);
+       for (i = 0; i < length; i++)
+               tegra_hdmi_writel(hdmi, i << 8 | hdmi->output.connector.eld[i],
+                                 HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
 
-       return 0;
+       /*
+        * The HDA codec will always report an ELD buffer size of 96 bytes and
+        * the HDA codec driver will check that each byte read from the buffer
+        * is valid. Therefore every byte must be written, even if fewer than
+        * 96 bytes were parsed from the EDID.
+        */
+       for (i = length; i < HDMI_ELD_BUFFER_SIZE; i++)
+               tegra_hdmi_writel(hdmi, i << 8 | 0,
+                                 HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+
+       value = SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
 }
 
 static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size)
@@ -644,12 +734,6 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
        u8 buffer[17];
        ssize_t err;
 
-       if (hdmi->dvi) {
-               tegra_hdmi_writel(hdmi, 0,
-                                 HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
-               return;
-       }
-
        err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
        if (err < 0) {
                dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
@@ -663,9 +747,24 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
        }
 
        tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
 
-       tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
-                         HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+       value &= ~INFOFRAME_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+       value |= INFOFRAME_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
 }
 
 static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
@@ -674,12 +773,6 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
        u8 buffer[14];
        ssize_t err;
 
-       if (hdmi->dvi) {
-               tegra_hdmi_writel(hdmi, 0,
-                                 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
-               return;
-       }
-
        err = hdmi_audio_infoframe_init(&frame);
        if (err < 0) {
                dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
@@ -687,7 +780,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
                return;
        }
 
-       frame.channels = 2;
+       frame.channels = hdmi->audio_channels;
 
        err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
        if (err < 0) {
@@ -703,9 +796,24 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
         * bytes can be programmed.
         */
        tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
+}
 
-       tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
-                         HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+static void tegra_hdmi_disable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+       value &= ~INFOFRAME_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+       value |= INFOFRAME_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
 }
 
 static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
@@ -713,14 +821,6 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
        struct hdmi_vendor_infoframe frame;
        u8 buffer[10];
        ssize_t err;
-       u32 value;
-
-       if (!hdmi->stereo) {
-               value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
-               value &= ~GENERIC_CTRL_ENABLE;
-               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
-               return;
-       }
 
        hdmi_vendor_infoframe_init(&frame);
        frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
@@ -733,6 +833,20 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
        }
 
        tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       value &= ~GENERIC_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_enable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
 
        value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
        value |= GENERIC_CTRL_ENABLE;
@@ -772,10 +886,25 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
        return drm_detect_hdmi_monitor(edid);
 }
 
+static enum drm_connector_status
+tegra_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+       struct tegra_output *output = connector_to_output(connector);
+       struct tegra_hdmi *hdmi = to_hdmi(output);
+       enum drm_connector_status status;
+
+       status = tegra_output_connector_detect(connector, force);
+       if (status == connector_status_connected)
+               return status;
+
+       tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+       return status;
+}
+
 static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .reset = drm_atomic_helper_connector_reset,
-       .detect = tegra_output_connector_detect,
+       .detect = tegra_hdmi_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = tegra_output_connector_destroy,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -814,7 +943,9 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
 
 static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 {
+       struct tegra_output *output = encoder_to_output(encoder);
        struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+       struct tegra_hdmi *hdmi = to_hdmi(output);
        u32 value;
 
        /*
@@ -828,6 +959,20 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 
                tegra_dc_commit(dc);
        }
+
+       if (!hdmi->dvi) {
+               if (hdmi->stereo)
+                       tegra_hdmi_disable_stereo_infoframe(hdmi);
+
+               tegra_hdmi_disable_audio_infoframe(hdmi);
+               tegra_hdmi_disable_avi_infoframe(hdmi);
+               tegra_hdmi_disable_audio(hdmi);
+       }
+
+       tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
+       tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
+
+       pm_runtime_put(hdmi->dev);
 }
 
 static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
@@ -836,21 +981,28 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
        unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
        struct tegra_output *output = encoder_to_output(encoder);
        struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-       struct device_node *node = output->dev->of_node;
        struct tegra_hdmi *hdmi = to_hdmi(output);
-       unsigned int pulse_start, div82, pclk;
+       unsigned int pulse_start, div82;
        int retries = 1000;
        u32 value;
        int err;
 
-       hdmi->dvi = !tegra_output_is_hdmi(output);
+       pm_runtime_get_sync(hdmi->dev);
 
-       pclk = mode->clock * 1000;
+       /*
+        * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+        * is used for interoperability between the HDA codec driver and the
+        * HDMI driver.
+        */
+       tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_ENABLE);
+       tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_MASK);
+
+       hdmi->pixel_clock = mode->clock * 1000;
        h_sync_width = mode->hsync_end - mode->hsync_start;
        h_back_porch = mode->htotal - mode->hsync_end;
        h_front_porch = mode->hsync_start - mode->hdisplay;
 
-       err = clk_set_rate(hdmi->clk, pclk);
+       err = clk_set_rate(hdmi->clk, hdmi->pixel_clock);
        if (err < 0) {
                dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
                        err);
@@ -909,17 +1061,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
        value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
 
+       hdmi->dvi = !tegra_output_is_hdmi(output);
        if (!hdmi->dvi) {
-               err = tegra_hdmi_setup_audio(hdmi, pclk);
+               err = tegra_hdmi_setup_audio(hdmi);
                if (err < 0)
                        hdmi->dvi = true;
        }
 
-       if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
-               /*
-                * TODO: add ELD support
-                */
-       }
+       if (hdmi->config->has_hda)
+               tegra_hdmi_write_eld(hdmi);
 
        rekey = HDMI_REKEY_DEFAULT;
        value = HDMI_CTRL_REKEY(rekey);
@@ -931,20 +1081,17 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
 
-       if (hdmi->dvi)
-               tegra_hdmi_writel(hdmi, 0x0,
-                                 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
-       else
-               tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
-                                 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       if (!hdmi->dvi) {
+               tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+               tegra_hdmi_setup_audio_infoframe(hdmi);
 
-       tegra_hdmi_setup_avi_infoframe(hdmi, mode);
-       tegra_hdmi_setup_audio_infoframe(hdmi);
-       tegra_hdmi_setup_stereo_infoframe(hdmi);
+               if (hdmi->stereo)
+                       tegra_hdmi_setup_stereo_infoframe(hdmi);
+       }
 
        /* TMDS CONFIG */
        for (i = 0; i < hdmi->config->num_tmds; i++) {
-               if (pclk <= hdmi->config->tmds[i].pclk) {
+               if (hdmi->pixel_clock <= hdmi->config->tmds[i].pclk) {
                        tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
                        break;
                }
@@ -1031,6 +1178,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 
        tegra_dc_commit(dc);
 
+       if (!hdmi->dvi) {
+               tegra_hdmi_enable_avi_infoframe(hdmi);
+               tegra_hdmi_enable_audio_infoframe(hdmi);
+               tegra_hdmi_enable_audio(hdmi);
+
+               if (hdmi->stereo)
+                       tegra_hdmi_enable_stereo_infoframe(hdmi);
+       }
+
        /* TODO: add HDCP support */
 }
 
@@ -1235,8 +1391,14 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
        DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
        DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
        DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1);
        DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
        DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+       DUMP_REG(HDMI_NV_PDISP_INT_STATUS);
+       DUMP_REG(HDMI_NV_PDISP_INT_MASK);
+       DUMP_REG(HDMI_NV_PDISP_INT_ENABLE);
        DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
 
 #undef DUMP_REG
@@ -1360,14 +1522,6 @@ static int tegra_hdmi_init(struct host1x_client *client)
                return err;
        }
 
-       err = clk_prepare_enable(hdmi->clk);
-       if (err < 0) {
-               dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
-               return err;
-       }
-
-       reset_control_deassert(hdmi->rst);
-
        return 0;
 }
 
@@ -1377,9 +1531,6 @@ static int tegra_hdmi_exit(struct host1x_client *client)
 
        tegra_output_exit(&hdmi->output);
 
-       reset_control_assert(hdmi->rst);
-       clk_disable_unprepare(hdmi->clk);
-
        regulator_disable(hdmi->vdd);
        regulator_disable(hdmi->pll);
        regulator_disable(hdmi->hdmi);
@@ -1401,6 +1552,8 @@ static const struct tegra_hdmi_config tegra20_hdmi_config = {
        .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
        .fuse_override_value = 1 << 31,
        .has_sor_io_peak_current = false,
+       .has_hda = false,
+       .has_hbr = false,
 };
 
 static const struct tegra_hdmi_config tegra30_hdmi_config = {
@@ -1409,6 +1562,8 @@ static const struct tegra_hdmi_config tegra30_hdmi_config = {
        .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
        .fuse_override_value = 1 << 31,
        .has_sor_io_peak_current = false,
+       .has_hda = true,
+       .has_hbr = false,
 };
 
 static const struct tegra_hdmi_config tegra114_hdmi_config = {
@@ -1417,6 +1572,8 @@ static const struct tegra_hdmi_config tegra114_hdmi_config = {
        .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
        .fuse_override_value = 1 << 31,
        .has_sor_io_peak_current = true,
+       .has_hda = true,
+       .has_hbr = true,
 };
 
 static const struct tegra_hdmi_config tegra124_hdmi_config = {
@@ -1425,6 +1582,8 @@ static const struct tegra_hdmi_config tegra124_hdmi_config = {
        .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
        .fuse_override_value = 1 << 31,
        .has_sor_io_peak_current = true,
+       .has_hda = true,
+       .has_hbr = true,
 };
 
 static const struct of_device_id tegra_hdmi_of_match[] = {
@@ -1436,6 +1595,67 @@ static const struct of_device_id tegra_hdmi_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
 
+static void hda_format_parse(unsigned int format, unsigned int *rate,
+                            unsigned int *channels)
+{
+       unsigned int mul, div;
+
+       if (format & AC_FMT_BASE_44K)
+               *rate = 44100;
+       else
+               *rate = 48000;
+
+       mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+       div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+       *rate = *rate * (mul + 1) / (div + 1);
+
+       *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+}
+
+static irqreturn_t tegra_hdmi_irq(int irq, void *data)
+{
+       struct tegra_hdmi *hdmi = data;
+       u32 value;
+       int err;
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS);
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS);
+
+       if (value & INT_CODEC_SCRATCH0) {
+               unsigned int format;
+               u32 value;
+
+               value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+               if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+                       unsigned int sample_rate, channels;
+
+                       format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+                       hda_format_parse(format, &sample_rate, &channels);
+
+                       hdmi->audio_sample_rate = sample_rate;
+                       hdmi->audio_channels = channels;
+
+                       err = tegra_hdmi_setup_audio(hdmi);
+                       if (err < 0) {
+                               tegra_hdmi_disable_audio_infoframe(hdmi);
+                               tegra_hdmi_disable_audio(hdmi);
+                       } else {
+                               tegra_hdmi_setup_audio_infoframe(hdmi);
+                               tegra_hdmi_enable_audio_infoframe(hdmi);
+                               tegra_hdmi_enable_audio(hdmi);
+                       }
+               } else {
+                       tegra_hdmi_disable_audio_infoframe(hdmi);
+                       tegra_hdmi_disable_audio(hdmi);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
 static int tegra_hdmi_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match;
@@ -1453,8 +1673,10 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 
        hdmi->config = match->data;
        hdmi->dev = &pdev->dev;
+
        hdmi->audio_source = AUTO;
-       hdmi->audio_freq = 44100;
+       hdmi->audio_sample_rate = 48000;
+       hdmi->audio_channels = 2;
        hdmi->stereo = false;
        hdmi->dvi = false;
 
@@ -1515,6 +1737,17 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 
        hdmi->irq = err;
 
+       err = devm_request_irq(hdmi->dev, hdmi->irq, tegra_hdmi_irq, 0,
+                              dev_name(hdmi->dev), hdmi);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+                       hdmi->irq, err);
+               return err;
+       }
+
+       platform_set_drvdata(pdev, hdmi);
+       pm_runtime_enable(&pdev->dev);
+
        INIT_LIST_HEAD(&hdmi->client.list);
        hdmi->client.ops = &hdmi_client_ops;
        hdmi->client.dev = &pdev->dev;
@@ -1526,8 +1759,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
                return err;
        }
 
-       platform_set_drvdata(pdev, hdmi);
-
        return 0;
 }
 
@@ -1536,6 +1767,8 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
        struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
        int err;
 
+       pm_runtime_disable(&pdev->dev);
+
        err = host1x_client_unregister(&hdmi->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -1545,17 +1778,61 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
 
        tegra_output_remove(&hdmi->output);
 
-       clk_disable_unprepare(hdmi->clk_parent);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_hdmi_suspend(struct device *dev)
+{
+       struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
+       int err;
+
+       err = reset_control_assert(hdmi->rst);
+       if (err < 0) {
+               dev_err(dev, "failed to assert reset: %d\n", err);
+               return err;
+       }
+
+       usleep_range(1000, 2000);
+
        clk_disable_unprepare(hdmi->clk);
 
        return 0;
 }
 
+static int tegra_hdmi_resume(struct device *dev)
+{
+       struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
+       int err;
+
+       err = clk_prepare_enable(hdmi->clk);
+       if (err < 0) {
+               dev_err(dev, "failed to enable clock: %d\n", err);
+               return err;
+       }
+
+       usleep_range(1000, 2000);
+
+       err = reset_control_deassert(hdmi->rst);
+       if (err < 0) {
+               dev_err(dev, "failed to deassert reset: %d\n", err);
+               clk_disable_unprepare(hdmi->clk);
+               return err;
+       }
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_hdmi_pm_ops = {
+       SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL)
+};
+
 struct platform_driver tegra_hdmi_driver = {
        .driver = {
                .name = "tegra-hdmi",
-               .owner = THIS_MODULE,
                .of_match_table = tegra_hdmi_of_match,
+               .pm = &tegra_hdmi_pm_ops,
        },
        .probe = tegra_hdmi_probe,
        .remove = tegra_hdmi_remove,
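
The rate computed by hda_format_parse() above follows the standard HDA stream-format encoding: a 44.1 kHz or 48 kHz base rate scaled by the multiplier and divisor fields. A worked example using the AC_FMT_* shifts from <sound/hda_verbs.h> (the field values are illustrative):

unsigned int rate, channels;

/* base 44.1 kHz, mul = 1 (x2), div = 0 (/1), channel field = 1 */
unsigned int format = AC_FMT_BASE_44K |
                      (1 << AC_FMT_MULT_SHIFT) |
                      (0 << AC_FMT_DIV_SHIFT) |
                      (1 << AC_FMT_CHAN_SHIFT);

hda_format_parse(format, &rate, &channels);
/* rate == 44100 * (1 + 1) / (0 + 1) == 88200, channels == 1 */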
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index a882514389cd05a35e503c8b0adb2bb39ba91ae4..2339f134a09ab0ff90da7c800e85dcd68cb73980 100644
 #define HDMI_NV_PDISP_KEY_SKEY_INDEX                           0xa3
 
 #define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0                         0xac
-#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define  SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO   (0 << 20)
+#define  SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF  (1 << 20)
+#define  SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL   (2 << 20)
+#define  SOR_AUDIO_CNTRL0_INJECT_NULLSMPL      (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_SPARE0                         0xae
+#define  SOR_AUDIO_SPARE0_HBR_ENABLE           (1 << 27)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0             0xba
+#define  SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID    (1 << 30)
+#define  SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1             0xbb
 #define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR                  0xbc
 #define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE                   0xbd
+#define  SOR_AUDIO_HDA_PRESENSE_VALID          (1 << 1)
+#define  SOR_AUDIO_HDA_PRESENSE_PRESENT        (1 << 0)
 
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320    0xbf
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441    0xc0
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920    0xc5
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
 
+#define HDMI_NV_PDISP_INT_STATUS                       0xcc
+#define  INT_SCRATCH           (1 << 3)
+#define  INT_CP_REQUEST        (1 << 2)
+#define  INT_CODEC_SCRATCH1    (1 << 1)
+#define  INT_CODEC_SCRATCH0    (1 << 0)
+#define HDMI_NV_PDISP_INT_MASK                         0xcd
+#define HDMI_NV_PDISP_INT_ENABLE                       0xce
+
 #define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT              0xd1
 #define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) <<  0)
 #define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) <<  8)
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 1480f6aaffe492f7d33fb8186f9e0e4039e6d3b4..595d1ec3e02e8ff6e42c0574b0d1d1218bd8210f 100644
@@ -36,6 +36,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
 
        if (edid) {
                err = drm_add_edid_modes(connector, edid);
+               drm_edid_to_eld(connector, edid);
                kfree(edid);
        }
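
The drm_edid_to_eld() call added here caches the EDID-derived ELD in connector->eld, which is exactly what tegra_hdmi_write_eld() consumes via drm_eld_size(); without it, the ELD buffer handed to the HDA codec would always be empty. The producer/consumer relationship, as a sketch:

/* producer (this helper): fill connector->eld while parsing modes */
drm_edid_to_eld(connector, edid);

/* consumer (HDMI driver): forward the cached ELD to the HDA codec */
length = drm_eld_size(connector->eld);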
 
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 34958d71284bc4293b4d83ddc1e80cbc89487b54..74d0540b8d4c797361111fb9e8b0df288811541d 100644
@@ -7,11 +7,13 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/debugfs.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 
@@ -149,6 +151,8 @@ struct tegra_sor_soc {
 
        const struct tegra_sor_hdmi_settings *settings;
        unsigned int num_settings;
+
+       const u8 *xbar_cfg;
 };
 
 struct tegra_sor;
@@ -169,7 +173,9 @@ struct tegra_sor {
 
        struct reset_control *rst;
        struct clk *clk_parent;
+       struct clk *clk_brick;
        struct clk *clk_safe;
+       struct clk *clk_src;
        struct clk *clk_dp;
        struct clk *clk;
 
@@ -190,6 +196,18 @@ struct tegra_sor {
        struct regulator *hdmi_supply;
 };
 
+struct tegra_sor_state {
+       struct drm_connector_state base;
+
+       unsigned int bpc;
+};
+
+static inline struct tegra_sor_state *
+to_sor_state(struct drm_connector_state *state)
+{
+       return container_of(state, struct tegra_sor_state, base);
+}
+
 struct tegra_sor_config {
        u32 bits_per_pixel;
 
@@ -225,6 +243,118 @@ static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
        writel(value, sor->regs + (offset << 2));
 }
 
+static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
+{
+       int err;
+
+       clk_disable_unprepare(sor->clk);
+
+       err = clk_set_parent(sor->clk, parent);
+       if (err < 0)
+               return err;
+
+       err = clk_prepare_enable(sor->clk);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+struct tegra_clk_sor_brick {
+       struct clk_hw hw;
+       struct tegra_sor *sor;
+};
+
+static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+{
+       return container_of(hw, struct tegra_clk_sor_brick, hw);
+}
+
+static const char * const tegra_clk_sor_brick_parents[] = {
+       "pll_d2_out0", "pll_dp"
+};
+
+static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct tegra_clk_sor_brick *brick = to_brick(hw);
+       struct tegra_sor *sor = brick->sor;
+       u32 value;
+
+       value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+       value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+       switch (index) {
+       case 0:
+               value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+               break;
+
+       case 1:
+               value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+               break;
+       }
+
+       tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+       return 0;
+}
+
+static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+{
+       struct tegra_clk_sor_brick *brick = to_brick(hw);
+       struct tegra_sor *sor = brick->sor;
+       u8 parent = U8_MAX;
+       u32 value;
+
+       value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+
+       switch (value & SOR_CLK_CNTRL_DP_CLK_SEL_MASK) {
+       case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK:
+       case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK:
+               parent = 0;
+               break;
+
+       case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK:
+       case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK:
+               parent = 1;
+               break;
+       }
+
+       return parent;
+}
+
+static const struct clk_ops tegra_clk_sor_brick_ops = {
+       .set_parent = tegra_clk_sor_brick_set_parent,
+       .get_parent = tegra_clk_sor_brick_get_parent,
+};
+
+static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
+                                               const char *name)
+{
+       struct tegra_clk_sor_brick *brick;
+       struct clk_init_data init;
+       struct clk *clk;
+
+       brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
+       if (!brick)
+               return ERR_PTR(-ENOMEM);
+
+       brick->sor = sor;
+
+       init.name = name;
+       init.flags = 0;
+       init.parent_names = tegra_clk_sor_brick_parents;
+       init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
+       init.ops = &tegra_clk_sor_brick_ops;
+
+       brick->hw.init = &init;
+
+       /* brick is devm-allocated: do not free it manually on failure */
+       clk = devm_clk_register(sor->dev, &brick->hw);
+
+       return clk;
+}
+
 static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
                                   struct drm_dp_link *link)
 {
@@ -569,10 +699,10 @@ static int tegra_sor_compute_params(struct tegra_sor *sor,
        return false;
 }
 
-static int tegra_sor_calc_config(struct tegra_sor *sor,
-                                const struct drm_display_mode *mode,
-                                struct tegra_sor_config *config,
-                                struct drm_dp_link *link)
+static int tegra_sor_compute_config(struct tegra_sor *sor,
+                                   const struct drm_display_mode *mode,
+                                   struct tegra_sor_config *config,
+                                   struct drm_dp_link *link)
 {
        const u64 f = 100000, link_rate = link->rate * 1000;
        const u64 pclk = mode->clock * 1000;
@@ -661,6 +791,135 @@ static int tegra_sor_calc_config(struct tegra_sor *sor,
        return 0;
 }
 
+static void tegra_sor_apply_config(struct tegra_sor *sor,
+                                  const struct tegra_sor_config *config)
+{
+       u32 value;
+
+       value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+       value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
+       value |= SOR_DP_LINKCTL_TU_SIZE(config->tu_size);
+       tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+       value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
+       value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
+       value |= SOR_DP_CONFIG_WATERMARK(config->watermark);
+
+       value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
+       value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config->active_count);
+
+       value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
+       value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config->active_frac);
+
+       if (config->active_polarity)
+               value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+       else
+               value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+
+       value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
+       value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
+       tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
+
+       value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+       value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
+       value |= config->hblank_symbols & 0xffff;
+       tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+
+       value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+       value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
+       value |= config->vblank_symbols & 0xffff;
+       tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+}
+
+static void tegra_sor_mode_set(struct tegra_sor *sor,
+                              const struct drm_display_mode *mode,
+                              struct tegra_sor_state *state)
+{
+       struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
+       unsigned int vbe, vse, hbe, hse, vbs, hbs;
+       u32 value;
+
+       value = tegra_sor_readl(sor, SOR_STATE1);
+       value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+       value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+       value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+       value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+                SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+               value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               value |= SOR_STATE_ASY_HSYNCPOL;
+
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+               value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               value |= SOR_STATE_ASY_VSYNCPOL;
+
+       switch (state->bpc) {
+       case 16:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_48_444;
+               break;
+
+       case 12:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_36_444;
+               break;
+
+       case 10:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_30_444;
+               break;
+
+       case 8:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+               break;
+
+       case 6:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+               break;
+
+       default:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+               break;
+       }
+
+       tegra_sor_writel(sor, value, SOR_STATE1);
+
+       /*
+        * TODO: The video timing programming below doesn't seem to match the
+        * register definitions.
+        */
+
+       value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+       tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+
+       /* sync end = sync width - 1 */
+       vse = mode->vsync_end - mode->vsync_start - 1;
+       hse = mode->hsync_end - mode->hsync_start - 1;
+
+       value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+       tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+
+       /* blank end = sync end + back porch */
+       vbe = vse + (mode->vtotal - mode->vsync_end);
+       hbe = hse + (mode->htotal - mode->hsync_end);
+
+       value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+       tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+
+       /* blank start = blank end + active */
+       vbs = vbe + mode->vdisplay;
+       hbs = hbe + mode->hdisplay;
+
+       value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+       tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+       /* XXX interlacing support */
+       tegra_sor_writel(sor, 0x001, SOR_HEAD_STATE5(dc->pipe));
+}
+
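/*
 * Illustration only, not part of this patch: the timing arithmetic of
 * tegra_sor_mode_set() worked through with CEA-861 1080p60 timings as
 * assumed sample values; buildable as a standalone host program. Note the
 * helper derives the blank end from the back porch (total - sync_end),
 * whereas the eDP copy of this code it replaces used the front porch.
 */
#include <stdio.h>

int main(void)
{
	/* 1920x1080@60: active, sync start, sync end, total */
	unsigned int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
	unsigned int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

	unsigned int hse = hsync_end - hsync_start - 1; /* sync end = sync width - 1 -> 43 */
	unsigned int vse = vsync_end - vsync_start - 1; /* -> 4 */
	unsigned int hbe = hse + (htotal - hsync_end);  /* blank end = sync end + back porch -> 191 */
	unsigned int vbe = vse + (vtotal - vsync_end);  /* -> 40 */
	unsigned int hbs = hbe + hdisplay;              /* blank start = blank end + active -> 2111 */
	unsigned int vbs = vbe + vdisplay;              /* -> 1120 */

	printf("hse=%u vse=%u hbe=%u vbe=%u hbs=%u vbs=%u\n",
	       hse, vse, hbe, vbe, hbs, vbs);
	return 0;
}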
 static int tegra_sor_detach(struct tegra_sor *sor)
 {
        unsigned long value, timeout;
@@ -733,7 +992,8 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
        if ((value & SOR_PWR_TRIGGER) != 0)
                return -ETIMEDOUT;
 
-       err = clk_set_parent(sor->clk, sor->clk_safe);
+       /* switch to safe parent clock */
+       err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
        if (err < 0)
                dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
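/*
 * tegra_sor_set_parent_clock() is introduced elsewhere in this patch; its
 * body is not visible in these hunks. A plausible sketch, assuming it simply
 * reparents the module clock while it is gated (an assumption, not the
 * verbatim implementation):
 */
static int tegra_sor_set_parent_clock(struct tegra_sor *sor,
				      struct clk *parent)
{
	int err;

	clk_disable_unprepare(sor->clk);

	err = clk_set_parent(sor->clk, parent);
	if (err < 0)
		return err;

	return clk_prepare_enable(sor->clk);
}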
 
@@ -1038,6 +1298,22 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
        sor->debugfs = NULL;
 }
 
+static void tegra_sor_connector_reset(struct drm_connector *connector)
+{
+       struct tegra_sor_state *state;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return;
+
+       if (connector->state) {
+               __drm_atomic_helper_connector_destroy_state(connector->state);
+               kfree(connector->state);
+       }
+
+       __drm_atomic_helper_connector_reset(connector, &state->base);
+}
+
 static enum drm_connector_status
 tegra_sor_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -1050,13 +1326,28 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
        return tegra_output_connector_detect(connector, force);
 }
 
+static struct drm_connector_state *
+tegra_sor_connector_duplicate_state(struct drm_connector *connector)
+{
+       struct tegra_sor_state *state = to_sor_state(connector->state);
+       struct tegra_sor_state *copy;
+
+       copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+       if (!copy)
+               return NULL;
+
+       __drm_atomic_helper_connector_duplicate_state(connector, &copy->base);
+
+       return &copy->base;
+}
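/*
 * Context, not visible in these hunks: tegra_sor_connector_reset() and
 * tegra_sor_connector_duplicate_state() rely on a connector state subclass
 * and a cast helper, presumably declared along these lines (the usual
 * container_of pattern for subclassed atomic state):
 */
struct tegra_sor_state {
	struct drm_connector_state base;

	unsigned int bpc; /* bits per color, validated in ->atomic_check() */
};

static inline struct tegra_sor_state *
to_sor_state(struct drm_connector_state *state)
{
	return container_of(state, struct tegra_sor_state, base);
}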
+
 static const struct drm_connector_funcs tegra_sor_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
-       .reset = drm_atomic_helper_connector_reset,
+       .reset = tegra_sor_connector_reset,
        .detect = tegra_sor_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = tegra_output_connector_destroy,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_duplicate_state = tegra_sor_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
@@ -1081,6 +1372,10 @@ static enum drm_mode_status
 tegra_sor_connector_mode_valid(struct drm_connector *connector,
                               struct drm_display_mode *mode)
 {
+       /* HDMI 2.0 modes are not yet supported */
+       if (mode->clock > 340000)
+               return MODE_NOCLOCK;
+
        return MODE_OK;
 }
 
@@ -1140,8 +1435,7 @@ static void tegra_sor_edp_disable(struct drm_encoder *encoder)
        if (output->panel)
                drm_panel_unprepare(output->panel);
 
-       reset_control_assert(sor->rst);
-       clk_disable_unprepare(sor->clk);
+       pm_runtime_put(sor->dev);
 }
 
 #if 0
@@ -1191,19 +1485,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
        struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct tegra_output *output = encoder_to_output(encoder);
        struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-       unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
        struct tegra_sor *sor = to_sor(output);
        struct tegra_sor_config config;
+       struct tegra_sor_state *state;
        struct drm_dp_link link;
        u8 rate, lanes;
+       unsigned int i;
        int err = 0;
        u32 value;
 
-       err = clk_prepare_enable(sor->clk);
-       if (err < 0)
-               dev_err(sor->dev, "failed to enable clock: %d\n", err);
+       state = to_sor_state(output->connector.state);
 
-       reset_control_deassert(sor->rst);
+       pm_runtime_get_sync(sor->dev);
 
        if (output->panel)
                drm_panel_prepare(output->panel);
@@ -1218,17 +1511,17 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
                return;
        }
 
-       err = clk_set_parent(sor->clk, sor->clk_safe);
+       /* switch to safe parent clock */
+       err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
        if (err < 0)
                dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
 
        memset(&config, 0, sizeof(config));
-       config.bits_per_pixel = output->connector.display_info.bpc * 3;
+       config.bits_per_pixel = state->bpc * 3;
 
-       err = tegra_sor_calc_config(sor, mode, &config, &link);
+       err = tegra_sor_compute_config(sor, mode, &config, &link);
        if (err < 0)
-               dev_err(sor->dev, "failed to compute link configuration: %d\n",
-                       err);
+               dev_err(sor->dev, "failed to compute configuration: %d\n", err);
 
        value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
        value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
@@ -1325,10 +1618,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
        value &= ~SOR_PLL2_PORT_POWERDOWN;
        tegra_sor_writel(sor, value, SOR_PLL2);
 
-       /* switch to DP clock */
-       err = clk_set_parent(sor->clk, sor->clk_dp);
+       /* XXX not in TRM */
+       for (value = 0, i = 0; i < 5; i++)
+               value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+                        SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+       tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+       tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+       /* switch to DP parent clock */
+       err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
        if (err < 0)
-               dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
+               dev_err(sor->dev, "failed to set parent clock: %d\n", err);
 
        /* power DP lanes */
        value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
@@ -1374,13 +1675,11 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
        value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
        tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
-       /* set linkctl */
+       tegra_sor_apply_config(sor, &config);
+
+       /* enable link */
        value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
        value |= SOR_DP_LINKCTL_ENABLE;
-
-       value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
-       value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
-
        value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
        tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
@@ -1393,35 +1692,6 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
 
        tegra_sor_writel(sor, value, SOR_DP_TPG);
 
-       value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
-       value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
-       value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
-
-       value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
-       value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config.active_count);
-
-       value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
-       value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config.active_frac);
-
-       if (config.active_polarity)
-               value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
-       else
-               value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
-
-       value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
-       value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
-       tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
-
-       value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
-       value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
-       value |= config.hblank_symbols & 0xffff;
-       tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
-
-       value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
-       value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
-       value |= config.vblank_symbols & 0xffff;
-       tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
-
        /* enable pad calibration logic */
        value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
        value |= SOR_DP_PADCTL_PAD_CAL_PD;
@@ -1477,75 +1747,19 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
        if (err < 0)
                dev_err(sor->dev, "failed to power up SOR: %d\n", err);
 
-       /*
-        * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
-        * raster, associate with display controller)
-        */
-       value = SOR_STATE_ASY_PROTOCOL_DP_A |
-               SOR_STATE_ASY_CRC_MODE_COMPLETE |
-               SOR_STATE_ASY_OWNER(dc->pipe + 1);
-
-       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-               value &= ~SOR_STATE_ASY_HSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               value |= SOR_STATE_ASY_HSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-               value &= ~SOR_STATE_ASY_VSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               value |= SOR_STATE_ASY_VSYNCPOL;
-
-       switch (config.bits_per_pixel) {
-       case 24:
-               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
-               break;
-
-       case 18:
-               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
-               break;
-
-       default:
-               BUG();
-               break;
-       }
-
-       tegra_sor_writel(sor, value, SOR_STATE1);
-
-       /*
-        * TODO: The video timing programming below doesn't seem to match the
-        * register definitions.
-        */
-
-       value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
-
-       vse = mode->vsync_end - mode->vsync_start - 1;
-       hse = mode->hsync_end - mode->hsync_start - 1;
-
-       value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
-
-       vbe = vse + (mode->vsync_start - mode->vdisplay);
-       hbe = hse + (mode->hsync_start - mode->hdisplay);
-
-       value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
-
-       vbs = vbe + mode->vdisplay;
-       hbs = hbe + mode->hdisplay;
-
-       value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
-
-       tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
-
        /* CSTM (LVDS, link A/B, upper) */
        value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
                SOR_CSTM_UPPER;
        tegra_sor_writel(sor, value, SOR_CSTM);
 
+       /* use DP-A protocol */
+       value = tegra_sor_readl(sor, SOR_STATE1);
+       value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+       value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+       tegra_sor_writel(sor, value, SOR_STATE1);
+
+       tegra_sor_mode_set(sor, mode, state);
+
        /* PWM setup */
        err = tegra_sor_setup_pwm(sor, 250);
        if (err < 0)
@@ -1577,11 +1791,15 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
                               struct drm_connector_state *conn_state)
 {
        struct tegra_output *output = encoder_to_output(encoder);
+       struct tegra_sor_state *state = to_sor_state(conn_state);
        struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
        unsigned long pclk = crtc_state->mode.clock * 1000;
        struct tegra_sor *sor = to_sor(output);
+       struct drm_display_info *info;
        int err;
 
+       info = &output->connector.display_info;
+
        err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
                                         pclk, 0);
        if (err < 0) {
@@ -1589,6 +1807,18 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
                return err;
        }
 
+       switch (info->bpc) {
+       case 8:
+       case 6:
+               state->bpc = info->bpc;
+               break;
+
+       default:
+               DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc);
+               state->bpc = 8;
+               break;
+       }
+
        return 0;
 }
 
@@ -1751,9 +1981,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
        if (err < 0)
                dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
 
-       reset_control_assert(sor->rst);
-       usleep_range(1000, 2000);
-       clk_disable_unprepare(sor->clk);
+       pm_runtime_put(sor->dev);
 }
 
 static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
@@ -1761,26 +1989,21 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
        struct tegra_output *output = encoder_to_output(encoder);
        unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
        struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-       unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
        struct tegra_sor_hdmi_settings *settings;
        struct tegra_sor *sor = to_sor(output);
+       struct tegra_sor_state *state;
        struct drm_display_mode *mode;
-       struct drm_display_info *info;
+       unsigned int div, i;
        u32 value;
        int err;
 
+       state = to_sor_state(output->connector.state);
        mode = &encoder->crtc->state->adjusted_mode;
-       info = &output->connector.display_info;
 
-       err = clk_prepare_enable(sor->clk);
-       if (err < 0)
-               dev_err(sor->dev, "failed to enable clock: %d\n", err);
+       pm_runtime_get_sync(sor->dev);
 
-       usleep_range(1000, 2000);
-
-       reset_control_deassert(sor->rst);
-
-       err = clk_set_parent(sor->clk, sor->clk_safe);
+       /* switch to safe parent clock */
+       err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
        if (err < 0)
                dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
 
@@ -1876,22 +2099,20 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
        value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
        tegra_sor_writel(sor, value, SOR_REFCLK);
 
-       /* XXX don't hardcode */
-       value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
-               SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
-               SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
-               SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
-               SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
-               SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
-               SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
-               SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
-               SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
-               SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
-       tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+       /* XXX not in TRM */
+       for (value = 0, i = 0; i < 5; i++)
+               value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+                        SOR_XBAR_CTRL_LINK1_XSEL(i, i);
 
        tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+       tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
 
-       err = clk_set_parent(sor->clk, sor->clk_parent);
+       /* switch to parent clock */
+       err = clk_set_parent(sor->clk_src, sor->clk_parent);
+       if (err < 0)
+               dev_err(sor->dev, "failed to set source clock: %d\n", err);
+
+       err = tegra_sor_set_parent_clock(sor, sor->clk_src);
        if (err < 0)
                dev_err(sor->dev, "failed to set parent clock: %d\n", err);
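/*
 * Sketch, not part of this patch: what the xbar_cfg loop above produces for
 * tegra210_sor1, whose table (defined further down) is { 2, 1, 0, 3, 4 }.
 * It yields exactly the SOR_XBAR_CTRL value the HDMI path used to build by
 * hand: link 0 lanes 0 and 2 swapped, link 1 mapped straight through.
 */
value = SOR_XBAR_CTRL_LINK0_XSEL(0, 2) | SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
	SOR_XBAR_CTRL_LINK0_XSEL(1, 1) | SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
	SOR_XBAR_CTRL_LINK0_XSEL(2, 0) | SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
	SOR_XBAR_CTRL_LINK0_XSEL(3, 3) | SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
	SOR_XBAR_CTRL_LINK0_XSEL(4, 4) | SOR_XBAR_CTRL_LINK1_XSEL(4, 4);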
 
@@ -2001,7 +2222,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
        value &= ~DITHER_CONTROL_MASK;
        value &= ~BASE_COLOR_SIZE_MASK;
 
-       switch (info->bpc) {
+       switch (state->bpc) {
        case 6:
                value |= BASE_COLOR_SIZE_666;
                break;
@@ -2011,7 +2232,8 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
                break;
 
        default:
-               WARN(1, "%u bits-per-color not supported\n", info->bpc);
+               WARN(1, "%u bits-per-color not supported\n", state->bpc);
+               value |= BASE_COLOR_SIZE_888;
                break;
        }
 
@@ -2021,83 +2243,19 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
        if (err < 0)
                dev_err(sor->dev, "failed to power up SOR: %d\n", err);
 
-       /* configure mode */
-       value = tegra_sor_readl(sor, SOR_STATE1);
-       value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
-       value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
-       value &= ~SOR_STATE_ASY_OWNER_MASK;
-
-       value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
-                SOR_STATE_ASY_OWNER(dc->pipe + 1);
-
-       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-               value &= ~SOR_STATE_ASY_HSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               value |= SOR_STATE_ASY_HSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-               value &= ~SOR_STATE_ASY_VSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               value |= SOR_STATE_ASY_VSYNCPOL;
-
-       switch (info->bpc) {
-       case 8:
-               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
-               break;
-
-       case 6:
-               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
-               break;
-
-       default:
-               BUG();
-               break;
-       }
-
-       tegra_sor_writel(sor, value, SOR_STATE1);
-
+       /* configure dynamic range of output */
        value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
        value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
        value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
        tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
 
+       /* configure colorspace */
        value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
        value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
        value |= SOR_HEAD_STATE_COLORSPACE_RGB;
        tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
 
-       /*
-        * TODO: The video timing programming below doesn't seem to match the
-        * register definitions.
-        */
-
-       value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
-
-       /* sync end = sync width - 1 */
-       vse = mode->vsync_end - mode->vsync_start - 1;
-       hse = mode->hsync_end - mode->hsync_start - 1;
-
-       value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
-
-       /* blank end = sync end + back porch */
-       vbe = vse + (mode->vtotal - mode->vsync_end);
-       hbe = hse + (mode->htotal - mode->hsync_end);
-
-       value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
-
-       /* blank start = blank end + active */
-       vbs = vbe + mode->vdisplay;
-       hbs = hbe + mode->hdisplay;
-
-       value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
-
-       tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
+       tegra_sor_mode_set(sor, mode, state);
 
        tegra_sor_update(sor);
 
@@ -2195,10 +2353,13 @@ static int tegra_sor_init(struct host1x_client *client)
         * XXX: Remove this reset once proper hand-over from firmware to
         * kernel is possible.
         */
-       err = reset_control_assert(sor->rst);
-       if (err < 0) {
-               dev_err(sor->dev, "failed to assert SOR reset: %d\n", err);
-               return err;
+       if (sor->rst) {
+               err = reset_control_assert(sor->rst);
+               if (err < 0) {
+                       dev_err(sor->dev, "failed to assert SOR reset: %d\n",
+                               err);
+                       return err;
+               }
        }
 
        err = clk_prepare_enable(sor->clk);
@@ -2209,10 +2370,13 @@ static int tegra_sor_init(struct host1x_client *client)
 
        usleep_range(1000, 3000);
 
-       err = reset_control_deassert(sor->rst);
-       if (err < 0) {
-               dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err);
-               return err;
+       if (sor->rst) {
+               err = reset_control_deassert(sor->rst);
+               if (err < 0) {
+                       dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
+                               err);
+                       return err;
+               }
        }
 
        err = clk_prepare_enable(sor->clk_safe);
@@ -2323,11 +2487,16 @@ static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
        .remove = tegra_sor_hdmi_remove,
 };
 
+static const u8 tegra124_sor_xbar_cfg[5] = {
+       0, 1, 2, 3, 4
+};
+
 static const struct tegra_sor_soc tegra124_sor = {
        .supports_edp = true,
        .supports_lvds = true,
        .supports_hdmi = false,
        .supports_dp = false,
+       .xbar_cfg = tegra124_sor_xbar_cfg,
 };
 
 static const struct tegra_sor_soc tegra210_sor = {
@@ -2335,6 +2504,11 @@ static const struct tegra_sor_soc tegra210_sor = {
        .supports_lvds = false,
        .supports_hdmi = false,
        .supports_dp = false,
+       .xbar_cfg = tegra124_sor_xbar_cfg,
+};
+
+static const u8 tegra210_sor_xbar_cfg[5] = {
+       2, 1, 0, 3, 4
 };
 
 static const struct tegra_sor_soc tegra210_sor1 = {
@@ -2345,6 +2519,8 @@ static const struct tegra_sor_soc tegra210_sor1 = {
 
        .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
        .settings = tegra210_sor_hdmi_defaults,
+
+       .xbar_cfg = tegra210_sor_xbar_cfg,
 };
 
 static const struct of_device_id tegra_sor_of_match[] = {
@@ -2434,11 +2610,14 @@ static int tegra_sor_probe(struct platform_device *pdev)
                goto remove;
        }
 
-       sor->rst = devm_reset_control_get(&pdev->dev, "sor");
-       if (IS_ERR(sor->rst)) {
-               err = PTR_ERR(sor->rst);
-               dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
-               goto remove;
+       if (!pdev->dev.pm_domain) {
+               sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+               if (IS_ERR(sor->rst)) {
+                       err = PTR_ERR(sor->rst);
+                       dev_err(&pdev->dev, "failed to get reset control: %d\n",
+                               err);
+                       goto remove;
+               }
        }
 
        sor->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2448,6 +2627,16 @@ static int tegra_sor_probe(struct platform_device *pdev)
                goto remove;
        }
 
+       if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
+               sor->clk_src = devm_clk_get(&pdev->dev, "source");
+               if (IS_ERR(sor->clk_src)) {
+                       err = PTR_ERR(sor->clk_src);
+                       dev_err(sor->dev, "failed to get source clock: %d\n",
+                               err);
+                       goto remove;
+               }
+       }
+
        sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
        if (IS_ERR(sor->clk_parent)) {
                err = PTR_ERR(sor->clk_parent);
@@ -2469,6 +2658,19 @@ static int tegra_sor_probe(struct platform_device *pdev)
                goto remove;
        }
 
+       platform_set_drvdata(pdev, sor);
+       pm_runtime_enable(&pdev->dev);
+
+       pm_runtime_get_sync(&pdev->dev);
+       sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
+       pm_runtime_put(&pdev->dev);
+
+       if (IS_ERR(sor->clk_brick)) {
+               err = PTR_ERR(sor->clk_brick);
+               dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+               goto remove;
+       }
+
        INIT_LIST_HEAD(&sor->client.list);
        sor->client.ops = &sor_client_ops;
        sor->client.dev = &pdev->dev;
@@ -2480,8 +2682,6 @@ static int tegra_sor_probe(struct platform_device *pdev)
                goto remove;
        }
 
-       platform_set_drvdata(pdev, sor);
-
        return 0;
 
 remove:
@@ -2497,6 +2697,8 @@ static int tegra_sor_remove(struct platform_device *pdev)
        struct tegra_sor *sor = platform_get_drvdata(pdev);
        int err;
 
+       pm_runtime_disable(&pdev->dev);
+
        err = host1x_client_unregister(&sor->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -2515,10 +2717,62 @@ static int tegra_sor_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int tegra_sor_suspend(struct device *dev)
+{
+       struct tegra_sor *sor = dev_get_drvdata(dev);
+       int err;
+
+       if (sor->rst) {
+               err = reset_control_assert(sor->rst);
+               if (err < 0) {
+                       dev_err(dev, "failed to assert reset: %d\n", err);
+                       return err;
+               }
+       }
+
+       usleep_range(1000, 2000);
+
+       clk_disable_unprepare(sor->clk);
+
+       return 0;
+}
+
+static int tegra_sor_resume(struct device *dev)
+{
+       struct tegra_sor *sor = dev_get_drvdata(dev);
+       int err;
+
+       err = clk_prepare_enable(sor->clk);
+       if (err < 0) {
+               dev_err(dev, "failed to enable clock: %d\n", err);
+               return err;
+       }
+
+       usleep_range(1000, 2000);
+
+       if (sor->rst) {
+               err = reset_control_deassert(sor->rst);
+               if (err < 0) {
+                       dev_err(dev, "failed to deassert reset: %d\n", err);
+                       clk_disable_unprepare(sor->clk);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_sor_pm_ops = {
+       SET_RUNTIME_PM_OPS(tegra_sor_suspend, tegra_sor_resume, NULL)
+};
+
 struct platform_driver tegra_sor_driver = {
        .driver = {
                .name = "tegra-sor",
                .of_match_table = tegra_sor_of_match,
+               .pm = &tegra_sor_pm_ops,
        },
        .probe = tegra_sor_probe,
        .remove = tegra_sor_remove,
index 2d31d027e3f68cfe22d36d03cd4f75f6a575333b..865c73b489685c74c8b438b145602adb20f8a5a9 100644
@@ -27,6 +27,9 @@
 #define  SOR_STATE_ASY_PIXELDEPTH_MASK         (0xf << 17)
 #define  SOR_STATE_ASY_PIXELDEPTH_BPP_18_444   (0x2 << 17)
 #define  SOR_STATE_ASY_PIXELDEPTH_BPP_24_444   (0x5 << 17)
+#define  SOR_STATE_ASY_PIXELDEPTH_BPP_30_444   (0x6 << 17)
+#define  SOR_STATE_ASY_PIXELDEPTH_BPP_36_444   (0x8 << 17)
+#define  SOR_STATE_ASY_PIXELDEPTH_BPP_48_444   (0x9 << 17)
 #define  SOR_STATE_ASY_VSYNCPOL                        (1 << 13)
 #define  SOR_STATE_ASY_HSYNCPOL                        (1 << 12)
 #define  SOR_STATE_ASY_PROTOCOL_MASK           (0xf << 8)
index 4e55863e836b69ef5ec6fce1025324fdaa35b326..b1c1d8f42c8c3a22c57bbcefed593fc0346d85c3 100644
@@ -148,6 +148,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
        BUG_ON(!list_empty(&bo->ddestroy));
        ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
+       fence_put(bo->moving);
        if (bo->resv == &bo->ttm_resv)
                reservation_object_fini(&bo->ttm_resv);
        mutex_destroy(&bo->wu_mutex);
@@ -358,7 +359,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_gpu, mem);
        else
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+               ret = ttm_bo_move_memcpy(bo, evict, interruptible,
+                                        no_wait_gpu, mem);
 
        if (ret) {
                if (bdev->driver->move_notify) {
@@ -394,8 +396,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 out_err:
        new_man = &bdev->man[bo->mem.mem_type];
-       if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
-               ttm_tt_unbind(bo->ttm);
+       if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }
@@ -416,11 +417,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
        if (bo->bdev->driver->move_notify)
                bo->bdev->driver->move_notify(bo, NULL);
 
-       if (bo->ttm) {
-               ttm_tt_unbind(bo->ttm);
-               ttm_tt_destroy(bo->ttm);
-               bo->ttm = NULL;
-       }
+       ttm_tt_destroy(bo->ttm);
+       bo->ttm = NULL;
        ttm_bo_mem_put(bo, &bo->mem);
 
        ww_mutex_unlock (&bo->resv->lock);
@@ -686,15 +684,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        struct ttm_placement placement;
        int ret = 0;
 
-       ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS) {
-                       pr_err("Failed to expire sync object before buffer eviction\n");
-               }
-               goto out;
-       }
-
        lockdep_assert_held(&bo->resv->lock.base);
 
        evict_mem = bo->mem;
@@ -718,7 +707,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait_gpu);
-       if (ret) {
+       if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        pr_err("Buffer eviction failed\n");
                ttm_bo_mem_put(bo, &evict_mem);
@@ -797,6 +786,34 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 }
 EXPORT_SYMBOL(ttm_bo_mem_put);
 
+/**
+ * Add the last move fence to the BO and reserve a new shared slot.
+ */
+static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
+                                struct ttm_mem_type_manager *man,
+                                struct ttm_mem_reg *mem)
+{
+       struct fence *fence;
+       int ret;
+
+       spin_lock(&man->move_lock);
+       fence = fence_get(man->move);
+       spin_unlock(&man->move_lock);
+
+       if (fence) {
+               reservation_object_add_shared_fence(bo->resv, fence);
+
+               ret = reservation_object_reserve_shared(bo->resv);
+               if (unlikely(ret))
+                       return ret;
+
+               fence_put(bo->moving);
+               bo->moving = fence;
+       }
+
+       return 0;
+}
+
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
@@ -823,10 +840,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
-       if (mem->mm_node == NULL)
-               return -ENOMEM;
        mem->mem_type = mem_type;
-       return 0;
+       return ttm_bo_add_move_fence(bo, man, mem);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -896,6 +911,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
        bool has_erestartsys = false;
        int i, ret;
 
+       ret = reservation_object_reserve_shared(bo->resv);
+       if (unlikely(ret))
+               return ret;
+
        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                const struct ttm_place *place = &placement->placement[i];
@@ -929,9 +948,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret))
                        return ret;
-               
-               if (mem->mm_node)
+
+               if (mem->mm_node) {
+                       ret = ttm_bo_add_move_fence(bo, man, mem);
+                       if (unlikely(ret)) {
+                               (*man->func->put_node)(man, mem);
+                               return ret;
+                       }
                        break;
+               }
        }
 
        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
@@ -998,20 +1023,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
        lockdep_assert_held(&bo->resv->lock.base);
 
-       /*
-        * Don't wait for the BO on initial allocation. This is important when
-        * the BO has an imported reservation object.
-        */
-       if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
-               /*
-                * FIXME: It's possible to pipeline buffer moves.
-                * Have the driver move function wait for idle when necessary,
-                * instead of doing it here.
-                */
-               ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-               if (ret)
-                       return ret;
-       }
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
@@ -1163,7 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved_vm = false;
        bo->mem.bus.io_reserved_count = 0;
-       bo->priv_flags = 0;
+       bo->moving = NULL;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->persistent_swap_storage = persistent_swap_storage;
        bo->acc_size = acc_size;
@@ -1275,6 +1286,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
+       struct fence *fence;
        int ret;
 
        /*
@@ -1295,6 +1307,23 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                spin_lock(&glob->lru_lock);
        }
        spin_unlock(&glob->lru_lock);
+
+       spin_lock(&man->move_lock);
+       fence = fence_get(man->move);
+       spin_unlock(&man->move_lock);
+
+       if (fence) {
+               ret = fence_wait(fence, false);
+               fence_put(fence);
+               if (ret) {
+                       if (allow_errors) {
+                               return ret;
+                       } else {
+                               pr_err("Cleanup eviction failed\n");
+                       }
+               }
+       }
+
        return 0;
 }
 
@@ -1314,6 +1343,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
                       mem_type);
                return ret;
        }
+       fence_put(man->move);
 
        man->use_type = false;
        man->has_type = false;
@@ -1359,6 +1389,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
        man->io_reserve_fastpath = true;
        man->use_io_reserve_lru = false;
        mutex_init(&man->io_reserve_mutex);
+       spin_lock_init(&man->move_lock);
        INIT_LIST_HEAD(&man->io_reserve_lru);
 
        ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1377,6 +1408,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
        man->size = p_size;
 
        INIT_LIST_HEAD(&man->lru);
+       man->move = NULL;
 
        return 0;
 }
@@ -1570,47 +1602,17 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool interruptible, bool no_wait)
 {
-       struct reservation_object_list *fobj;
-       struct reservation_object *resv;
-       struct fence *excl;
-       long timeout = 15 * HZ;
-       int i;
-
-       resv = bo->resv;
-       fobj = reservation_object_get_list(resv);
-       excl = reservation_object_get_excl(resv);
-       if (excl) {
-               if (!fence_is_signaled(excl)) {
-                       if (no_wait)
-                               return -EBUSY;
-
-                       timeout = fence_wait_timeout(excl,
-                                                    interruptible, timeout);
-               }
-       }
-
-       for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
-               struct fence *fence;
-               fence = rcu_dereference_protected(fobj->shared[i],
-                                               reservation_object_held(resv));
-
-               if (!fence_is_signaled(fence)) {
-                       if (no_wait)
-                               return -EBUSY;
-
-                       timeout = fence_wait_timeout(fence,
-                                                    interruptible, timeout);
-               }
-       }
+       long timeout = no_wait ? 0 : 15 * HZ;
 
+       timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
+                                                     interruptible, timeout);
        if (timeout < 0)
                return timeout;
 
        if (timeout == 0)
                return -EBUSY;
 
-       reservation_object_add_excl_fence(resv, NULL);
-       clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+       reservation_object_add_excl_fence(bo->resv, NULL);
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_wait);
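/*
 * Illustrative caller-side semantics of the rewritten ttm_bo_wait() (sketch,
 * not part of this patch), given a reserved struct ttm_buffer_object *bo:
 * no_wait now maps to a zero timeout inside
 * reservation_object_wait_timeout_rcu(), so a busy BO fails fast.
 */
int ret;

ret = ttm_bo_wait(bo, false, true);  /* non-blocking probe: -EBUSY if busy */

ret = ttm_bo_wait(bo, true, false);  /* interruptible, bounded by 15 * HZ */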
@@ -1680,14 +1682,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
        ttm_bo_list_ref_sub(bo, put_count, true);
 
        /**
-        * Wait for GPU, then move to system cached.
+        * Move to system cached.
         */
 
-       ret = ttm_bo_wait(bo, false, false);
-
-       if (unlikely(ret != 0))
-               goto out;
-
        if ((bo->mem.placement & swap_placement) != swap_placement) {
                struct ttm_mem_reg evict_mem;
 
@@ -1702,6 +1699,14 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                        goto out;
        }
 
+       /**
+        * Make sure BO is idle.
+        */
+
+       ret = ttm_bo_wait(bo, false, false);
+       if (unlikely(ret != 0))
+               goto out;
+
        ttm_bo_unmap_virtual(bo);
 
        /**
index d9831559706eb9312cf4803ac43e7113a6e41c5c..4da0e784f9e7dd5851b24d5bfcfe8eac764d8f06 100644
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
        int ret;
 
        if (old_mem->mem_type != TTM_PL_SYSTEM) {
-               ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
@@ -321,7 +320,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                      bool evict, bool no_wait_gpu,
+                      bool evict, bool interruptible,
+                      bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -337,6 +337,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
        unsigned long add = 0;
        int dir;
 
+       ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+       if (ret)
+               return ret;
+
        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
@@ -401,8 +405,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
 
-       if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
-               ttm_tt_unbind(ttm);
+       if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }
@@ -462,6 +465,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
+       fbo->moving = NULL;
        drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);
 
@@ -634,7 +638,6 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct fence *fence,
                              bool evict,
-                             bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -649,9 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                if (ret)
                        return ret;
 
-               if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
-                   (bo->ttm != NULL)) {
-                       ttm_tt_unbind(bo->ttm);
+               if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
@@ -665,7 +666,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                 * operation has completed.
                 */
 
-               set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+               fence_put(bo->moving);
+               bo->moving = fence_get(fence);
 
                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
@@ -694,3 +696,95 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+                        struct fence *fence, bool evict,
+                        struct ttm_mem_reg *new_mem)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_mem_reg *old_mem = &bo->mem;
+
+       struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
+       struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
+
+       int ret;
+
+       reservation_object_add_excl_fence(bo->resv, fence);
+
+       if (!evict) {
+               struct ttm_buffer_object *ghost_obj;
+
+               /**
+                * This should help pipeline ordinary buffer moves.
+                *
+                * Hang old buffer memory on a new buffer object,
+                * and leave it to be released when the GPU
+                * operation has completed.
+                */
+
+               fence_put(bo->moving);
+               bo->moving = fence_get(fence);
+
+               ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+               if (ret)
+                       return ret;
+
+               reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
+               /**
+                * If we're not moving to fixed memory, the TTM object
+                * needs to stay alive. Otherwise hang it on the ghost
+                * bo to be unbound and destroyed.
+                */
+
+               if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
+                       ghost_obj->ttm = NULL;
+               else
+                       bo->ttm = NULL;
+
+               ttm_bo_unreserve(ghost_obj);
+               ttm_bo_unref(&ghost_obj);
+
+       } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+
+               /**
+                * The BO doesn't have a TTM that we need to bind/unbind. Just
+                * remember this eviction and free up the allocation.
+                */
+
+               spin_lock(&from->move_lock);
+               if (!from->move || fence_is_later(fence, from->move)) {
+                       fence_put(from->move);
+                       from->move = fence_get(fence);
+               }
+               spin_unlock(&from->move_lock);
+
+               ttm_bo_free_old_node(bo);
+
+               fence_put(bo->moving);
+               bo->moving = fence_get(fence);
+
+       } else {
+               /**
+                * Last resort, wait for the move to be completed.
+                *
+                * Should never happen in practice.
+                */
+
+               ret = ttm_bo_wait(bo, false, false);
+               if (ret)
+                       return ret;
+
+               if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
+                       ttm_tt_destroy(bo->ttm);
+                       bo->ttm = NULL;
+               }
+               ttm_bo_free_old_node(bo);
+       }
+
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+
+       return 0;
+}
+EXPORT_SYMBOL(ttm_bo_pipeline_move);
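/*
 * Hypothetical usage sketch, not from this patch: a driver ->move() callback
 * that schedules the copy on its DMA engine and hands the resulting fence to
 * ttm_bo_pipeline_move() instead of blocking in ttm_bo_move_accel_cleanup().
 * foo_copy_buffer() is a made-up driver hook.
 */
static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
		       bool interruptible, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct fence *fence;
	int ret;

	/* Kick off the copy in hardware and obtain its completion fence. */
	ret = foo_copy_buffer(bo, &bo->mem, new_mem, &fence);
	if (ret)
		return ret;

	/* Publish the fence and fix up bo->mem without waiting for the copy. */
	ret = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	fence_put(fence);
	return ret;
}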
index 3216878bced3d3debd16cca931589706de897795..a6ed9d5e5167fc935f0fceb8bc314db843959c19 100644
@@ -48,15 +48,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 {
        int ret = 0;
 
-       if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+       if (likely(!bo->moving))
                goto out_unlock;
 
        /*
         * Quick non-stalling check for idle.
         */
-       ret = ttm_bo_wait(bo, false, true);
-       if (likely(ret == 0))
-               goto out_unlock;
+       if (fence_is_signaled(bo->moving))
+               goto out_clear;
 
        /*
         * If possible, avoid waiting for GPU with mmap_sem
@@ -68,17 +67,23 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                        goto out_unlock;
 
                up_read(&vma->vm_mm->mmap_sem);
-               (void) ttm_bo_wait(bo, true, false);
+               (void) fence_wait(bo->moving, true);
                goto out_unlock;
        }
 
        /*
         * Ordinary wait.
         */
-       ret = ttm_bo_wait(bo, true, false);
-       if (unlikely(ret != 0))
+       ret = fence_wait(bo->moving, true);
+       if (unlikely(ret != 0)) {
                ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;
+               goto out_unlock;
+       }
+
+out_clear:
+       fence_put(bo->moving);
+       bo->moving = NULL;
 
 out_unlock:
        return ret;
index 077ae9b2865dcda4bb8d9e774a8b4a5cf498bc84..d28d4333dcce091fb36c6d6242b2328e8eb9f3e2 100644
@@ -166,11 +166,15 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
 void ttm_tt_destroy(struct ttm_tt *ttm)
 {
-       if (unlikely(ttm == NULL))
+       int ret;
+
+       if (ttm == NULL)
                return;
 
        if (ttm->state == tt_bound) {
-               ttm_tt_unbind(ttm);
+               ret = ttm->func->unbind(ttm);
+               BUG_ON(ret);
+               ttm->state = tt_unbound;
        }
 
        if (ttm->state == tt_unbound)
@@ -251,17 +255,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
 
-void ttm_tt_unbind(struct ttm_tt *ttm)
-{
-       int ret;
-
-       if (ttm->state == tt_bound) {
-               ret = ttm->func->unbind(ttm);
-               BUG_ON(ret);
-               ttm->state = tt_unbound;
-       }
-}
-
 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
        int ret = 0;
index 4c0f26a644a395c33f48b7a3a5befe43aa619e36..8fc2b731b59a613fddce5853981e1ab8fd982837 100644
@@ -46,12 +46,17 @@ struct vc4_crtc {
        const struct vc4_crtc_data *data;
        void __iomem *regs;
 
+       /* Timestamp at start of vblank irq - unaffected by lock delays. */
+       ktime_t t_vblank;
+
        /* Which HVS channel we're using for our CRTC. */
        int channel;
 
        u8 lut_r[256];
        u8 lut_g[256];
        u8 lut_b[256];
+       /* Size in pixels of the COB memory allocated to this CRTC. */
+       u32 cob_size;
 
        struct drm_pending_vblank_event *event;
 };
@@ -146,6 +151,144 @@ int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
 }
 #endif
 
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+                           unsigned int flags, int *vpos, int *hpos,
+                           ktime_t *stime, ktime_t *etime,
+                           const struct drm_display_mode *mode)
+{
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+       u32 val;
+       int fifo_lines;
+       int vblank_lines;
+       int ret = 0;
+
+       /*
+        * XXX Doesn't work well in interlaced mode yet, partially due
+        * to problems in vc4 KMS or DRM core interlaced mode handling,
+        * so disable it there for now.
+        */
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               return ret;
+
+       /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+       /* Get optional system timestamp before query. */
+       if (stime)
+               *stime = ktime_get();
+
+       /*
+        * Read vertical scanline which is currently composed for our
+        * pixelvalve by the HVS, and also the scaler status.
+        */
+       val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel));
+
+       /* Get optional system timestamp after query. */
+       if (etime)
+               *etime = ktime_get();
+
+       /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+       /* Vertical position of hvs composed scanline. */
+       *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
+       /* No hpos info available. */
+       if (hpos)
+               *hpos = 0;
+
+       /* This is the offset we need for translating hvs -> pv scanout pos. */
+       fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
+
+       if (fifo_lines > 0)
+               ret |= DRM_SCANOUTPOS_VALID;
+
+       /* HVS more than fifo_lines into frame for compositing? */
+       if (*vpos > fifo_lines) {
+               /*
+                * We are in active scanout and can get some meaningful results
+                * from HVS. The actual PV scanout cannot trail behind more
+                * than fifo_lines as that is the fifo's capacity. Assume that
+                * in active scanout the HVS and PV work in lockstep wrt. HVS
+                * refilling the fifo and PV consuming from the fifo, i.e.
+                * whenever the PV consumes and frees up a scanline in the
+                * fifo, the HVS will immediately refill it, therefore
+                * incrementing vpos. Therefore we choose HVS read position -
+                * fifo size in scanlines as an estimate of the real scanout
+                * position of the PV.
+                */
+               *vpos -= fifo_lines + 1;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       *vpos /= 2;
+
+               ret |= DRM_SCANOUTPOS_ACCURATE;
+               return ret;
+       }
+
+       /*
+        * Less: This happens when we are in vblank and the HVS, after getting
+        * the VSTART restart signal from the PV, just started refilling its
+        * fifo with new lines from the top-most lines of the new framebuffers.
+        * The PV does not scan out in vblank, so does not remove lines from
+        * the fifo, so the fifo will be full quickly and the HVS has to pause.
+        * We can't get meaningful readings wrt. scanline position of the PV
+        * and need to make things up in an approximate but consistent way.
+        */
+       ret |= DRM_SCANOUTPOS_IN_VBLANK;
+       vblank_lines = mode->crtc_vtotal - mode->crtc_vdisplay;
+
+       if (flags & DRM_CALLED_FROM_VBLIRQ) {
+               /*
+                * Assume the irq handler got called close to first
+                * line of vblank, so PV has about a full vblank
+                * scanlines to go, and as a base timestamp use the
+                * one taken at entry into vblank irq handler, so it
+                * is not affected by random delays due to lock
+                * contention on event_lock or vblank_time lock in
+                * the core.
+                */
+               *vpos = -vblank_lines;
+
+               if (stime)
+                       *stime = vc4_crtc->t_vblank;
+               if (etime)
+                       *etime = vc4_crtc->t_vblank;
+
+               /*
+                * If the HVS fifo is not yet full then we know for certain
+                * we are at the very beginning of vblank, as the hvs just
+                * started refilling, and the stime and etime timestamps
+                * truly correspond to start of vblank.
+                */
+               if ((val & SCALER_DISPSTATX_FULL) != SCALER_DISPSTATX_FULL)
+                       ret |= DRM_SCANOUTPOS_ACCURATE;
+       } else {
+               /*
+                * No clue where we are inside vblank. Return a vpos of zero,
+                * which will cause calling code to just return the etime
+                * timestamp uncorrected. At least this is no worse than the
+                * standard fallback.
+                */
+               *vpos = 0;
+       }
+
+       return ret;
+}
+
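/*
 * Worked example with assumed numbers, not from this patch: translating the
 * HVS compose position into an estimated PV scanout position. Buildable as
 * a standalone host program.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cob_size = 20484;          /* assumed COB allocation, pixels */
	unsigned int hdisplay = 1920;           /* mode->crtc_hdisplay */
	int fifo_lines = cob_size / hdisplay;   /* 10 full scanlines of fifo */
	int vpos = 100;                         /* SCALER_DISPSTATX_LINE readout */

	if (vpos > fifo_lines)          /* active scanout: PV trails the HVS */
		vpos -= fifo_lines + 1; /* estimated PV line: 100 - 11 = 89 */

	printf("fifo_lines=%d estimated vpos=%d\n", fifo_lines, vpos);
	return 0;
}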
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+                                 int *max_error, struct timeval *vblank_time,
+                                 unsigned flags)
+{
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+       struct drm_crtc *crtc = &vc4_crtc->base;
+       struct drm_crtc_state *state = crtc->state;
+
+       /* Helper routine in DRM core does all the work: */
+       return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc_id, max_error,
+                                                    vblank_time, flags,
+                                                    &state->adjusted_mode);
+}
+
 static void vc4_crtc_destroy(struct drm_crtc *crtc)
 {
        drm_crtc_cleanup(crtc);
@@ -449,14 +592,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
        WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-       HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-                 vc4_state->mm.start);
-
-       if (debug_dump_regs) {
-               DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
-               vc4_hvs_dump_state(dev);
-       }
-
        if (crtc->state->event) {
                unsigned long flags;
 
@@ -466,8 +601,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
                spin_lock_irqsave(&dev->event_lock, flags);
                vc4_crtc->event = crtc->state->event;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
                crtc->state->event = NULL;
+
+               HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+                         vc4_state->mm.start);
+
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       } else {
+               HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+                         vc4_state->mm.start);
+       }
+
+       if (debug_dump_regs) {
+               DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+               vc4_hvs_dump_state(dev);
        }
 }
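/*
 * Sketch, not part of this patch, of the invariant the reordering above
 * establishes: the display list pointer is written while event_lock is held
 * whenever a flip event is pending, and the vblank IRQ path (below) only
 * completes the event once the HVS reports it is scanning out that list.
 *
 *	spin_lock_irqsave(&dev->event_lock, flags);
 *	vc4_crtc->event = crtc->state->event;          // pending flip event
 *	HVS_WRITE(SCALER_DISPLISTX(chan), mm.start);   // arm new display list
 *	spin_unlock_irqrestore(&dev->event_lock, flags);
 *
 *	// IRQ handler: complete only once the HVS has latched the new list.
 *	if (vc4_crtc->event && mm.start == HVS_READ(SCALER_DISPLACTX(chan)))
 *		drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
 */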
 
@@ -493,12 +640,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
 {
        struct drm_crtc *crtc = &vc4_crtc->base;
        struct drm_device *dev = crtc->dev;
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+       u32 chan = vc4_crtc->channel;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (vc4_crtc->event) {
+       if (vc4_crtc->event &&
+           (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
                drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
                vc4_crtc->event = NULL;
+               drm_crtc_vblank_put(crtc);
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
@@ -510,6 +662,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
        irqreturn_t ret = IRQ_NONE;
 
        if (stat & PV_INT_VFP_START) {
+               vc4_crtc->t_vblank = ktime_get();
                CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
                drm_crtc_handle_vblank(&vc4_crtc->base);
                vc4_crtc_handle_page_flip(vc4_crtc);
@@ -549,6 +702,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }
 
+       drm_crtc_vblank_put(crtc);
        drm_framebuffer_unreference(flip_state->fb);
        kfree(flip_state);
 
@@ -591,6 +745,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
                return ret;
        }
 
+       WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
        /* Immediately update the plane's legacy fb pointer, so that later
         * modeset prep sees the state that will be present when the semaphore
         * is released.
@@ -711,6 +867,22 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
        }
 }
 
+static void
+vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc)
+{
+       struct drm_device *drm = vc4_crtc->base.dev;
+       struct vc4_dev *vc4 = to_vc4_dev(drm);
+       u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel));
+       /* Top/base are supposed to be 4-pixel aligned, but the
+        * Raspberry Pi firmware fills the low bits (which are
+        * presumably ignored).
+        */
+       u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
+       u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+
+       vc4_crtc->cob_size = top - base + 4;
+}
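
A hedged worked example of the computation above, using a made-up
DISPBASE readback (field layout per the SCALER_DISPBASEX_* defines added
later in this series):

	u32 dispbase = 0x0ffc0000;                   /* hypothetical value */
	u32 top  = ((dispbase >> 16) & 0xffff) & ~3; /* TOP field  -> 0x0ffc */
	u32 base = (dispbase & 0xffff) & ~3;         /* BASE field -> 0x0000 */
	u32 cob_size = top - base + 4;               /* inclusive  -> 4096 px */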
+
 static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -787,6 +959,8 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
                crtc->cursor = cursor_plane;
        }
 
+       vc4_crtc_get_cob_allocation(vc4_crtc);
+
        CRTC_WRITE(PV_INTEN, 0);
        CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
        ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
index dba1114297e45c26b2d26ec475c90cb7a48af153..275fedbdbd9eae420939627e7fcdf9236cfce4fa 100644 (file)
@@ -227,14 +227,12 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
 {
        struct drm_connector *connector = NULL;
        struct vc4_dpi_connector *dpi_connector;
-       int ret = 0;
 
        dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector),
                                     GFP_KERNEL);
-       if (!dpi_connector) {
-               ret = -ENOMEM;
-               goto fail;
-       }
+       if (!dpi_connector)
+               return ERR_PTR(-ENOMEM);
+
        connector = &dpi_connector->base;
 
        dpi_connector->encoder = dpi->encoder;
@@ -251,12 +249,6 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
        drm_mode_connector_attach_encoder(connector, dpi->encoder);
 
        return connector;
-
- fail:
-       if (connector)
-               vc4_dpi_connector_destroy(connector);
-
-       return ERR_PTR(ret);
 }
 
 static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
index 493bb580a4138ba6b572d9a285e3e88830e9576d..8b42d31a7f0e8b61ed776200841285d7d5687dbb 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include "drm_fb_cma_helper.h"
 
 #include "uapi/drm/vc4_drm.h"
@@ -43,6 +44,49 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
        return map;
 }
 
+static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv)
+{
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct drm_vc4_get_param *args = data;
+       int ret;
+
+       if (args->pad != 0)
+               return -EINVAL;
+
+       switch (args->param) {
+       case DRM_VC4_PARAM_V3D_IDENT0:
+               ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+               if (ret)
+                       return ret;
+               args->value = V3D_READ(V3D_IDENT0);
+               pm_runtime_put(&vc4->v3d->pdev->dev);
+               break;
+       case DRM_VC4_PARAM_V3D_IDENT1:
+               ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+               if (ret)
+                       return ret;
+               args->value = V3D_READ(V3D_IDENT1);
+               pm_runtime_put(&vc4->v3d->pdev->dev);
+               break;
+       case DRM_VC4_PARAM_V3D_IDENT2:
+               ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+               if (ret)
+                       return ret;
+               args->value = V3D_READ(V3D_IDENT2);
+               pm_runtime_put(&vc4->v3d->pdev->dev);
+               break;
+       case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
+               args->value = true;
+               break;
+       default:
+               DRM_DEBUG("Unknown parameter %d\n", args->param);
+               return -EINVAL;
+       }
+
+       return 0;
+}
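
A minimal userspace sketch of driving the new ioctl, assuming only the
uapi names from vc4_drm.h and libdrm's drmIoctl() (the pad field must be
zero, as enforced above):

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include "vc4_drm.h"

	static int vc4_get_param(int fd, uint32_t param, uint64_t *value)
	{
		struct drm_vc4_get_param p;

		memset(&p, 0, sizeof(p));	/* zeroes the mandatory pad */
		p.param = param;

		if (drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &p))
			return -1;

		*value = p.value;
		return 0;
	}

A user such as Mesa can then probe e.g. DRM_VC4_PARAM_SUPPORTS_BRANCHES
before deciding to emit shaders that contain control flow.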
+
 static void vc4_lastclose(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -65,14 +109,15 @@ static const struct file_operations vc4_drm_fops = {
 };
 
 static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
-       DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+       DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
                          DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(VC4_GET_PARAM, vc4_get_param_ioctl, DRM_RENDER_ALLOW),
 };
 
 static struct drm_driver vc4_drm_driver = {
@@ -90,7 +135,9 @@ static struct drm_driver vc4_drm_driver = {
 
        .enable_vblank = vc4_enable_vblank,
        .disable_vblank = vc4_disable_vblank,
-       .get_vblank_counter = drm_vblank_count,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
+       .get_scanout_position = vc4_crtc_get_scanoutpos,
+       .get_vblank_timestamp = vc4_crtc_get_vblank_timestamp,
 
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = vc4_debugfs_init,
@@ -194,8 +241,6 @@ static int vc4_drm_bind(struct device *dev)
        vc4_bo_cache_init(drm);
 
        drm_mode_config_init(drm);
-       if (ret)
-               goto unref;
 
        vc4_gem_init(drm);
 
@@ -217,7 +262,6 @@ static int vc4_drm_bind(struct device *dev)
        component_unbind_all(dev, drm);
 gem_destroy:
        vc4_gem_destroy(drm);
-unref:
        drm_dev_unref(drm);
        vc4_bo_cache_destroy(drm);
        return ret;
@@ -245,8 +289,8 @@ static const struct component_master_ops vc4_drm_ops = {
 static struct platform_driver *const component_drivers[] = {
        &vc4_hdmi_driver,
        &vc4_dpi_driver,
-       &vc4_crtc_driver,
        &vc4_hvs_driver,
+       &vc4_crtc_driver,
        &vc4_v3d_driver,
 };
 
index c799baabc0084d27ab16667aa748a97a4984839d..489e3de0c050df0eb3022afa421c579449c3ebdf 100644 (file)
@@ -355,6 +355,9 @@ struct vc4_validated_shader_info {
        uint32_t uniforms_src_size;
        uint32_t num_texture_samples;
        struct vc4_texture_sample_info *texture_samples;
+
+       uint32_t num_uniform_addr_offsets;
+       uint32_t *uniform_addr_offsets;
 };
 
 /**
@@ -415,6 +418,13 @@ extern struct platform_driver vc4_crtc_driver;
 int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
 void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
 int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+                           unsigned int flags, int *vpos, int *hpos,
+                           ktime_t *stime, ktime_t *etime,
+                           const struct drm_display_mode *mode);
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+                                 int *max_error, struct timeval *vblank_time,
+                                 unsigned flags);
 
 /* vc4_debugfs.c */
 int vc4_debugfs_init(struct drm_minor *minor);
index 68df91c3f8600c0cb30afc904cf3af55729c3466..4452f3631cacea37bbd5dc8a594367631e308adc 100644 (file)
@@ -456,12 +456,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
        if (IS_ERR(hdmi->hd_regs))
                return PTR_ERR(hdmi->hd_regs);
 
-       ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
-       if (!ddc_node) {
-               DRM_ERROR("Failed to find ddc node in device tree\n");
-               return -ENODEV;
-       }
-
        hdmi->pixel_clock = devm_clk_get(dev, "pixel");
        if (IS_ERR(hdmi->pixel_clock)) {
                DRM_ERROR("Failed to get pixel clock\n");
@@ -473,7 +467,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
                return PTR_ERR(hdmi->hsm_clock);
        }
 
+       ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
+       if (!ddc_node) {
+               DRM_ERROR("Failed to find ddc node in device tree\n");
+               return -ENODEV;
+       }
+
        hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+       of_node_put(ddc_node);
        if (!hdmi->ddc) {
                DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
                return -EPROBE_DEFER;
index 0fa3c8128875e6efb8c1000f10e02095887b2d99..4ac894d993cd80472777b2a3bacc2fcf0dd52810 100644 (file)
@@ -118,10 +118,18 @@ static int vc4_atomic_commit(struct drm_device *dev,
                return -ENOMEM;
 
        /* Make sure that any outstanding modesets have finished. */
-       ret = down_interruptible(&vc4->async_modeset);
-       if (ret) {
-               kfree(c);
-               return ret;
+       if (nonblock) {
+               ret = down_trylock(&vc4->async_modeset);
+               if (ret) {
+                       kfree(c);
+                       return -EBUSY;
+               }
+       } else {
+               ret = down_interruptible(&vc4->async_modeset);
+               if (ret) {
+                       kfree(c);
+                       return ret;
+               }
        }
 
        ret = drm_atomic_helper_prepare_planes(dev, state);
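
The split matters because a nonblocking commit must not sleep waiting for
the previous modeset to finish: down_trylock() fails immediately and the
caller gets -EBUSY to retry later. From userspace this corresponds
(hedged sketch, assuming libdrm's atomic API and its -errno return
convention) to:

	int ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	if (ret == -EBUSY) {
		/* a commit is still in flight; try again next frame */
	}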
index 5d2c3d9fd17ac48bb4b2dfb46ca338f733e7b75d..29e4b400e25e34a63e4710e9edbba62541b9cdc5 100644 (file)
@@ -93,6 +93,14 @@ static const struct hvs_format {
                .drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
                .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
        },
+       {
+               .drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+               .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = true,
+       },
+       {
+               .drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+               .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = false,
+       },
        {
                .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
                .pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
index d5c2f3c85ebbf6b1ae7ca731d12d604ca24b176e..f4e795a0d3f6125519431da466b41e12726d00f0 100644 (file)
@@ -70,7 +70,7 @@ enum qpu_raddr {
        QPU_R_ELEM_QPU = 38,
        QPU_R_NOP,
        QPU_R_XY_PIXEL_COORD = 41,
-       QPU_R_MS_REV_FLAGS = 41,
+       QPU_R_MS_REV_FLAGS = 42,
        QPU_R_VPM = 48,
        QPU_R_VPM_LD_BUSY,
        QPU_R_VPM_LD_WAIT,
@@ -230,6 +230,15 @@ enum qpu_unpack_r4 {
 #define QPU_COND_MUL_SHIFT              46
 #define QPU_COND_MUL_MASK               QPU_MASK(48, 46)
 
+#define QPU_BRANCH_COND_SHIFT           52
+#define QPU_BRANCH_COND_MASK            QPU_MASK(55, 52)
+
+#define QPU_BRANCH_REL                  ((uint64_t)1 << 51)
+#define QPU_BRANCH_REG                  ((uint64_t)1 << 50)
+
+#define QPU_BRANCH_RADDR_A_SHIFT        45
+#define QPU_BRANCH_RADDR_A_MASK         QPU_MASK(49, 45)
+
 #define QPU_SF                          ((uint64_t)1 << 45)
 
 #define QPU_WADDR_ADD_SHIFT             38
@@ -261,4 +270,10 @@ enum qpu_unpack_r4 {
 #define QPU_OP_ADD_SHIFT                24
 #define QPU_OP_ADD_MASK                 QPU_MASK(28, 24)
 
+#define QPU_LOAD_IMM_SHIFT              0
+#define QPU_LOAD_IMM_MASK               QPU_MASK(31, 0)
+
+#define QPU_BRANCH_TARGET_SHIFT         0
+#define QPU_BRANCH_TARGET_MASK          QPU_MASK(31, 0)
+
 #endif /* VC4_QPU_DEFINES_H */
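
A sketch of one plausible consumer of the new branch fields, built only
on the QPU_GET_FIELD() helper and signal definitions already in this
header (illustration, not driver code):

	static bool qpu_branch_is_relative(uint64_t inst, int32_t *byte_offset)
	{
		if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_BRANCH)
			return false;

		/* register-relative branches carry no usable immediate */
		if ((inst & QPU_BRANCH_REG) || !(inst & QPU_BRANCH_REL))
			return false;

		/* byte offset, relative to the end of the delay slots */
		*byte_offset = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
		return true;
	}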
index 6163b95c54111bde8c20bfd8e2efd233cb90b6ae..160942a9180e716e6c6d890214f0fc6e0e081ecd 100644 (file)
 #define SCALER_DISPLACT0                        0x00000030
 #define SCALER_DISPLACT1                        0x00000034
 #define SCALER_DISPLACT2                        0x00000038
+#define SCALER_DISPLACTX(x)                    (SCALER_DISPLACT0 +     \
+                                                (x) * (SCALER_DISPLACT1 - \
+                                                       SCALER_DISPLACT0))
+
 #define SCALER_DISPCTRL0                        0x00000040
 # define SCALER_DISPCTRLX_ENABLE               BIT(31)
 # define SCALER_DISPCTRLX_RESET                        BIT(30)
 # define SCALER_DISPBKGND_FILL                 BIT(24)
 
 #define SCALER_DISPSTAT0                        0x00000048
-#define SCALER_DISPBASE0                        0x0000004c
 # define SCALER_DISPSTATX_MODE_MASK            VC4_MASK(31, 30)
 # define SCALER_DISPSTATX_MODE_SHIFT           30
 # define SCALER_DISPSTATX_MODE_DISABLED                0
 # define SCALER_DISPSTATX_MODE_EOF             3
 # define SCALER_DISPSTATX_FULL                 BIT(29)
 # define SCALER_DISPSTATX_EMPTY                        BIT(28)
+# define SCALER_DISPSTATX_FRAME_COUNT_MASK     VC4_MASK(17, 12)
+# define SCALER_DISPSTATX_FRAME_COUNT_SHIFT    12
+# define SCALER_DISPSTATX_LINE_MASK            VC4_MASK(11, 0)
+# define SCALER_DISPSTATX_LINE_SHIFT           0
+
+#define SCALER_DISPBASE0                        0x0000004c
+/* Last pixel in the COB (display FIFO memory) allocated to this HVS
+ * channel.  Must be 4-pixel aligned (and thus 4 pixels less than the
+ * next COB base).
+ */
+# define SCALER_DISPBASEX_TOP_MASK             VC4_MASK(31, 16)
+# define SCALER_DISPBASEX_TOP_SHIFT            16
+/* First pixel in the COB (display FIFO memory) allocated to this HVS
+ * channel.  Must be 4-pixel aligned.
+ */
+# define SCALER_DISPBASEX_BASE_MASK            VC4_MASK(15, 0)
+# define SCALER_DISPBASEX_BASE_SHIFT           0
+
 #define SCALER_DISPCTRL1                        0x00000050
 #define SCALER_DISPBKGND1                       0x00000054
 #define SCALER_DISPBKGNDX(x)                   (SCALER_DISPBKGND0 +        \
                                                 (x) * (SCALER_DISPSTAT1 - \
                                                        SCALER_DISPSTAT0))
 #define SCALER_DISPBASE1                        0x0000005c
+#define SCALER_DISPBASEX(x)                    (SCALER_DISPBASE0 +        \
+                                                (x) * (SCALER_DISPBASE1 - \
+                                                       SCALER_DISPBASE0))
 #define SCALER_DISPCTRL2                        0x00000060
 #define SCALER_DISPCTRLX(x)                    (SCALER_DISPCTRL0 +        \
                                                 (x) * (SCALER_DISPCTRL1 - \
index 24c2c746e8f397aafd0285323508bd2ee351d3a2..9ce1d0adf8826e893a58529328ce3f53f05150ee 100644 (file)
@@ -802,7 +802,7 @@ validate_gl_shader_rec(struct drm_device *dev,
                uint32_t src_offset = *(uint32_t *)(pkt_u + o);
                uint32_t *texture_handles_u;
                void *uniform_data_u;
-               uint32_t tex;
+               uint32_t tex, uni;
 
                *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
 
@@ -840,6 +840,17 @@ validate_gl_shader_rec(struct drm_device *dev,
                        }
                }
 
+               /* Fill in the uniform slots that need this shader's
+                * start-of-uniforms address (used for resetting the uniform
+                * stream in the presence of control flow).
+                */
+               for (uni = 0;
+                    uni < validated_shader->num_uniform_addr_offsets;
+                    uni++) {
+                       uint32_t o = validated_shader->uniform_addr_offsets[uni];
+                       ((uint32_t *)exec->uniforms_v)[o] = exec->uniforms_p;
+               }
+
                *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
 
                exec->uniforms_u += validated_shader->uniforms_src_size;
index f67124b4c5348706136e4c0f9fe06ea10b77b230..46527e989ce37839b0bd76d27dd0d5a4821bf58c 100644 (file)
 #include "vc4_drv.h"
 #include "vc4_qpu_defines.h"
 
+#define LIVE_REG_COUNT (32 + 32 + 4)
+
 struct vc4_shader_validation_state {
+       /* Current IP being validated. */
+       uint32_t ip;
+
+       /* IP at the end of the BO, do not read shader[max_ip] */
+       uint32_t max_ip;
+
+       uint64_t *shader;
+
        struct vc4_texture_sample_info tmu_setup[2];
        int tmu_write_count[2];
 
@@ -49,8 +59,30 @@ struct vc4_shader_validation_state {
         *
         * This is used for the validation of direct address memory reads.
         */
-       uint32_t live_min_clamp_offsets[32 + 32 + 4];
-       bool live_max_clamp_regs[32 + 32 + 4];
+       uint32_t live_min_clamp_offsets[LIVE_REG_COUNT];
+       bool live_max_clamp_regs[LIVE_REG_COUNT];
+       uint32_t live_immediates[LIVE_REG_COUNT];
+
+       /* Bitfield of which IPs are used as branch targets.
+        *
+        * Used for validation that the uniform stream is updated at the right
+        * points and clearing the texturing/clamping state.
+        */
+       unsigned long *branch_targets;
+
+       /* Set when entering a basic block, and cleared when the uniform
+        * address update is found.  This is used to make sure that we don't
+        * read uniforms when the address is undefined.
+        */
+       bool needs_uniform_address_update;
+
+       /* Set when we find a backwards branch.  If the branch is backwards,
+        * the target is probably doing an address reset to read uniforms,
+        * and so we need to be sure that a uniforms address is present in the
+        * stream, even if the shader didn't need to read uniforms in later
+        * basic blocks.
+        */
+       bool needs_uniform_address_for_loop;
 };
 
 static uint32_t
@@ -129,11 +161,11 @@ record_texture_sample(struct vc4_validated_shader_info *validated_shader,
 }
 
 static bool
-check_tmu_write(uint64_t inst,
-               struct vc4_validated_shader_info *validated_shader,
+check_tmu_write(struct vc4_validated_shader_info *validated_shader,
                struct vc4_shader_validation_state *validation_state,
                bool is_mul)
 {
+       uint64_t inst = validation_state->shader[validation_state->ip];
        uint32_t waddr = (is_mul ?
                          QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
                          QPU_GET_FIELD(inst, QPU_WADDR_ADD));
@@ -162,7 +194,7 @@ check_tmu_write(uint64_t inst,
                        return false;
                }
 
-               /* We assert that the the clamped address is the first
+               /* We assert that the clamped address is the first
                 * argument, and the UBO base address is the second argument.
                 * This is arbitrary, but simpler than supporting flipping the
                 * two either way.
@@ -212,8 +244,14 @@ check_tmu_write(uint64_t inst,
        /* Since direct uses a RADDR uniform reference, it will get counted in
         * check_instruction_reads()
         */
-       if (!is_direct)
+       if (!is_direct) {
+               if (validation_state->needs_uniform_address_update) {
+                       DRM_ERROR("Texturing with undefined uniform address\n");
+                       return false;
+               }
+
                validated_shader->uniforms_size += 4;
+       }
 
        if (submit) {
                if (!record_texture_sample(validated_shader,
@@ -227,23 +265,138 @@ check_tmu_write(uint64_t inst,
        return true;
 }
 
+static bool require_uniform_address_uniform(struct vc4_validated_shader_info *validated_shader)
+{
+       uint32_t o = validated_shader->num_uniform_addr_offsets;
+       uint32_t num_uniforms = validated_shader->uniforms_size / 4;
+
+       validated_shader->uniform_addr_offsets =
+               krealloc(validated_shader->uniform_addr_offsets,
+                        (o + 1) *
+                        sizeof(*validated_shader->uniform_addr_offsets),
+                        GFP_KERNEL);
+       if (!validated_shader->uniform_addr_offsets)
+               return false;
+
+       validated_shader->uniform_addr_offsets[o] = num_uniforms;
+       validated_shader->num_uniform_addr_offsets++;
+
+       return true;
+}
+
 static bool
-check_reg_write(uint64_t inst,
-               struct vc4_validated_shader_info *validated_shader,
+validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
+                              struct vc4_shader_validation_state *validation_state,
+                              bool is_mul)
+{
+       uint64_t inst = validation_state->shader[validation_state->ip];
+       u32 add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+       u32 raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+       u32 raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+       u32 add_lri = raddr_add_a_to_live_reg_index(inst);
+       /* We want our reset to be pointing at whatever uniform follows the
+        * uniforms base address.
+        */
+       u32 expected_offset = validated_shader->uniforms_size + 4;
+
+       /* We only support absolute uniform address changes, and we
+        * require that they be in the current basic block before any
+        * of its uniform reads.
+        *
+        * One could potentially emit more efficient QPU code, by
+        * noticing that (say) an if statement does uniform control
+        * flow for all threads and that the if reads the same number
+        * of uniforms on each side.  However, this scheme is easy to
+        * validate so it's all we allow for now.
+        */
+
+       if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
+               DRM_ERROR("uniforms address change must be "
+                         "normal math\n");
+               return false;
+       }
+
+       if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
+               DRM_ERROR("Uniform address reset must be an ADD.\n");
+               return false;
+       }
+
+       if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
+               DRM_ERROR("Uniform address reset must be unconditional.\n");
+               return false;
+       }
+
+       if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
+           !(inst & QPU_PM)) {
+               DRM_ERROR("No packing allowed on uniforms reset\n");
+               return false;
+       }
+
+       if (add_lri == -1) {
+               DRM_ERROR("First argument of uniform address write must be "
+                         "an immediate value.\n");
+               return false;
+       }
+
+       if (validation_state->live_immediates[add_lri] != expected_offset) {
+               DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
+                         validation_state->live_immediates[add_lri],
+                         expected_offset);
+               return false;
+       }
+
+       if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
+           !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
+               DRM_ERROR("Second argument of uniform address write must be "
+                         "a uniform.\n");
+               return false;
+       }
+
+       validation_state->needs_uniform_address_update = false;
+       validation_state->needs_uniform_address_for_loop = false;
+       return require_uniform_address_uniform(validated_shader);
+}
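
A hedged worked example of the expected_offset check above: if the shader
has consumed 12 bytes of uniforms by the time the reset executes, then

	expected_offset = uniforms_size + 4;	/* 12 + 4 = 16 */

so the live immediate added to the uniforms-base uniform must be exactly
16, which makes the reset point at the uniform immediately after the one
carrying the base address.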
+
+static bool
+check_reg_write(struct vc4_validated_shader_info *validated_shader,
                struct vc4_shader_validation_state *validation_state,
                bool is_mul)
 {
+       uint64_t inst = validation_state->shader[validation_state->ip];
        uint32_t waddr = (is_mul ?
                          QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
                          QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+       uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+       bool ws = inst & QPU_WS;
+       bool is_b = is_mul ^ ws;
+       u32 lri = waddr_to_live_reg_index(waddr, is_b);
+
+       if (lri != -1) {
+               uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
+               uint32_t cond_mul = QPU_GET_FIELD(inst, QPU_COND_MUL);
+
+               if (sig == QPU_SIG_LOAD_IMM &&
+                   QPU_GET_FIELD(inst, QPU_PACK) == QPU_PACK_A_NOP &&
+                   ((is_mul && cond_mul == QPU_COND_ALWAYS) ||
+                    (!is_mul && cond_add == QPU_COND_ALWAYS))) {
+                       validation_state->live_immediates[lri] =
+                               QPU_GET_FIELD(inst, QPU_LOAD_IMM);
+               } else {
+                       validation_state->live_immediates[lri] = ~0;
+               }
+       }
 
        switch (waddr) {
        case QPU_W_UNIFORMS_ADDRESS:
-               /* XXX: We'll probably need to support this for reladdr, but
-                * it's definitely a security-related one.
-                */
-               DRM_ERROR("uniforms address load unsupported\n");
-               return false;
+               if (is_b) {
+                       DRM_ERROR("relative uniforms address change "
+                                 "unsupported\n");
+                       return false;
+               }
+
+               return validate_uniform_address_write(validated_shader,
+                                                     validation_state,
+                                                     is_mul);
 
        case QPU_W_TLB_COLOR_MS:
        case QPU_W_TLB_COLOR_ALL:
@@ -261,7 +414,7 @@ check_reg_write(uint64_t inst,
        case QPU_W_TMU1_T:
        case QPU_W_TMU1_R:
        case QPU_W_TMU1_B:
-               return check_tmu_write(inst, validated_shader, validation_state,
+               return check_tmu_write(validated_shader, validation_state,
                                       is_mul);
 
        case QPU_W_HOST_INT:
@@ -294,10 +447,10 @@ check_reg_write(uint64_t inst,
 }
 
 static void
-track_live_clamps(uint64_t inst,
-                 struct vc4_validated_shader_info *validated_shader,
+track_live_clamps(struct vc4_validated_shader_info *validated_shader,
                  struct vc4_shader_validation_state *validation_state)
 {
+       uint64_t inst = validation_state->shader[validation_state->ip];
        uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
@@ -369,10 +522,10 @@ track_live_clamps(uint64_t inst,
 }
 
 static bool
-check_instruction_writes(uint64_t inst,
-                        struct vc4_validated_shader_info *validated_shader,
+check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
                         struct vc4_shader_validation_state *validation_state)
 {
+       uint64_t inst = validation_state->shader[validation_state->ip];
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        bool ok;
@@ -382,20 +535,44 @@ check_instruction_writes(uint64_t inst,
                return false;
        }
 
-       ok = (check_reg_write(inst, validated_shader, validation_state,
-                             false) &&
-             check_reg_write(inst, validated_shader, validation_state,
-                             true));
+       ok = (check_reg_write(validated_shader, validation_state, false) &&
+             check_reg_write(validated_shader, validation_state, true));
 
-       track_live_clamps(inst, validated_shader, validation_state);
+       track_live_clamps(validated_shader, validation_state);
 
        return ok;
 }
 
 static bool
-check_instruction_reads(uint64_t inst,
-                       struct vc4_validated_shader_info *validated_shader)
+check_branch(uint64_t inst,
+            struct vc4_validated_shader_info *validated_shader,
+            struct vc4_shader_validation_state *validation_state,
+            int ip)
+{
+       int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
+       uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+       uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
+
+       if ((int)branch_imm < 0)
+               validation_state->needs_uniform_address_for_loop = true;
+
+       /* We don't want to have to worry about validation of this, and
+        * there's no need for it.
+        */
+       if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
+               DRM_ERROR("branch instruction at %d wrote a register.\n",
+                         validation_state->ip);
+               return false;
+       }
+
+       return true;
+}
+
+static bool
+check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
+                       struct vc4_shader_validation_state *validation_state)
 {
+       uint64_t inst = validation_state->shader[validation_state->ip];
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
@@ -407,40 +584,204 @@ check_instruction_reads(uint64_t inst,
                 * already be OOM.
                 */
                validated_shader->uniforms_size += 4;
+
+               if (validation_state->needs_uniform_address_update) {
+                       DRM_ERROR("Uniform read with undefined uniform "
+                                 "address\n");
+                       return false;
+               }
+       }
+
+       return true;
+}
+
+/* Make sure that all branches are absolute and point within the shader, and
+ * note their targets for later.
+ */
+static bool
+vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
+{
+       uint32_t max_branch_target = 0;
+       bool found_shader_end = false;
+       int ip;
+       int shader_end_ip = 0;
+       int last_branch = -2;
+
+       for (ip = 0; ip < validation_state->max_ip; ip++) {
+               uint64_t inst = validation_state->shader[ip];
+               int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
+               uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+               uint32_t after_delay_ip = ip + 4;
+               uint32_t branch_target_ip;
+
+               if (sig == QPU_SIG_PROG_END) {
+                       shader_end_ip = ip;
+                       found_shader_end = true;
+                       continue;
+               }
+
+               if (sig != QPU_SIG_BRANCH)
+                       continue;
+
+               if (ip - last_branch < 4) {
+                       DRM_ERROR("Branch at %d during delay slots\n", ip);
+                       return false;
+               }
+               last_branch = ip;
+
+               if (inst & QPU_BRANCH_REG) {
+                       DRM_ERROR("branching from register relative "
+                                 "not supported\n");
+                       return false;
+               }
+
+               if (!(inst & QPU_BRANCH_REL)) {
+                       DRM_ERROR("relative branching required\n");
+                       return false;
+               }
+
+               /* The actual branch target is the instruction after the delay
+                * slots, plus whatever byte offset is in the low 32 bits of
+                * the instruction.  Make sure we're not branching beyond the
+                * end of the shader object.
+                */
+               if (branch_imm % sizeof(inst) != 0) {
+                       DRM_ERROR("branch target not aligned\n");
+                       return false;
+               }
+
+               branch_target_ip = after_delay_ip + (branch_imm >> 3);
+               if (branch_target_ip >= validation_state->max_ip) {
+                       DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
+                                 ip, branch_target_ip,
+                                 validation_state->max_ip);
+                       return false;
+               }
+               set_bit(branch_target_ip, validation_state->branch_targets);
+
+               /* Make sure that the non-branching path is also not outside
+                * the shader.
+                */
+               if (after_delay_ip >= validation_state->max_ip) {
+                       DRM_ERROR("Branch at %d continues past shader end "
+                                 "(%d/%d)\n",
+                                 ip, after_delay_ip, validation_state->max_ip);
+                       return false;
+               }
+               set_bit(after_delay_ip, validation_state->branch_targets);
+               max_branch_target = max(max_branch_target, after_delay_ip);
+
+               /* There are two delay slots after program end is signaled
+                * that are still executed, then we're finished.
+                */
+               if (found_shader_end && ip == shader_end_ip + 2)
+                       break;
+       }
+
+       if (max_branch_target > shader_end_ip) {
+               DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
+               return false;
        }
 
        return true;
 }
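
A worked example of the target arithmetic above (values made up): a
branch at ip 10 has its delay slots at 11..13, so

	after_delay_ip   = ip + 4;			/* = 14 */
	branch_imm       = 24;				/* bytes, 24 % 8 == 0 */
	branch_target_ip = after_delay_ip + (24 >> 3);	/* = 17 */

and both the fall-through ip (14) and the taken target (17) end up set in
branch_targets, so the main pass later resets its tracking state there.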
 
+/* Resets any known state for the shader, used when we may be branched to from
+ * multiple locations in the program (or at shader start).
+ */
+static void
+reset_validation_state(struct vc4_shader_validation_state *validation_state)
+{
+       int i;
+
+       for (i = 0; i < 8; i++)
+               validation_state->tmu_setup[i / 4].p_offset[i % 4] = ~0;
+
+       for (i = 0; i < LIVE_REG_COUNT; i++) {
+               validation_state->live_min_clamp_offsets[i] = ~0;
+               validation_state->live_max_clamp_regs[i] = false;
+               validation_state->live_immediates[i] = ~0;
+       }
+}
+
+static bool
+texturing_in_progress(struct vc4_shader_validation_state *validation_state)
+{
+       return (validation_state->tmu_write_count[0] != 0 ||
+               validation_state->tmu_write_count[1] != 0);
+}
+
+static bool
+vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
+{
+       uint32_t ip = validation_state->ip;
+
+       if (!test_bit(ip, validation_state->branch_targets))
+               return true;
+
+       if (texturing_in_progress(validation_state)) {
+               DRM_ERROR("Branch target landed during TMU setup\n");
+               return false;
+       }
+
+       /* Reset our live values tracking, since this instruction may have
+        * multiple predecessors.
+        *
+        * One could potentially do analysis to determine that, for
+        * example, all predecessors have a live max clamp in the same
+        * register, but we don't bother with that.
+        */
+       reset_validation_state(validation_state);
+
+       /* Since we've entered a basic block from potentially multiple
+        * predecessors, we need the uniforms address to be updated before any
+        * uniforms are read.  We require that after any branch point, the next
+        * uniform to be loaded is a uniform address offset.  That uniform's
+        * offset will be marked by the uniform address register write
+        * validation, or by a one-off check at the end of the program.
+        */
+       validation_state->needs_uniform_address_update = true;
+
+       return true;
+}
+
 struct vc4_validated_shader_info *
 vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
 {
        bool found_shader_end = false;
        int shader_end_ip = 0;
-       uint32_t ip, max_ip;
-       uint64_t *shader;
-       struct vc4_validated_shader_info *validated_shader;
+       uint32_t ip;
+       struct vc4_validated_shader_info *validated_shader = NULL;
        struct vc4_shader_validation_state validation_state;
-       int i;
 
        memset(&validation_state, 0, sizeof(validation_state));
+       validation_state.shader = shader_obj->vaddr;
+       validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
 
-       for (i = 0; i < 8; i++)
-               validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
-       for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
-               validation_state.live_min_clamp_offsets[i] = ~0;
+       reset_validation_state(&validation_state);
 
-       shader = shader_obj->vaddr;
-       max_ip = shader_obj->base.size / sizeof(uint64_t);
+       validation_state.branch_targets =
+               kcalloc(BITS_TO_LONGS(validation_state.max_ip),
+                       sizeof(unsigned long), GFP_KERNEL);
+       if (!validation_state.branch_targets)
+               goto fail;
 
        validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
        if (!validated_shader)
-               return NULL;
+               goto fail;
+
+       if (!vc4_validate_branches(&validation_state))
+               goto fail;
 
-       for (ip = 0; ip < max_ip; ip++) {
-               uint64_t inst = shader[ip];
+       for (ip = 0; ip < validation_state.max_ip; ip++) {
+               uint64_t inst = validation_state.shader[ip];
                uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
 
+               validation_state.ip = ip;
+
+               if (!vc4_handle_branch_target(&validation_state))
+                       goto fail;
+
                switch (sig) {
                case QPU_SIG_NONE:
                case QPU_SIG_WAIT_FOR_SCOREBOARD:
@@ -450,13 +791,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
                case QPU_SIG_LOAD_TMU1:
                case QPU_SIG_PROG_END:
                case QPU_SIG_SMALL_IMM:
-                       if (!check_instruction_writes(inst, validated_shader,
+                       if (!check_instruction_writes(validated_shader,
                                                      &validation_state)) {
                                DRM_ERROR("Bad write at ip %d\n", ip);
                                goto fail;
                        }
 
-                       if (!check_instruction_reads(inst, validated_shader))
+                       if (!check_instruction_reads(validated_shader,
+                                                    &validation_state))
                                goto fail;
 
                        if (sig == QPU_SIG_PROG_END) {
@@ -467,13 +809,18 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
                        break;
 
                case QPU_SIG_LOAD_IMM:
-                       if (!check_instruction_writes(inst, validated_shader,
+                       if (!check_instruction_writes(validated_shader,
                                                      &validation_state)) {
                                DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
                                goto fail;
                        }
                        break;
 
+               case QPU_SIG_BRANCH:
+                       if (!check_branch(inst, validated_shader,
+                                         &validation_state, ip))
+                               goto fail;
+                       break;
                default:
                        DRM_ERROR("Unsupported QPU signal %d at "
                                  "instruction %d\n", sig, ip);
@@ -487,13 +834,28 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
                        break;
        }
 
-       if (ip == max_ip) {
+       if (ip == validation_state.max_ip) {
                DRM_ERROR("shader failed to terminate before "
                          "shader BO end at %zd\n",
                          shader_obj->base.size);
                goto fail;
        }
 
+       /* If we did a backwards branch and we haven't emitted a uniforms
+        * reset since then, we still need the uniforms stream to have the
+        * uniforms address available so that the backwards branch can do its
+        * uniforms reset.
+        *
+        * We could potentially prove that the backwards branch doesn't
+        * contain any uses of uniforms until program exit, but that doesn't
+        * seem to be worth the trouble.
+        */
+       if (validation_state.needs_uniform_address_for_loop) {
+               if (!require_uniform_address_uniform(validated_shader))
+                       goto fail;
+               validated_shader->uniforms_size += 4;
+       }
+
        /* Again, no chance of integer overflow here because the worst case
         * scenario is 8 bytes of uniforms plus handles per 8-byte
         * instruction.
@@ -502,9 +864,12 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
                (validated_shader->uniforms_size +
                 4 * validated_shader->num_texture_samples);
 
+       kfree(validation_state.branch_targets);
+
        return validated_shader;
 
 fail:
+       kfree(validation_state.branch_targets);
        if (validated_shader) {
                kfree(validated_shader->texture_samples);
                kfree(validated_shader);
index a0580815629f52a4c6dc61a22ca330d977991570..80482ac5f95dc71ac06bdd2d3dacc68333e32006 100644 (file)
@@ -375,6 +375,12 @@ static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
+       int ret;
+
+       ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+       if (ret)
+               return ret;
+
        virtio_gpu_move_null(bo, new_mem);
        return 0;
 }
index 6de283c8fa3edf0088ca6ee25193802ce64480bf..f0374f9b56cad3a522119f7d4cf8a8940f445bf0 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/frame.h>
 #include <asm/hypervisor.h>
 #include "drmP.h"
 #include "vmwgfx_msg.h"
@@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
 
        return -EINVAL;
 }
-
+STACK_FRAME_NON_STANDARD(vmw_send_msg);
 
 
 /**
@@ -304,6 +305,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
        return 0;
 }
+STACK_FRAME_NON_STANDARD(vmw_recv_msg);
 
 
 /**
index a18db4d5347cefa99847df944be3a3b364de089a..c5d82a8a2ec9c2498bf9e7ed96823cb14db8afae 100644 (file)
@@ -96,12 +96,12 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
  */
 static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
 {
-       u32 pos = pb->pos;
-       u32 *p = (u32 *)((void *)pb->mapped + pos);
-       WARN_ON(pos == pb->fence);
+       u32 *p = (u32 *)((void *)pb->mapped + pb->pos);
+
+       WARN_ON(pb->pos == pb->fence);
        *(p++) = op1;
        *(p++) = op2;
-       pb->pos = (pos + 8) & (pb->size_bytes - 1);
+       pb->pos = (pb->pos + 8) & (pb->size_bytes - 1);
 }
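
The wrap in host1x_pushbuffer_push() relies on size_bytes being a power
of two, so the AND acts as a cheap modulo. A hedged worked example with a
made-up 4 KiB push buffer:

	u32 size_bytes = 4096;			/* power of two */
	u32 pos = 4088;				/* last 8-byte slot */
	pos = (pos + 8) & (size_bytes - 1);	/* 4096 & 0xfff == 0: wrapped */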
 
 /*
@@ -134,14 +134,19 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
                                     enum cdma_event event)
 {
        for (;;) {
+               struct push_buffer *pb = &cdma->push_buffer;
                unsigned int space;
 
-               if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+               switch (event) {
+               case CDMA_EVENT_SYNC_QUEUE_EMPTY:
                        space = list_empty(&cdma->sync_queue) ? 1 : 0;
-               else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
-                       struct push_buffer *pb = &cdma->push_buffer;
+                       break;
+
+               case CDMA_EVENT_PUSH_BUFFER_SPACE:
                        space = host1x_pushbuffer_space(pb);
-               } else {
+                       break;
+
+               default:
                        WARN_ON(1);
                        return -EINVAL;
                }
@@ -159,12 +164,14 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
                        mutex_lock(&cdma->lock);
                        continue;
                }
+
                cdma->event = event;
 
                mutex_unlock(&cdma->lock);
                down(&cdma->sem);
                mutex_lock(&cdma->lock);
        }
+
        return 0;
 }
 
@@ -234,6 +241,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
                        /* Start timer on next pending syncpt */
                        if (job->timeout)
                                cdma_start_timer_locked(cdma, job);
+
                        break;
                }
 
@@ -247,7 +255,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
                /* Pop push buffer slots */
                if (job->num_slots) {
                        struct push_buffer *pb = &cdma->push_buffer;
+
                        host1x_pushbuffer_pop(pb, job->num_slots);
+
                        if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
                                signal = true;
                }
@@ -269,11 +279,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
 void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
                                   struct device *dev)
 {
-       u32 restart_addr;
-       u32 syncpt_incrs;
-       struct host1x_job *job = NULL;
-       u32 syncpt_val;
        struct host1x *host1x = cdma_to_host1x(cdma);
+       u32 restart_addr, syncpt_incrs, syncpt_val;
+       struct host1x_job *job = NULL;
 
        syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
 
@@ -342,9 +350,11 @@ void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
                syncpt_val += syncpt_incrs;
        }
 
-       /* The following sumbits from the same client may be dependent on the
+       /*
+        * The following submits from the same client may be dependent on the
         * failed submit and therefore they may fail. Force a small timeout
-        * to make the queue cleanup faster */
+        * to make the queue cleanup faster.
+        */
 
        list_for_each_entry_from(job, &cdma->sync_queue, list)
                if (job->client == cdma->timeout.client)
@@ -375,6 +385,7 @@ int host1x_cdma_init(struct host1x_cdma *cdma)
        err = host1x_pushbuffer_init(&cdma->push_buffer);
        if (err)
                return err;
+
        return 0;
 }
 
@@ -410,6 +421,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
                /* init state on first submit with timeout value */
                if (!cdma->timeout.initialized) {
                        int err;
+
                        err = host1x_hw_cdma_timeout_init(host1x, cdma,
                                                          job->syncpt_id);
                        if (err) {
@@ -418,6 +430,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
                        }
                }
        }
+
        if (!cdma->running)
                host1x_hw_cdma_start(host1x, cdma);
 
@@ -448,6 +461,7 @@ void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
                slots_free = host1x_cdma_wait_locked(cdma,
                                                CDMA_EVENT_PUSH_BUFFER_SPACE);
        }
+
        cdma->slots_free = slots_free - 1;
        cdma->slots_used++;
        host1x_pushbuffer_push(pb, op1, op2);
index b4ae3affb987bb92ed7dcd1f3317714b260fd61d..8f437d924c10c700cbf46f060d32c2d902a980f1 100644 (file)
@@ -83,9 +83,10 @@ EXPORT_SYMBOL(host1x_channel_put);
 struct host1x_channel *host1x_channel_request(struct device *dev)
 {
        struct host1x *host = dev_get_drvdata(dev->parent);
-       int max_channels = host->info->nb_channels;
+       unsigned int max_channels = host->info->nb_channels;
        struct host1x_channel *channel = NULL;
-       int index, err;
+       unsigned long index;
+       int err;
 
        mutex_lock(&host->chlist_mutex);
 
index ee3d12b51c50a036b89c48c3e60576a2a14acf5b..d9330fcc62ad5c88a6090e1c07eb0a8115019402 100644 (file)
@@ -39,6 +39,7 @@ void host1x_debug_output(struct output *o, const char *fmt, ...)
        va_start(args, fmt);
        len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
        va_end(args);
+
        o->fn(o->ctx, o->buf, len);
 }
 
@@ -48,13 +49,17 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
        struct output *o = data;
 
        mutex_lock(&ch->reflock);
+
        if (ch->refcount) {
                mutex_lock(&ch->cdma.lock);
+
                if (show_fifo)
                        host1x_hw_show_channel_fifo(m, ch, o);
+
                host1x_hw_show_channel_cdma(m, ch, o);
                mutex_unlock(&ch->cdma.lock);
        }
+
        mutex_unlock(&ch->reflock);
 
        return 0;
@@ -62,22 +67,27 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
 
 static void show_syncpts(struct host1x *m, struct output *o)
 {
-       int i;
+       unsigned int i;
+
        host1x_debug_output(o, "---- syncpts ----\n");
+
        for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
                u32 max = host1x_syncpt_read_max(m->syncpt + i);
                u32 min = host1x_syncpt_load(m->syncpt + i);
+
                if (!min && !max)
                        continue;
-               host1x_debug_output(o, "id %d (%s) min %d max %d\n",
+
+               host1x_debug_output(o, "id %u (%s) min %d max %d\n",
                                    i, m->syncpt[i].name, min, max);
        }
 
        for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
                u32 base_val;
+
                base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
                if (base_val)
-                       host1x_debug_output(o, "waitbase id %d val %d\n", i,
+                       host1x_debug_output(o, "waitbase id %u val %d\n", i,
                                            base_val);
        }
 
@@ -114,7 +124,9 @@ static int host1x_debug_show_all(struct seq_file *s, void *unused)
                .fn = write_to_seqfile,
                .ctx = s
        };
+
        show_all(s->private, &o);
+
        return 0;
 }
 
@@ -124,7 +136,9 @@ static int host1x_debug_show(struct seq_file *s, void *unused)
                .fn = write_to_seqfile,
                .ctx = s
        };
+
        show_all_no_fifo(s->private, &o);
+
        return 0;
 }
 
@@ -134,10 +148,10 @@ static int host1x_debug_open_all(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations host1x_debug_all_fops = {
-       .open           = host1x_debug_open_all,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+       .open = host1x_debug_open_all,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
 };
 
 static int host1x_debug_open(struct inode *inode, struct file *file)
@@ -146,10 +160,10 @@ static int host1x_debug_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations host1x_debug_fops = {
-       .open           = host1x_debug_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+       .open = host1x_debug_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
 };
 
 static void host1x_debugfs_init(struct host1x *host1x)
@@ -201,6 +215,7 @@ void host1x_debug_dump(struct host1x *host1x)
        struct output o = {
                .fn = write_to_printk
        };
+
        show_all(host1x, &o);
 }
 
@@ -209,5 +224,6 @@ void host1x_debug_dump_syncpts(struct host1x *host1x)
        struct output o = {
                .fn = write_to_printk
        };
+
        show_syncpts(host1x, &o);
 }
index ff348690df94a0eebcddaadedfbf440cb05cac4a..a62317af76ad7b8b90593c2fda904a102e360701 100644 (file)
@@ -63,13 +63,13 @@ u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
 }
 
 static const struct host1x_info host1x01_info = {
-       .nb_channels    = 8,
-       .nb_pts         = 32,
-       .nb_mlocks      = 16,
-       .nb_bases       = 8,
-       .init           = host1x01_init,
-       .sync_offset    = 0x3000,
-       .dma_mask       = DMA_BIT_MASK(32),
+       .nb_channels = 8,
+       .nb_pts = 32,
+       .nb_mlocks = 16,
+       .nb_bases = 8,
+       .init = host1x01_init,
+       .sync_offset = 0x3000,
+       .dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct host1x_info host1x02_info = {
@@ -102,7 +102,7 @@ static const struct host1x_info host1x05_info = {
        .dma_mask = DMA_BIT_MASK(34),
 };
 
-static struct of_device_id host1x_of_match[] = {
+static const struct of_device_id host1x_of_match[] = {
        { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
        { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
        { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
index dace124994bb2683cc0c66218dcd5cdd15902d2b..5220510f39dae91c219b29d40062b3e689fcb815 100644 (file)
@@ -45,7 +45,7 @@ struct host1x_cdma_ops {
        void (*start)(struct host1x_cdma *cdma);
        void (*stop)(struct host1x_cdma *cdma);
        void (*flush)(struct  host1x_cdma *cdma);
-       int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
+       int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
        void (*timeout_destroy)(struct host1x_cdma *cdma);
        void (*freeze)(struct host1x_cdma *cdma);
        void (*resume)(struct host1x_cdma *cdma, u32 getptr);
@@ -82,21 +82,21 @@ struct host1x_intr_ops {
        int (*init_host_sync)(struct host1x *host, u32 cpm,
                void (*syncpt_thresh_work)(struct work_struct *work));
        void (*set_syncpt_threshold)(
-               struct host1x *host, u32 id, u32 thresh);
-       void (*enable_syncpt_intr)(struct host1x *host, u32 id);
-       void (*disable_syncpt_intr)(struct host1x *host, u32 id);
+               struct host1x *host, unsigned int id, u32 thresh);
+       void (*enable_syncpt_intr)(struct host1x *host, unsigned int id);
+       void (*disable_syncpt_intr)(struct host1x *host, unsigned int id);
        void (*disable_all_syncpt_intrs)(struct host1x *host);
        int (*free_syncpt_irq)(struct host1x *host);
 };
 
 struct host1x_info {
-       int     nb_channels;            /* host1x: num channels supported */
-       int     nb_pts;                 /* host1x: num syncpoints supported */
-       int     nb_bases;               /* host1x: num syncpoints supported */
-       int     nb_mlocks;              /* host1x: number of mlocks */
-       int     (*init)(struct host1x *); /* initialize per SoC ops */
-       int     sync_offset;
-       u64     dma_mask;               /* mask of addressable memory */
+       unsigned int nb_channels; /* host1x: number of channels supported */
+       unsigned int nb_pts; /* host1x: number of syncpoints supported */
+       unsigned int nb_bases; /* host1x: number of syncpoint bases supported */
+       unsigned int nb_mlocks; /* host1x: number of mlocks supported */
+       int (*init)(struct host1x *host1x); /* initialize per SoC ops */
+       unsigned int sync_offset; /* offset of syncpoint registers */
+       u64 dma_mask; /* mask of addressable memory */
 };
 
 struct host1x {
@@ -109,7 +109,6 @@ struct host1x {
        struct clk *clk;
 
        struct mutex intr_mutex;
-       struct workqueue_struct *intr_wq;
        int intr_syncpt_irq;
 
        const struct host1x_syncpt_ops *syncpt_op;
@@ -183,19 +182,20 @@ static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
 }
 
 static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
-                                                      u32 id, u32 thresh)
+                                                      unsigned int id,
+                                                      u32 thresh)
 {
        host->intr_op->set_syncpt_threshold(host, id, thresh);
 }
 
 static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
-                                                    u32 id)
+                                                    unsigned int id)
 {
        host->intr_op->enable_syncpt_intr(host, id);
 }
 
 static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
-                                                     u32 id)
+                                                     unsigned int id)
 {
        host->intr_op->disable_syncpt_intr(host, id);
 }
@@ -212,9 +212,9 @@ static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
 
 static inline int host1x_hw_channel_init(struct host1x *host,
                                         struct host1x_channel *channel,
-                                        int chid)
+                                        unsigned int id)
 {
-       return host->channel_op->init(channel, host, chid);
+       return host->channel_op->init(channel, host, id);
 }
 
 static inline int host1x_hw_channel_submit(struct host1x *host,
@@ -243,9 +243,9 @@ static inline void host1x_hw_cdma_flush(struct host1x *host,
 
 static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
                                              struct host1x_cdma *cdma,
-                                             u32 syncpt_id)
+                                             unsigned int syncpt)
 {
-       return host->cdma_op->timeout_init(cdma, syncpt_id);
+       return host->cdma_op->timeout_init(cdma, syncpt);
 }
 
 static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
index 305ea8f3382d22da2611e6de7025695265d97699..659c1bbfeeba2316df23976b2ce78217f578ffad 100644 (file)
@@ -41,7 +41,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
 {
        struct host1x *host1x = cdma_to_host1x(cdma);
        struct push_buffer *pb = &cdma->push_buffer;
-       u32 i;
+       unsigned int i;
 
        for (i = 0; i < syncpt_incrs; i++)
                host1x_syncpt_incr(cdma->timeout.syncpt);
@@ -58,6 +58,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
                        &pb->phys, getptr);
                getptr = (getptr + 8) & (pb->size_bytes - 1);
        }
+
        wmb();
 }
 
@@ -162,12 +163,14 @@ static void cdma_stop(struct host1x_cdma *cdma)
        struct host1x_channel *ch = cdma_to_channel(cdma);
 
        mutex_lock(&cdma->lock);
+
        if (cdma->running) {
                host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
                host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
                                 HOST1X_CHANNEL_DMACTRL);
                cdma->running = false;
        }
+
        mutex_unlock(&cdma->lock);
 }
 
@@ -213,11 +216,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
        u32 cmdproc_stop;
 
        dev_dbg(host1x->dev,
-               "resuming channel (id %d, DMAGET restart = 0x%x)\n",
+               "resuming channel (id %u, DMAGET restart = 0x%x)\n",
                ch->id, getptr);
 
        cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
-       cmdproc_stop &= ~(BIT(ch->id));
+       cmdproc_stop &= ~BIT(ch->id);
        host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
 
        cdma->torndown = false;
@@ -231,14 +234,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
  */
 static void cdma_timeout_handler(struct work_struct *work)
 {
+       u32 prev_cmdproc, cmdproc_stop, syncpt_val;
        struct host1x_cdma *cdma;
        struct host1x *host1x;
        struct host1x_channel *ch;
 
-       u32 syncpt_val;
-
-       u32 prev_cmdproc, cmdproc_stop;
-
        cdma = container_of(to_delayed_work(work), struct host1x_cdma,
                            timeout.wq);
        host1x = cdma_to_host1x(cdma);
@@ -277,9 +277,9 @@ static void cdma_timeout_handler(struct work_struct *work)
                return;
        }
 
-       dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
-               __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
-               syncpt_val, cdma->timeout.syncpt_val);
+       dev_warn(host1x->dev, "%s: timeout: %u (%s), HW thresh %d, done %d\n",
+                __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
+                syncpt_val, cdma->timeout.syncpt_val);
 
        /* stop HW, resetting channel/module */
        host1x_hw_cdma_freeze(host1x, cdma);
@@ -291,7 +291,7 @@ static void cdma_timeout_handler(struct work_struct *work)
 /*
  * Init timeout resources
  */
-static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
+static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
 {
        INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
        cdma->timeout.initialized = true;
@@ -306,6 +306,7 @@ static void cdma_timeout_destroy(struct host1x_cdma *cdma)
 {
        if (cdma->timeout.initialized)
                cancel_delayed_work(&cdma->timeout.wq);
+
        cdma->timeout.initialized = false;
 }
 
index 946c332c3906ad823826b56b0c25a3300715424c..5e8df78b7acd82acbd2539671c167527fc512828 100644 (file)
@@ -46,6 +46,7 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
                 */
                for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
                        u32 num_words = min(words - i, TRACE_MAX_LENGTH);
+
                        offset += i * sizeof(u32);
 
                        trace_host1x_cdma_push_gather(dev_name(dev), bo,
@@ -66,6 +67,7 @@ static void submit_gathers(struct host1x_job *job)
                struct host1x_job_gather *g = &job->gathers[i];
                u32 op1 = host1x_opcode_gather(g->words);
                u32 op2 = g->base + g->offset;
+
                trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
                host1x_cdma_push(cdma, op1, op2);
        }
@@ -75,7 +77,8 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
 {
        struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
        struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
-       u32 id, value;
+       unsigned int id;
+       u32 value;
 
        value = host1x_syncpt_read_max(sp);
        id = sp->base->id;
index cc3f1825c735e5cf10aad66ff72c11003bc15737..7a4a3286e4a7c284e17e4940a846143730f2ad2d 100644 (file)
@@ -40,8 +40,7 @@ enum {
 
 static unsigned int show_channel_command(struct output *o, u32 val)
 {
-       unsigned mask;
-       unsigned subop;
+       unsigned int mask, subop;
 
        switch (val >> 28) {
        case HOST1X_OPCODE_SETCLASS:
@@ -51,12 +50,11 @@ static unsigned int show_channel_command(struct output *o, u32 val)
                                            val >> 6 & 0x3ff,
                                            val >> 16 & 0xfff, mask);
                        return hweight8(mask);
-               } else {
-                       host1x_debug_output(o, "SETCL(class=%03x)\n",
-                                           val >> 6 & 0x3ff);
-                       return 0;
                }
 
+               host1x_debug_output(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
+               return 0;
+
        case HOST1X_OPCODE_INCR:
                host1x_debug_output(o, "INCR(offset=%03x, [",
                                    val >> 16 & 0xfff);
@@ -143,7 +141,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
        struct host1x_job *job;
 
        list_for_each_entry(job, &cdma->sync_queue, list) {
-               int i;
+               unsigned int i;
+
                host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
                                    job, job->syncpt_id, job->syncpt_end,
                                    job->first_get, job->timeout,
@@ -190,7 +189,7 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
        cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
        cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
 
-       host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
+       host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
 
        if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
            !ch->cdma.push_buffer.mapped) {
@@ -200,14 +199,13 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
 
        if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
            HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
-           HOST1X_UCLASS_WAIT_SYNCPT)
+                       HOST1X_UCLASS_WAIT_SYNCPT)
                host1x_debug_output(o, "waiting on syncpt %d val %d\n",
                                    cbread >> 24, cbread & 0xffffff);
        else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
-          HOST1X_CLASS_HOST1X &&
-          HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
-          HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
-
+                               HOST1X_CLASS_HOST1X &&
+                HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+                               HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
                base = (cbread >> 16) & 0xff;
                baseval =
                        host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
@@ -236,7 +234,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
        u32 val, rd_ptr, wr_ptr, start, end;
        unsigned int data_count = 0;
 
-       host1x_debug_output(o, "%d: fifo:\n", ch->id);
+       host1x_debug_output(o, "%u: fifo:\n", ch->id);
 
        val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
        host1x_debug_output(o, "FIFOSTAT %08x\n", val);
@@ -290,20 +288,22 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
 
 static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
 {
-       int i;
+       unsigned int i;
 
        host1x_debug_output(o, "---- mlocks ----\n");
+
        for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
                u32 owner =
                        host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
                if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
-                       host1x_debug_output(o, "%d: locked by channel %d\n",
+                       host1x_debug_output(o, "%u: locked by channel %u\n",
                                i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
                else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
-                       host1x_debug_output(o, "%d: locked by cpu\n", i);
+                       host1x_debug_output(o, "%u: locked by cpu\n", i);
                else
-                       host1x_debug_output(o, "%d: unlocked\n", i);
+                       host1x_debug_output(o, "%u: unlocked\n", i);
        }
+
        host1x_debug_output(o, "\n");
 }
 
index e1e31e9e67cd96bdb17297ec66fb1ad8415a0ebd..dacb8009a605860a3153f1b2b00cf514007fb07b 100644 (file)
@@ -38,14 +38,14 @@ static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
        host1x_sync_writel(host, BIT_MASK(id),
                HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
 
-       queue_work(host->intr_wq, &syncpt->intr.work);
+       schedule_work(&syncpt->intr.work);
 }
 
 static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
 {
        struct host1x *host = dev_id;
        unsigned long reg;
-       int i, id;
+       unsigned int i, id;
 
        for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
                reg = host1x_sync_readl(host,
@@ -62,7 +62,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
 
 static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
 {
-       u32 i;
+       unsigned int i;
 
        for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
                host1x_sync_writel(host, 0xffffffffu,
@@ -72,10 +72,12 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
        }
 }
 
-static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
-       void (*syncpt_thresh_work)(struct work_struct *))
+static int
+_host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
+                           void (*syncpt_thresh_work)(struct work_struct *))
 {
-       int i, err;
+       unsigned int i;
+       int err;
 
        host1x_hw_intr_disable_all_syncpt_intrs(host);
 
@@ -106,18 +108,21 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
 }
 
 static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
-       u32 id, u32 thresh)
+                                             unsigned int id,
+                                             u32 thresh)
 {
        host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
 }
 
-static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
+static void _host1x_intr_enable_syncpt_intr(struct host1x *host,
+                                           unsigned int id)
 {
        host1x_sync_writel(host, BIT_MASK(id),
                HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
 }
 
-static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
+static void _host1x_intr_disable_syncpt_intr(struct host1x *host,
+                                            unsigned int id)
 {
        host1x_sync_writel(host, BIT_MASK(id),
                HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
@@ -127,8 +132,13 @@ static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
 
 static int _host1x_free_syncpt_irq(struct host1x *host)
 {
+       unsigned int i;
+
        devm_free_irq(host->dev, host->intr_syncpt_irq, host);
-       flush_workqueue(host->intr_wq);
+
+       for (i = 0; i < host->info->nb_pts; i++)
+               cancel_work_sync(&host->syncpt[i].intr.work);
+
        return 0;
 }
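The interrupt-path hunks in this file retire the driver-private intr_wq: work is now queued with schedule_work() on the system workqueue, and teardown cancels each syncpoint's work item with cancel_work_sync() instead of flushing a whole queue. A hedged sketch of the same pattern with illustrative names:

#include <linux/workqueue.h>

struct foo_syncpt {
	struct work_struct work;
};

/* Called from the ISR path: no dedicated workqueue required. */
static void foo_kick(struct foo_syncpt *sp)
{
	schedule_work(&sp->work);
}

/* Teardown: cancel each pending item and wait for it to finish. */
static void foo_teardown(struct foo_syncpt *sps, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		cancel_work_sync(&sps[i].work);
}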
 
index 56e85395ac24112faf17b4b06a1aa950e671df0b..c93f74fcce72cf4a22ce98fc1a277a7e480d937f 100644 (file)
@@ -26,8 +26,9 @@
  */
 static void syncpt_restore(struct host1x_syncpt *sp)
 {
+       u32 min = host1x_syncpt_read_min(sp);
        struct host1x *host = sp->host;
-       int min = host1x_syncpt_read_min(sp);
+
        host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
 }
 
@@ -37,6 +38,7 @@ static void syncpt_restore(struct host1x_syncpt *sp)
 static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
 {
        struct host1x *host = sp->host;
+
        host1x_sync_writel(host, sp->base_val,
                           HOST1X_SYNC_SYNCPT_BASE(sp->id));
 }
@@ -47,6 +49,7 @@ static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
 static void syncpt_read_wait_base(struct host1x_syncpt *sp)
 {
        struct host1x *host = sp->host;
+
        sp->base_val =
                host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
 }
@@ -85,6 +88,7 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
        if (!host1x_syncpt_client_managed(sp) &&
            host1x_syncpt_idle(sp))
                return -EINVAL;
+
        host1x_sync_writel(host, BIT_MASK(sp->id),
                           HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
        wmb();
@@ -95,10 +99,10 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
 /* remove a wait pointed to by patch_addr */
 static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
 {
-       u32 override = host1x_class_host_wait_syncpt(
-               HOST1X_SYNCPT_RESERVED, 0);
+       u32 override = host1x_class_host_wait_syncpt(HOST1X_SYNCPT_RESERVED, 0);
 
        *((u32 *)patch_addr) = override;
+
        return 0;
 }
 
index 2491bf82e30cafa05f5fc10721fd4a13993c6477..8b4fad0ab35d24de11380ec00f7e8e6802db5c3a 100644 (file)
@@ -122,18 +122,20 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
 static void action_wakeup(struct host1x_waitlist *waiter)
 {
        wait_queue_head_t *wq = waiter->data;
+
        wake_up(wq);
 }
 
 static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
 {
        wait_queue_head_t *wq = waiter->data;
+
        wake_up_interruptible(wq);
 }
 
 typedef void (*action_handler)(struct host1x_waitlist *waiter);
 
-static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
+static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_wakeup,
        action_wakeup_interruptible,
@@ -209,7 +211,7 @@ static void syncpt_thresh_work(struct work_struct *work)
                                host1x_syncpt_load(host->syncpt + id));
 }
 
-int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
                           enum host1x_intr_action action, void *data,
                           struct host1x_waitlist *waiter, void **ref)
 {
@@ -254,7 +256,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
        return 0;
 }
 
-void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
 {
        struct host1x_waitlist *waiter = ref;
        struct host1x_syncpt *syncpt;
@@ -277,9 +279,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
 
        mutex_init(&host->intr_mutex);
        host->intr_syncpt_irq = irq_sync;
-       host->intr_wq = create_workqueue("host_syncpt");
-       if (!host->intr_wq)
-               return -ENOMEM;
 
        for (id = 0; id < nb_pts; ++id) {
                struct host1x_syncpt *syncpt = host->syncpt + id;
@@ -288,7 +287,7 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
                INIT_LIST_HEAD(&syncpt->intr.wait_head);
                snprintf(syncpt->intr.thresh_irq_name,
                         sizeof(syncpt->intr.thresh_irq_name),
-                        "host1x_sp_%02d", id);
+                        "host1x_sp_%02u", id);
        }
 
        host1x_intr_start(host);
@@ -299,7 +298,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
 void host1x_intr_deinit(struct host1x *host)
 {
        host1x_intr_stop(host);
-       destroy_workqueue(host->intr_wq);
 }
 
 void host1x_intr_start(struct host1x *host)
@@ -342,7 +340,7 @@ void host1x_intr_stop(struct host1x *host)
                if (!list_empty(&syncpt[id].intr.wait_head)) {
                        /* output diagnostics */
                        mutex_unlock(&host->intr_mutex);
-                       pr_warn("%s cannot stop syncpt intr id=%d\n",
+                       pr_warn("%s cannot stop syncpt intr id=%u\n",
                                __func__, id);
                        return;
                }
index 2b8adf016a05b0fe3cbec0cc9cdb49e2da88ea3b..1370c2bb75b810ff5e275cfbb3b8ef8edb459183 100644 (file)
@@ -75,7 +75,7 @@ struct host1x_waitlist {
  *
  * This is a non-blocking api.
  */
-int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
        enum host1x_intr_action action, void *data,
        struct host1x_waitlist *waiter, void **ref);
 
@@ -84,7 +84,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
  * You must call this if you passed non-NULL as ref.
  * @ref the ref returned from host1x_intr_add_action()
  */
-void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref);
 
 /* Initialize host1x sync point interrupt */
 int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
index b4515d54403967dad522594c19dd2052f9c875c0..a91b7c4a6110f8b0bfe74589e32a226fefe7f341 100644 (file)
@@ -161,7 +161,7 @@ static int do_waitchks(struct host1x_job *job, struct host1x *host,
 
                if (host1x_syncpt_is_expired(sp, wait->thresh)) {
                        dev_dbg(host->dev,
-                               "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
+                               "drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
                                wait->syncpt_id, sp->name, wait->thresh,
                                host1x_syncpt_read_min(sp));
 
@@ -464,6 +464,7 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
 
        for (i = 0; i < job->num_gathers; i++) {
                struct host1x_job_gather *g = &job->gathers[i];
+
                size += g->words * sizeof(u32);
        }
 
@@ -514,6 +515,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
        bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
        for (i = 0; i < job->num_waitchk; i++) {
                u32 syncpt_id = job->waitchk[i].syncpt_id;
+
                if (syncpt_id < host1x_syncpt_nb_pts(host))
                        set_bit(syncpt_id, waitchk_mask);
        }
@@ -571,14 +573,16 @@ void host1x_job_unpin(struct host1x_job *job)
 
        for (i = 0; i < job->num_unpins; i++) {
                struct host1x_job_unpin_data *unpin = &job->unpins[i];
+
                host1x_bo_unpin(unpin->bo, unpin->sgt);
                host1x_bo_put(unpin->bo);
        }
+
        job->num_unpins = 0;
 
        if (job->gather_copy_size)
                dma_free_wc(job->channel->dev, job->gather_copy_size,
-                           job->gather_copy_mapped, job->gather_copy);
+                           job->gather_copy_mapped, job->gather_copy);
 }
 EXPORT_SYMBOL(host1x_job_unpin);
 
index 6b7fdc1e2ed078c337c8fa235633fcc105c4ccde..95589328ad52a27a7ba345f097a5740cca67d34f 100644 (file)
@@ -73,7 +73,7 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
                        return NULL;
        }
 
-       name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
+       name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
                        dev ? dev_name(dev) : NULL);
        if (!name)
                return NULL;
@@ -110,12 +110,14 @@ EXPORT_SYMBOL(host1x_syncpt_incr_max);
 void host1x_syncpt_restore(struct host1x *host)
 {
        struct host1x_syncpt *sp_base = host->syncpt;
-       u32 i;
+       unsigned int i;
 
        for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
                host1x_hw_syncpt_restore(host, sp_base + i);
+
        for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
                host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
+
        wmb();
 }
 
@@ -126,7 +128,7 @@ void host1x_syncpt_restore(struct host1x *host)
 void host1x_syncpt_save(struct host1x *host)
 {
        struct host1x_syncpt *sp_base = host->syncpt;
-       u32 i;
+       unsigned int i;
 
        for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
                if (host1x_syncpt_client_managed(sp_base + i))
@@ -146,6 +148,7 @@ void host1x_syncpt_save(struct host1x *host)
 u32 host1x_syncpt_load(struct host1x_syncpt *sp)
 {
        u32 val;
+
        val = host1x_hw_syncpt_load(sp->host, sp);
        trace_host1x_syncpt_load_min(sp->id, val);
 
@@ -157,10 +160,9 @@ u32 host1x_syncpt_load(struct host1x_syncpt *sp)
  */
 u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
 {
-       u32 val;
        host1x_hw_syncpt_load_wait_base(sp->host, sp);
-       val = sp->base_val;
-       return val;
+
+       return sp->base_val;
 }
 
 /*
@@ -179,6 +181,7 @@ EXPORT_SYMBOL(host1x_syncpt_incr);
 static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
 {
        host1x_hw_syncpt_load(sp->host, sp);
+
        return host1x_syncpt_is_expired(sp, thresh);
 }
 
@@ -186,7 +189,7 @@ static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
  * Main entrypoint for syncpoint value waits.
  */
 int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
-                       u32 *value)
+                      u32 *value)
 {
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
@@ -201,6 +204,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
        if (host1x_syncpt_is_expired(sp, thresh)) {
                if (value)
                        *value = host1x_syncpt_load(sp);
+
                return 0;
        }
 
@@ -209,6 +213,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
        if (host1x_syncpt_is_expired(sp, thresh)) {
                if (value)
                        *value = val;
+
                goto done;
        }
 
@@ -239,32 +244,42 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
        /* wait for the syncpoint, or timeout, or signal */
        while (timeout) {
                long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
-               int remain = wait_event_interruptible_timeout(wq,
+               int remain;
+
+               remain = wait_event_interruptible_timeout(wq,
                                syncpt_load_min_is_expired(sp, thresh),
                                check);
                if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
                        if (value)
                                *value = host1x_syncpt_load(sp);
+
                        err = 0;
+
                        break;
                }
+
                if (remain < 0) {
                        err = remain;
                        break;
                }
+
                timeout -= check;
+
                if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
                        dev_warn(sp->host->dev,
-                               "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
+                               "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
                                 current->comm, sp->id, sp->name,
                                 thresh, timeout);
 
                        host1x_debug_dump_syncpts(sp->host);
+
                        if (check_count == MAX_STUCK_CHECK_COUNT)
                                host1x_debug_dump(sp->host);
+
                        check_count++;
                }
        }
+
        host1x_intr_put_ref(sp->host, sp->id, ref);
 
 done:
@@ -279,7 +294,9 @@ bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
 {
        u32 current_val;
        u32 future_val;
+
        smp_rmb();
+
        current_val = (u32)atomic_read(&sp->min_val);
        future_val = (u32)atomic_read(&sp->max_val);
 
@@ -341,14 +358,14 @@ int host1x_syncpt_init(struct host1x *host)
 {
        struct host1x_syncpt_base *bases;
        struct host1x_syncpt *syncpt;
-       int i;
+       unsigned int i;
 
-       syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
+       syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
                              GFP_KERNEL);
        if (!syncpt)
                return -ENOMEM;
 
-       bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
+       bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
                             GFP_KERNEL);
        if (!bases)
                return -ENOMEM;
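The allocation hunk above swaps devm_kzalloc(dev, sizeof(*p) * n, ...) for devm_kcalloc(dev, n, sizeof(*p), ...), which zeroes the memory just the same but also fails cleanly if n * size would overflow. A small sketch under an assumed element type:

#include <linux/device.h>
#include <linux/slab.h>

struct foo_syncpt {
	unsigned int id;	/* illustrative member */
};

static struct foo_syncpt *foo_alloc_syncpts(struct device *dev,
					    unsigned int nb_pts)
{
	/* devm_kcalloc() checks nb_pts * sizeof() for overflow. */
	return devm_kcalloc(dev, nb_pts, sizeof(struct foo_syncpt),
			    GFP_KERNEL);
}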
@@ -378,6 +395,7 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
                                            unsigned long flags)
 {
        struct host1x *host = dev_get_drvdata(dev->parent);
+
        return host1x_syncpt_alloc(host, dev, flags);
 }
 EXPORT_SYMBOL(host1x_syncpt_request);
@@ -398,8 +416,9 @@ EXPORT_SYMBOL(host1x_syncpt_free);
 
 void host1x_syncpt_deinit(struct host1x *host)
 {
-       int i;
        struct host1x_syncpt *sp = host->syncpt;
+       unsigned int i;
+
        for (i = 0; i < host->info->nb_pts; i++, sp++)
                kfree(sp->name);
 }
@@ -407,10 +426,11 @@ void host1x_syncpt_deinit(struct host1x *host)
 /*
  * Read max. It indicates how many operations there are in queue, either in
  * channel or in a software thread.
- * */
+ */
 u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
 {
        smp_rmb();
+
        return (u32)atomic_read(&sp->max_val);
 }
 EXPORT_SYMBOL(host1x_syncpt_read_max);
@@ -421,6 +441,7 @@ EXPORT_SYMBOL(host1x_syncpt_read_max);
 u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
 {
        smp_rmb();
+
        return (u32)atomic_read(&sp->min_val);
 }
 EXPORT_SYMBOL(host1x_syncpt_read_min);
@@ -431,25 +452,26 @@ u32 host1x_syncpt_read(struct host1x_syncpt *sp)
 }
 EXPORT_SYMBOL(host1x_syncpt_read);
 
-int host1x_syncpt_nb_pts(struct host1x *host)
+unsigned int host1x_syncpt_nb_pts(struct host1x *host)
 {
        return host->info->nb_pts;
 }
 
-int host1x_syncpt_nb_bases(struct host1x *host)
+unsigned int host1x_syncpt_nb_bases(struct host1x *host)
 {
        return host->info->nb_bases;
 }
 
-int host1x_syncpt_nb_mlocks(struct host1x *host)
+unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
 {
        return host->info->nb_mlocks;
 }
 
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
 {
        if (id >= host->info->nb_pts)
                return NULL;
+
        return host->syncpt + id;
 }
 EXPORT_SYMBOL(host1x_syncpt_get);
index 9056465ecd3f1ea88963c397b6f2fdd0ae5f407a..f719205105ac19192bd8b7881d17ab87810696eb 100644 (file)
@@ -37,7 +37,7 @@ struct host1x_syncpt_base {
 };
 
 struct host1x_syncpt {
-       int id;
+       unsigned int id;
        atomic_t min_val;
        atomic_t max_val;
        u32 base_val;
@@ -58,13 +58,13 @@ int host1x_syncpt_init(struct host1x *host);
 void host1x_syncpt_deinit(struct host1x *host);
 
 /* Return number of sync point supported. */
-int host1x_syncpt_nb_pts(struct host1x *host);
+unsigned int host1x_syncpt_nb_pts(struct host1x *host);
 
 /* Return number of wait bases supported. */
-int host1x_syncpt_nb_bases(struct host1x *host);
+unsigned int host1x_syncpt_nb_bases(struct host1x *host);
 
 /* Return number of mlocks supported. */
-int host1x_syncpt_nb_mlocks(struct host1x *host);
+unsigned int host1x_syncpt_nb_mlocks(struct host1x *host);
 
 /*
  * Check sync point sanity. If max is larger than min, there are too many
index 2f29780e7c6863f7b3e9c82b0b9b5e7c0cc6be80..659475c1e44abea70ad9834d09c26e3c51d8e14f 100644 (file)
@@ -150,6 +150,9 @@ static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
 static int ipu_bus_format_to_map(u32 fmt)
 {
        switch (fmt) {
+       default:
+               WARN_ON(1);
+               /* fall-through */
        case MEDIA_BUS_FMT_RGB888_1X24:
                return IPU_DC_MAP_RGB24;
        case MEDIA_BUS_FMT_RGB565_1X16:
@@ -162,8 +165,6 @@ static int ipu_bus_format_to_map(u32 fmt)
                return IPU_DC_MAP_LVDS666;
        case MEDIA_BUS_FMT_BGR888_1X24:
                return IPU_DC_MAP_BGR24;
-       default:
-               return -EINVAL;
        }
 }
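Placing the default label first, with WARN_ON(1) falling through into the RGB24 case, turns an unknown bus format into a loud-but-safe default instead of an error return; that is what allows the map < 0 handling in ipu_dc_init_sync() to be deleted in the next hunk. A sketch of the switch shape with illustrative format values:

#include <linux/bug.h>

/* Map a format code to a table index; unknowns warn and use a default. */
static int bus_format_to_map(unsigned int fmt)
{
	switch (fmt) {
	default:
		WARN_ON(1);
		/* fall-through */
	case 24:	/* illustrative RGB24-like code */
		return 1;
	case 16:	/* illustrative RGB565-like code */
		return 2;
	}
}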
 
@@ -178,10 +179,6 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
        dc->di = ipu_di_get_num(di);
 
        map = ipu_bus_format_to_map(bus_format);
-       if (map < 0) {
-               dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
-               return map;
-       }
 
        /*
         * In interlaced mode we need more counters to create the asymmetric
index 359268e3a166851ba94303ee418f306817ba19ad..a8d87ddd8a1769e3a9b348f2f45b1272f99702df 100644 (file)
@@ -572,9 +572,6 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
        dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
                di->id, sig->mode.hactive, sig->mode.vactive);
 
-       if ((sig->mode.vsync_len == 0) || (sig->mode.hsync_len == 0))
-               return -EINVAL;
-
        dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
                clk_get_rate(di->clk_ipu),
                clk_get_rate(di->clk_di),
index 837b1ec228005ebf0b36a865d4f992588229dc0a..42705bb5aaa36b6e42d495032df6758ef9a0ddcd 100644 (file)
 #define DMFC_DP_CHAN_6B_24             16
 #define DMFC_DP_CHAN_6F_29             24
 
-#define DMFC_FIFO_SIZE_64              (3 << 3)
-#define DMFC_FIFO_SIZE_128             (2 << 3)
-#define DMFC_FIFO_SIZE_256             (1 << 3)
-#define DMFC_FIFO_SIZE_512             (0 << 3)
-
-#define DMFC_SEGMENT(x)                        ((x & 0x7) << 0)
-#define DMFC_BURSTSIZE_128             (0 << 6)
-#define DMFC_BURSTSIZE_64              (1 << 6)
-#define DMFC_BURSTSIZE_32              (2 << 6)
-#define DMFC_BURSTSIZE_16              (3 << 6)
-
 struct dmfc_channel_data {
        int             ipu_channel;
        unsigned long   channel_reg;
@@ -104,9 +93,6 @@ struct ipu_dmfc_priv;
 
 struct dmfc_channel {
        unsigned                        slots;
-       unsigned                        slotmask;
-       unsigned                        segment;
-       int                             burstsize;
        struct ipu_soc                  *ipu;
        struct ipu_dmfc_priv            *priv;
        const struct dmfc_channel_data  *data;
@@ -117,7 +103,6 @@ struct ipu_dmfc_priv {
        struct device *dev;
        struct dmfc_channel channels[DMFC_NUM_CHANNELS];
        struct mutex mutex;
-       unsigned long bandwidth_per_slot;
        void __iomem *base;
        int use_count;
 };
@@ -172,184 +157,6 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
 
-static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots,
-               int segment, int burstsize)
-{
-       struct ipu_dmfc_priv *priv = dmfc->priv;
-       u32 val, field;
-
-       dev_dbg(priv->dev,
-                       "dmfc: using %d slots starting from segment %d for IPU channel %d\n",
-                       slots, segment, dmfc->data->ipu_channel);
-
-       switch (slots) {
-       case 1:
-               field = DMFC_FIFO_SIZE_64;
-               break;
-       case 2:
-               field = DMFC_FIFO_SIZE_128;
-               break;
-       case 4:
-               field = DMFC_FIFO_SIZE_256;
-               break;
-       case 8:
-               field = DMFC_FIFO_SIZE_512;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       switch (burstsize) {
-       case 16:
-               field |= DMFC_BURSTSIZE_16;
-               break;
-       case 32:
-               field |= DMFC_BURSTSIZE_32;
-               break;
-       case 64:
-               field |= DMFC_BURSTSIZE_64;
-               break;
-       case 128:
-               field |= DMFC_BURSTSIZE_128;
-               break;
-       }
-
-       field |= DMFC_SEGMENT(segment);
-
-       val = readl(priv->base + dmfc->data->channel_reg);
-
-       val &= ~(0xff << dmfc->data->shift);
-       val |= field << dmfc->data->shift;
-
-       writel(val, priv->base + dmfc->data->channel_reg);
-
-       dmfc->slots = slots;
-       dmfc->segment = segment;
-       dmfc->burstsize = burstsize;
-       dmfc->slotmask = ((1 << slots) - 1) << segment;
-
-       return 0;
-}
-
-static int dmfc_bandwidth_to_slots(struct ipu_dmfc_priv *priv,
-               unsigned long bandwidth)
-{
-       int slots = 1;
-
-       while (slots * priv->bandwidth_per_slot < bandwidth)
-               slots *= 2;
-
-       return slots;
-}
-
-static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots)
-{
-       unsigned slotmask_need, slotmask_used = 0;
-       int i, segment = 0;
-
-       slotmask_need = (1 << slots) - 1;
-
-       for (i = 0; i < DMFC_NUM_CHANNELS; i++)
-               slotmask_used |= priv->channels[i].slotmask;
-
-       while (slotmask_need <= 0xff) {
-               if (!(slotmask_used & slotmask_need))
-                       return segment;
-
-               slotmask_need <<= 1;
-               segment++;
-       }
-
-       return -EBUSY;
-}
-
-void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc)
-{
-       struct ipu_dmfc_priv *priv = dmfc->priv;
-       int i;
-
-       dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n",
-                       dmfc->slots, dmfc->segment);
-
-       mutex_lock(&priv->mutex);
-
-       if (!dmfc->slots)
-               goto out;
-
-       dmfc->slotmask = 0;
-       dmfc->slots = 0;
-       dmfc->segment = 0;
-
-       for (i = 0; i < DMFC_NUM_CHANNELS; i++)
-               priv->channels[i].slotmask = 0;
-
-       for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
-               if (priv->channels[i].slots > 0) {
-                       priv->channels[i].segment =
-                               dmfc_find_slots(priv, priv->channels[i].slots);
-                       priv->channels[i].slotmask =
-                               ((1 << priv->channels[i].slots) - 1) <<
-                               priv->channels[i].segment;
-               }
-       }
-
-       for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
-               if (priv->channels[i].slots > 0)
-                       ipu_dmfc_setup_channel(&priv->channels[i],
-                                       priv->channels[i].slots,
-                                       priv->channels[i].segment,
-                                       priv->channels[i].burstsize);
-       }
-out:
-       mutex_unlock(&priv->mutex);
-}
-EXPORT_SYMBOL_GPL(ipu_dmfc_free_bandwidth);
-
-int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
-               unsigned long bandwidth_pixel_per_second, int burstsize)
-{
-       struct ipu_dmfc_priv *priv = dmfc->priv;
-       int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second);
-       int segment = -1, ret = 0;
-
-       dev_dbg(priv->dev, "dmfc: trying to allocate %ldMpixel/s for IPU channel %d\n",
-                       bandwidth_pixel_per_second / 1000000,
-                       dmfc->data->ipu_channel);
-
-       ipu_dmfc_free_bandwidth(dmfc);
-
-       mutex_lock(&priv->mutex);
-
-       if (slots > 8) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       /* For the MEM_BG channel, first try to allocate twice the slots */
-       if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC)
-               segment = dmfc_find_slots(priv, slots * 2);
-       else if (slots < 2)
-               /* Always allocate at least 128*4 bytes (2 slots) */
-               slots = 2;
-
-       if (segment >= 0)
-               slots *= 2;
-       else
-               segment = dmfc_find_slots(priv, slots);
-       if (segment < 0) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize);
-
-out:
-       mutex_unlock(&priv->mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
-
 void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
 {
        struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -384,7 +191,6 @@ EXPORT_SYMBOL_GPL(ipu_dmfc_get);
 
 void ipu_dmfc_put(struct dmfc_channel *dmfc)
 {
-       ipu_dmfc_free_bandwidth(dmfc);
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_put);
 
@@ -412,20 +218,15 @@ int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
                priv->channels[i].priv = priv;
                priv->channels[i].ipu = ipu;
                priv->channels[i].data = &dmfcdata[i];
-       }
-
-       writel(0x0, priv->base + DMFC_WR_CHAN);
-       writel(0x0, priv->base + DMFC_DP_CHAN);
 
-       /*
-        * We have a total bandwidth of clkrate * 4pixel divided
-        * into 8 slots.
-        */
-       priv->bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8;
-
-       dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n",
-                       priv->bandwidth_per_slot / 1000000);
+               if (dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC ||
+                   dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_FG_SYNC ||
+                   dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_DC_SYNC)
+                       priv->channels[i].slots = 2;
+       }
 
+       writel(0x00000050, priv->base + DMFC_WR_CHAN);
+       writel(0x00005654, priv->base + DMFC_DP_CHAN);
        writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
        writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
        writel(0x00000003, priv->base + DMFC_GENERAL1);
index aad8c162a825dc60516700b1bd8caf8efeec4244..0cd4f72162394b5652fc529681cfbc8774b50a36 100644 (file)
@@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev)
        struct elo_priv *priv = hid_get_drvdata(hdev);
 
        hid_hw_stop(hdev);
-       flush_workqueue(wq);
+       cancel_delayed_work_sync(&priv->work);
        kfree(priv);
 }
 
index c741f5e50a6681cd0b6267fc04c5ca11f1b0a1b5..95b7d61d9910f3c65fced473c4e2fab5ce0d4e9b 100644 (file)
@@ -1401,6 +1401,11 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
                        USB_DEVICE_ID_NOVATEK_PCT) },
 
+       /* Ntrig Panel */
+       { .driver_data = MT_CLS_NSMU,
+               HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+                       USB_VENDOR_ID_NTRIG, 0x1b05) },
+
        /* PixArt optical touch screen */
        { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
                MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
index 2f1ddca6f2e0a3a4712bce434d5b29e1556e216c..700145b1508894f30a018aef278d15cfc458ef3a 100644 (file)
@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
                                        goto inval;
                        } else if (uref->usage_index >= field->report_count)
                                goto inval;
-
-                       else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
-                                (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
-                                 uref->usage_index + uref_multi->num_values > field->report_count))
-                               goto inval;
                }
 
+               if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+                   (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+                    uref->usage_index + uref_multi->num_values > field->report_count))
+                       goto inval;
+
                switch (cmd) {
                case HIDIOCGUSAGE:
                        uref->value = field->value[uref->usage_index];
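The point of the hiddev rework above is that the old HIDIOCGUSAGES/HIDIOCSUSAGES bounds check sat in one branch of the if/else chain, so requests that took another branch reached the value array unchecked; hoisting the test after the chain makes it run on every path. The shape of the fix, reduced to an illustrative validator:

#include <linux/errno.h>

/* Illustrative: validate all index arithmetic before any array access. */
static int validate_request(unsigned int index, unsigned int num_values,
			    unsigned int report_count,
			    unsigned int max_multi)
{
	if (index >= report_count)
		return -EINVAL;

	/* Unconditional multi-usage check: no path can skip it. */
	if (num_values > max_multi ||
	    index + num_values > report_count)
		return -EINVAL;

	return 0;
}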
index c43318d3416e20a4d027a32402b1cf751718b8c6..2ac87d553e22f67ebaca32d62fb76375e3d22bdc 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
 
 #include <linux/i8k.h>
 
 
 static DEFINE_MUTEX(i8k_mutex);
 static char bios_version[4];
+static char bios_machineid[16];
 static struct device *i8k_hwmon_dev;
 static u32 i8k_hwmon_flags;
 static uint i8k_fan_mult = I8K_FAN_MULT;
 static uint i8k_pwm_mult;
 static uint i8k_fan_max = I8K_FAN_HIGH;
+static bool disallow_fan_type_call;
 
 #define I8K_HWMON_HAVE_TEMP1   (1 << 0)
 #define I8K_HWMON_HAVE_TEMP2   (1 << 1)
@@ -94,13 +97,13 @@ module_param(ignore_dmi, bool, 0);
 MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
 
 #if IS_ENABLED(CONFIG_I8K)
-static bool restricted;
+static bool restricted = true;
 module_param(restricted, bool, 0);
-MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
+MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
 
 static bool power_status;
 module_param(power_status, bool, 0600);
-MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
+MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
 #endif
 
 static uint fan_mult;
@@ -235,14 +238,28 @@ static int i8k_get_fan_speed(int fan)
 /*
  * Read the fan type.
  */
-static int i8k_get_fan_type(int fan)
+static int _i8k_get_fan_type(int fan)
 {
        struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
 
+       if (disallow_fan_type_call)
+               return -EINVAL;
+
        regs.ebx = fan & 0xff;
        return i8k_smm(&regs) ? : regs.eax & 0xff;
 }
 
+static int i8k_get_fan_type(int fan)
+{
+       /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
+       static int types[2] = { INT_MIN, INT_MIN };
+
+       if (types[fan] == INT_MIN)
+               types[fan] = _i8k_get_fan_type(fan);
+
+       return types[fan];
+}
+
 /*
  * Read the fan nominal rpm for specific fan speed.
  */
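Memoizing the fan type is safe because the value never changes at runtime, while the SMM call that reads it is expensive and, on the blacklisted machines below, harmful. INT_MIN works as the "not yet queried" sentinel since real results are small codes or small negative errnos. The same caching shape, sketched generically:

#include <limits.h>

/* Illustrative stand-in for a slow firmware query. */
static int expensive_query(int which)
{
	return which;
}

static int cached_query(int which)
{
	/* INT_MIN marks "never fetched"; real results never use it. */
	static int cache[2] = { INT_MIN, INT_MIN };

	if (cache[which] == INT_MIN)
		cache[which] = expensive_query(which);

	return cache[which];
}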
@@ -387,14 +404,20 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
 
        switch (cmd) {
        case I8K_BIOS_VERSION:
+               if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) ||
+                   !isdigit(bios_version[2]))
+                       return -EINVAL;
+
                val = (bios_version[0] << 16) |
                                (bios_version[1] << 8) | bios_version[2];
                break;
 
        case I8K_MACHINE_ID:
-               memset(buff, 0, 16);
-               strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
-                       sizeof(buff));
+               if (restricted && !capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               memset(buff, 0, sizeof(buff));
+               strlcpy(buff, bios_machineid, sizeof(buff));
                break;
 
        case I8K_FN_STATUS:
@@ -511,7 +534,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
        seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
                   I8K_PROC_FMT,
                   bios_version,
-                  i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+                  (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
                   cpu_temp,
                   left_fan, right_fan, left_speed, right_speed,
                   ac_power, fn_key);
@@ -718,6 +741,9 @@ static struct attribute *i8k_attrs[] = {
 static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
                              int index)
 {
+       if (disallow_fan_type_call &&
+           (index == 9 || index == 12))
+               return 0;
        if (index >= 0 && index <= 1 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
                return 0;
@@ -767,13 +793,17 @@ static int __init i8k_init_hwmon(void)
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
 
-       /* First fan attributes, if fan type is OK */
-       err = i8k_get_fan_type(0);
+       /* First fan attributes, if fan status or type is OK */
+       err = i8k_get_fan_status(0);
+       if (err < 0)
+               err = i8k_get_fan_type(0);
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
 
-       /* Second fan attributes, if fan type is OK */
-       err = i8k_get_fan_type(1);
+       /* Second fan attributes, if fan status or type is OK */
+       err = i8k_get_fan_status(1);
+       if (err < 0)
+               err = i8k_get_fan_type(1);
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
 
@@ -929,12 +959,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
-static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+/*
+ * On some machines, once I8K_SMM_GET_FAN_TYPE is issued the CPU fan speed
+ * starts going up and down randomly due to a bug in the Dell SMM or BIOS.
+ * Below is a blacklist of affected machines; for them the call is disallowed.
+ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
+ */
+static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
        {
-               /*
-                * CPU fan speed going up and down on Dell Studio XPS 8000
-                * for unknown reasons.
-                */
                .ident = "Dell Studio XPS 8000",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -942,16 +974,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
                },
        },
        {
-               /*
-                * CPU fan speed going up and down on Dell Studio XPS 8100
-                * for unknown reasons.
-                */
                .ident = "Dell Studio XPS 8100",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
                },
        },
+       {
+               .ident = "Dell Inspiron 580",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
+               },
+       },
        { }
 };
 
@@ -966,8 +1001,7 @@ static int __init i8k_probe(void)
        /*
         * Get DMI information
         */
-       if (!dmi_check_system(i8k_dmi_table) ||
-           dmi_check_system(i8k_blacklist_dmi_table)) {
+       if (!dmi_check_system(i8k_dmi_table)) {
                if (!ignore_dmi && !force)
                        return -ENODEV;
 
@@ -978,8 +1012,13 @@ static int __init i8k_probe(void)
                        i8k_get_dmi_data(DMI_BIOS_VERSION));
        }
 
+       if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
+               disallow_fan_type_call = true;
+
        strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
                sizeof(bios_version));
+       strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+               sizeof(bios_machineid));
 
        /*
         * Get SMM Dell signature
index eb97a9241d171433aa5e4ce60395718fec97af06..15aa49d082c45752fd2751f9229d16bc93196868 100644 (file)
@@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data)
  */
 static int read_registers(struct fam15h_power_data *data)
 {
-       int this_cpu, ret, cpu;
        int core, this_core;
        cpumask_var_t mask;
+       int ret, cpu;
 
        ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
        if (!ret)
@@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data)
        memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
 
        get_online_cpus();
-       this_cpu = smp_processor_id();
 
        /*
         * Choose the first online core of each compute unit, and then
@@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data)
                cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
        }
 
-       if (cpumask_test_cpu(this_cpu, mask))
-               do_read_registers_on_cu(data);
+       on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
 
-       smp_call_function_many(mask, do_read_registers_on_cu, data, true);
        put_online_cpus();
-
        free_cpumask_var(mask);
 
        return 0;
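The simplification works because smp_call_function_many() never runs the callback on the calling CPU, which is why the old code needed the this_cpu special case; on_each_cpu_mask() executes the function locally (with preemption disabled) when the current CPU is in the mask and via IPI on the others. A sketch of the call, with illustrative callback and data:

#include <linux/cpumask.h>
#include <linux/smp.h>

static void read_regs_on_cpu(void *data)
{
	/* per-CPU register reads would go here */
}

static void read_regs_everywhere(const struct cpumask *mask, void *data)
{
	/*
	 * Runs read_regs_on_cpu() on every CPU in @mask, including
	 * the caller's; the final 'true' waits for completion.
	 */
	on_each_cpu_mask(mask, read_regs_on_cpu, data, true);
}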
index c9ff08dbe10cefcf8731c1360370edd037307d43..e30a5939dc0d5566ae76ef31e790a578d8b0877c 100644 (file)
@@ -375,7 +375,7 @@ struct lm90_data {
        int kind;
        u32 flags;
 
-       int update_interval;    /* in milliseconds */
+       unsigned int update_interval; /* in milliseconds */
 
        u8 config_orig;         /* Original configuration register value */
        u8 convrate_orig;       /* Original conversion rate register value */
index 847d1b5f2c13b997f46d5a565bfba542fdaaee4e..688be9e060fc547f57a259bae61a49579edfa8e8 100644 (file)
@@ -300,13 +300,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
        if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
                /*
                 * The trace run will continue with the same allocated trace
-                * buffer. As such zero-out the buffer so that we don't end
-                * up with stale data.
-                *
-                * Since the tracer is still enabled drvdata::buf
-                * can't be NULL.
+                * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
+                * so we don't have to explicitly clear it. Also, since the
+                * tracer is still enabled, drvdata::buf can't be NULL.
                 */
-               memset(drvdata->buf, 0, drvdata->size);
                tmc_etr_enable_hw(drvdata);
        } else {
                /*
@@ -315,7 +312,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
                 */
                vaddr = drvdata->vaddr;
                paddr = drvdata->paddr;
-               drvdata->buf = NULL;
+               drvdata->buf = drvdata->vaddr = NULL;
        }
 
        drvdata->reading = false;
index 5443d03a1eec4b09feedd4be794d7ef5d2c2dca4..d08d1ab9bba5d7e1c0772d48b6d43802057e323d 100644 (file)
@@ -385,7 +385,6 @@ static int _coresight_build_path(struct coresight_device *csdev,
        int i;
        bool found = false;
        struct coresight_node *node;
-       struct coresight_connection *conn;
 
        /* An activated sink has been found.  Enqueue the element */
        if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -394,8 +393,9 @@ static int _coresight_build_path(struct coresight_device *csdev,
 
        /* Not a sink - recursively explore each port found on this element */
        for (i = 0; i < csdev->nr_outport; i++) {
-               conn = &csdev->conns[i];
-               if (_coresight_build_path(conn->child_dev, path) == 0) {
+               struct coresight_device *child_dev = csdev->conns[i].child_dev;
+
+               if (child_dev && _coresight_build_path(child_dev, path) == 0) {
                        found = true;
                        break;
                }
@@ -425,6 +425,7 @@ static int _coresight_build_path(struct coresight_device *csdev,
 struct list_head *coresight_build_path(struct coresight_device *csdev)
 {
        struct list_head *path;
+       int rc;
 
        path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
        if (!path)
@@ -432,9 +433,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev)
 
        INIT_LIST_HEAD(path);
 
-       if (_coresight_build_path(csdev, path)) {
+       rc = _coresight_build_path(csdev, path);
+       if (rc) {
                kfree(path);
-               path = NULL;
+               return ERR_PTR(rc);
        }
 
        return path;
@@ -507,8 +509,9 @@ int coresight_enable(struct coresight_device *csdev)
                goto out;
 
        path = coresight_build_path(csdev);
-       if (!path) {
+       if (IS_ERR(path)) {
                pr_err("building path(s) failed\n");
+               ret = PTR_ERR(path);
                goto out;
        }
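Returning ERR_PTR(rc) instead of NULL keeps the reason the path could not be built, and the caller above switches from a NULL test to IS_ERR()/PTR_ERR() accordingly. A minimal sketch of the convention with illustrative names:

#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>

static struct list_head *build_path(bool want_fail)
{
	struct list_head *path;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	if (want_fail) {	/* stand-in for a real lookup failure */
		kfree(path);
		return ERR_PTR(-ENODEV);
	}

	INIT_LIST_HEAD(path);
	return path;
}

/* Caller: distinguish "got a path" from "got an errno". */
static int use_path(void)
{
	struct list_head *path = build_path(false);

	if (IS_ERR(path))
		return PTR_ERR(path);

	/* ... walk the path ... */
	kfree(path);
	return 0;
}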
 
index 64b1208bca5e15af2295e9b26b9581af67f474c4..4a60ad2147479081e2fd8fd1b29f35f4dbb58cd6 100644 (file)
@@ -245,6 +245,13 @@ struct i801_priv {
        struct platform_device *mux_pdev;
 #endif
        struct platform_device *tco_pdev;
+
+       /*
+        * If set to true the host controller registers are reserved for
+        * ACPI AML use. Protected by acpi_lock.
+        */
+       bool acpi_reserved;
+       struct mutex acpi_lock;
 };
 
 #define FEATURE_SMBUS_PEC      (1 << 0)
@@ -718,6 +725,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
        int ret = 0, xact = 0;
        struct i801_priv *priv = i2c_get_adapdata(adap);
 
+       mutex_lock(&priv->acpi_lock);
+       if (priv->acpi_reserved) {
+               mutex_unlock(&priv->acpi_lock);
+               return -EBUSY;
+       }
+
        pm_runtime_get_sync(&priv->pci_dev->dev);
 
        hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
@@ -820,6 +833,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
 out:
        pm_runtime_mark_last_busy(&priv->pci_dev->dev);
        pm_runtime_put_autosuspend(&priv->pci_dev->dev);
+       mutex_unlock(&priv->acpi_lock);
        return ret;
 }
 
@@ -1257,6 +1271,83 @@ static void i801_add_tco(struct i801_priv *priv)
        priv->tco_pdev = pdev;
 }
 
+#ifdef CONFIG_ACPI
+static acpi_status
+i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+                    u64 *value, void *handler_context, void *region_context)
+{
+       struct i801_priv *priv = handler_context;
+       struct pci_dev *pdev = priv->pci_dev;
+       acpi_status status;
+
+       /*
+        * Once BIOS AML code touches the OpRegion we warn and inhibit any
+        * further access from the driver itself. This device is now owned
+        * by the system firmware.
+        */
+       mutex_lock(&priv->acpi_lock);
+
+       if (!priv->acpi_reserved) {
+               priv->acpi_reserved = true;
+
+               dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
+               dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
+
+               /*
+                * BIOS is accessing the host controller so prevent it from
+                * suspending automatically from now on.
+                */
+               pm_runtime_get_sync(&pdev->dev);
+       }
+
+       if ((function & ACPI_IO_MASK) == ACPI_READ)
+               status = acpi_os_read_port(address, (u32 *)value, bits);
+       else
+               status = acpi_os_write_port(address, (u32)*value, bits);
+
+       mutex_unlock(&priv->acpi_lock);
+
+       return status;
+}
+
+static int i801_acpi_probe(struct i801_priv *priv)
+{
+       struct acpi_device *adev;
+       acpi_status status;
+
+       adev = ACPI_COMPANION(&priv->pci_dev->dev);
+       if (adev) {
+               status = acpi_install_address_space_handler(adev->handle,
+                               ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
+                               NULL, priv);
+               if (ACPI_SUCCESS(status))
+                       return 0;
+       }
+
+       return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
+}
+
+static void i801_acpi_remove(struct i801_priv *priv)
+{
+       struct acpi_device *adev;
+
+       adev = ACPI_COMPANION(&priv->pci_dev->dev);
+       if (!adev)
+               return;
+
+       acpi_remove_address_space_handler(adev->handle,
+               ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
+
+       mutex_lock(&priv->acpi_lock);
+       if (priv->acpi_reserved)
+               pm_runtime_put(&priv->pci_dev->dev);
+       mutex_unlock(&priv->acpi_lock);
+}
+#else
+static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
+static inline void i801_acpi_remove(struct i801_priv *priv) { }
+#endif
+
 static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        unsigned char temp;
@@ -1274,6 +1365,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        priv->adapter.dev.parent = &dev->dev;
        ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
        priv->adapter.retries = 3;
+       mutex_init(&priv->acpi_lock);
 
        priv->pci_dev = dev;
        switch (dev->device) {
@@ -1336,10 +1428,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
                return -ENODEV;
        }
 
-       err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
-       if (err) {
+       if (i801_acpi_probe(priv))
                return -ENODEV;
-       }
 
        err = pcim_iomap_regions(dev, 1 << SMBBAR,
                                 dev_driver_string(&dev->dev));
@@ -1348,6 +1438,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        "Failed to request SMBus region 0x%lx-0x%Lx\n",
                        priv->smba,
                        (unsigned long long)pci_resource_end(dev, SMBBAR));
+               i801_acpi_remove(priv);
                return err;
        }
 
@@ -1412,6 +1503,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        err = i2c_add_adapter(&priv->adapter);
        if (err) {
                dev_err(&dev->dev, "Failed to add SMBus adapter\n");
+               i801_acpi_remove(priv);
                return err;
        }
 
@@ -1438,6 +1530,7 @@ static void i801_remove(struct pci_dev *dev)
 
        i801_del_mux(priv);
        i2c_del_adapter(&priv->adapter);
+       i801_acpi_remove(priv);
        pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
 
        platform_device_unregister(priv->tco_pdev);
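
Taken together, the i801 hunks above implement a one-way handoff: the first time ACPI AML touches the SMBus OpRegion, the driver sets acpi_reserved and backs off permanently, and because both the AML handler and i801_access() run under acpi_lock, an in-flight driver transaction can never interleave with a firmware one. A minimal sketch of the guarded-access shape, using only the fields added above (example_transfer is a hypothetical wrapper):

        /* Sketch: one-way ownership handoff under a mutex */
        static int example_transfer(struct i801_priv *priv)
        {
                mutex_lock(&priv->acpi_lock);
                if (priv->acpi_reserved) {
                        /* firmware owns the registers now; fail fast */
                        mutex_unlock(&priv->acpi_lock);
                        return -EBUSY;
                }
                /* ... perform the SMBus transaction with the lock held ... */
                mutex_unlock(&priv->acpi_lock);
                return 0;
        }
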
index aa5f01efd826693720a19d662058b953a8b2fbeb..30ae35146723a466afc20767d5f11853e040faf6 100644 (file)
@@ -934,8 +934,15 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
                return result;
 
        for (i = 0; i < length; i++) {
-               /* for the last byte TWSI_CTL_AAK must not be set */
-               if (i + 1 == length)
+               /*
+                * TWSI_CTL_AAK must not be set for the last byte received.
+                *
+                * A special case is I2C_M_RECV_LEN where we don't know the
+                * additional length yet. If recv_len is set, we assume we're
+                * not reading the final byte and therefore need to set
+                * TWSI_CTL_AAK.
+                */
+               if ((i + 1 == length) && !(recv_len && i == 0))
                        final_read = true;
 
                /* clear iflg to allow next event */
@@ -950,12 +957,8 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
 
                data[i] = octeon_i2c_data_read(i2c);
                if (recv_len && i == 0) {
-                       if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) {
-                               dev_err(i2c->dev,
-                                       "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
-                                       __func__, data[i]);
+                       if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
                                return -EPROTO;
-                       }
                        length += data[i];
                }
 
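To see why the recv_len special case above is needed: in an SMBus block read the message length starts at 1 (just the count byte), so without the extra test the count byte itself would satisfy i + 1 == length and be NAKed before the slave could send the payload. A worked trace, assuming the slave reports a count of 2:

        i = 0: i + 1 == length (1), but recv_len && i == 0 -> keep TWSI_CTL_AAK set
               count byte read as 2 -> length += 2 (length is now 3)
        i = 1: i + 1 (2) != length (3)                     -> keep TWSI_CTL_AAK set
        i = 2: i + 1 (3) == length (3)                     -> final_read, clear TWSI_CTL_AAK
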
index 6773cadf7c9fdd09248bae67e5d0197e16e97496..26e7c5187a589b473857fd991acdd60a1bdb4609 100644 (file)
@@ -260,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = {
        .remove = i2c_mux_reg_remove,
        .driver = {
                .name   = "i2c-mux-reg",
+               .of_match_table = of_match_ptr(i2c_mux_reg_of_match),
        },
 };
 
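Registering the of_match_table above is what lets the mux bind against a devicetree node rather than only a platform device name. A hedged sketch of the table the line refers to (the exact compatible string in the driver may differ):

        static const struct of_device_id i2c_mux_reg_of_match[] = {
                { .compatible = "i2c-mux-reg", },
                { /* sentinel */ },
        };
        MODULE_DEVICE_TABLE(of, i2c_mux_reg_of_match);

of_match_ptr() compiles the reference down to NULL when CONFIG_OF is disabled, so the line costs nothing on non-DT builds.
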
index a1e642ee13d6a4f8af5eb0f6a9c92bafe51460d9..7fddc137e91ed7942e91a42a70f19f6320cfc0df 100644 (file)
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
 
 int st_accel_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
 }
 
index dc73f2d85e6d102ef719557e4db2b08c68901ab5..4d95bfc4786cafc6de2fcc659950860596193877 100644 (file)
@@ -741,6 +741,7 @@ static const struct iio_info accel_info = {
 static const struct iio_trigger_ops st_accel_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
 #else
index c55898543a47d9a2db596e89f59c5416b19bb0b7..f1693dbebb8ac23359035786ef26b6607191058d 100644 (file)
@@ -57,31 +57,20 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct st_sensor_data *sdata = iio_priv(indio_dev);
+       s64 timestamp;
 
-       /* If we have a status register, check if this IRQ came from us */
-       if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
-               u8 status;
-
-               len = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-                          sdata->sensor_settings->drdy_irq.addr_stat_drdy,
-                          &status);
-               if (len < 0)
-                       dev_err(sdata->dev, "could not read channel status\n");
-
-               /*
-                * If this was not caused by any channels on this sensor,
-                * return IRQ_NONE
-                */
-               if (!(status & (u8)indio_dev->active_scan_mask[0]))
-                       return IRQ_NONE;
-       }
+       /* If we do timestamping here, do it before reading the values */
+       if (sdata->hw_irq_trigger)
+               timestamp = sdata->hw_timestamp;
+       else
+               timestamp = iio_get_time_ns();
 
        len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
        if (len < 0)
                goto st_sensors_get_buffer_element_error;
 
        iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
-               pf->timestamp);
+                                          timestamp);
 
 st_sensors_get_buffer_element_error:
        iio_trigger_notify_done(indio_dev->trig);
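
Note that the companion buffer.c hunks in this series pass NULL instead of iio_pollfunc_store_time to iio_triggered_buffer_setup(), so pf->timestamp is never populated and the handler above must source its own timestamp. The two setup variants, as a sketch (my_handler and my_setup_ops are hypothetical):

        /* With a top half, the core fills pf->timestamp for the bottom half */
        iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
                                   my_handler, &my_setup_ops);

        /* Without one (as in this series), the handler timestamps itself */
        iio_triggered_buffer_setup(indio_dev, NULL,
                                   st_sensors_trigger_handler, &my_setup_ops);
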
index dffe006921694840c24356f44bd400299e33b8f3..9e59c90f6a8d7baa1ee091ea588b75a39414032c 100644 (file)
@@ -363,6 +363,11 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
        if (err < 0)
                return err;
 
+       /* Disable DRDY, this might still be enabled after reboot. */
+       err = st_sensors_set_dataready_irq(indio_dev, false);
+       if (err < 0)
+               return err;
+
        if (sdata->current_fullscale) {
                err = st_sensors_set_fullscale(indio_dev,
                                                sdata->current_fullscale->num);
@@ -424,6 +429,9 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
        else
                drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
 
+       /* Flag to the poll function that the hardware trigger is in use */
+       sdata->hw_irq_trigger = enable;
+
        /* Enable/Disable the interrupt generator for data ready. */
        err = st_sensors_write_data_with_mask(indio_dev,
                                        sdata->sensor_settings->drdy_irq.addr,
index da72279fcf99cbd6eac0f48c8965837566a8cdc1..296e4ff19ae8d9cb4d40a0a852ecdecefb2e127f 100644 (file)
 #include <linux/iio/common/st_sensors.h>
 #include "st_sensors_core.h"
 
+/**
+ * st_sensors_irq_handler() - top half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_handler(int irq, void *p)
+{
+       struct iio_trigger *trig = p;
+       struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+       struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+       /* Get the time stamp as close in time as possible */
+       sdata->hw_timestamp = iio_get_time_ns();
+       return IRQ_WAKE_THREAD;
+}
+
+/**
+ * st_sensors_irq_thread() - bottom half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_thread(int irq, void *p)
+{
+       struct iio_trigger *trig = p;
+       struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+       struct st_sensor_data *sdata = iio_priv(indio_dev);
+       int ret;
+
+       /*
+        * If this trigger is backed by a hardware interrupt and we have a
+        * status register, check if this IRQ came from us
+        */
+       if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
+               u8 status;
+
+               ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+                          sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+                          &status);
+               if (ret < 0) {
+                       dev_err(sdata->dev, "could not read channel status\n");
+                       goto out_poll;
+               }
+               /*
+                * The lower bits of .active_scan_mask[0] are directly mapped
+                * to the channels on the sensor: either bit 0 for
+                * one-dimensional sensors, or e.g. x,y,z for accelerometers,
+                * gyroscopes or magnetometers. No sensor uses more than 3
+                * channels, so cut the other status bits here.
+                */
+               status &= 0x07;
+
+               /*
+                * If this was not caused by any channels on this sensor,
+                * return IRQ_NONE
+                */
+               if (!indio_dev->active_scan_mask)
+                       return IRQ_NONE;
+               if (!(status & (u8)indio_dev->active_scan_mask[0]))
+                       return IRQ_NONE;
+       }
+
+out_poll:
+       /* It's our IRQ: proceed to handle the register polling */
+       iio_trigger_poll_chained(p);
+       return IRQ_HANDLED;
+}
+
 int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                                const struct iio_trigger_ops *trigger_ops)
 {
@@ -30,6 +97,10 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                return -ENOMEM;
        }
 
+       iio_trigger_set_drvdata(sdata->trig, indio_dev);
+       sdata->trig->ops = trigger_ops;
+       sdata->trig->dev.parent = sdata->dev;
+
        irq = sdata->get_irq_data_ready(indio_dev);
        irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
        /*
@@ -77,9 +148,12 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
            sdata->sensor_settings->drdy_irq.addr_stat_drdy)
                irq_trig |= IRQF_SHARED;
 
-       err = request_threaded_irq(irq,
-                       iio_trigger_generic_data_rdy_poll,
-                       NULL,
+       /* Let's create an interrupt thread masking the hard IRQ here */
+       irq_trig |= IRQF_ONESHOT;
+
+       err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+                       st_sensors_irq_handler,
+                       st_sensors_irq_thread,
                        irq_trig,
                        sdata->trig->name,
                        sdata->trig);
@@ -88,10 +162,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                goto iio_trigger_free;
        }
 
-       iio_trigger_set_drvdata(sdata->trig, indio_dev);
-       sdata->trig->ops = trigger_ops;
-       sdata->trig->dev.parent = sdata->dev;
-
        err = iio_trigger_register(sdata->trig);
        if (err < 0) {
                dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
@@ -119,6 +189,18 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
 }
 EXPORT_SYMBOL(st_sensors_deallocate_trigger);
 
+int st_sensors_validate_device(struct iio_trigger *trig,
+                              struct iio_dev *indio_dev)
+{
+       struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+
+       if (indio != indio_dev)
+               return -EINVAL;
+
+       return 0;
+}
+EXPORT_SYMBOL(st_sensors_validate_device);
+
 MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
 MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
 MODULE_LICENSE("GPL v2");
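
The trigger rework above follows the standard threaded-IRQ split: a hard top half that only captures the timestamp and returns IRQ_WAKE_THREAD, and a sleepable thread that does the bus I/O and kicks the poll. A minimal standalone sketch of the shape (handler names are hypothetical):

        #include <linux/interrupt.h>

        static irqreturn_t my_top_half(int irq, void *p)
        {
                /* hard-IRQ context: record the timestamp, defer everything else */
                return IRQ_WAKE_THREAD;
        }

        static irqreturn_t my_thread(int irq, void *p)
        {
                /* thread context: safe to sleep, e.g. for I2C/SPI status reads */
                return IRQ_HANDLED;
        }

        /* IRQF_ONESHOT keeps the line masked until the thread half finishes */
        err = request_threaded_irq(irq, my_top_half, my_thread,
                                   irq_trig | IRQF_ONESHOT, trig->name, trig);

Moving iio_trigger_set_drvdata() and the ops assignment ahead of request_threaded_irq(), as the hunk above does, also matters: a (possibly shared) IRQ can fire as soon as it is requested, and both handlers dereference that drvdata.
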
index e63b957c985f4ebecbeaca10ac68867419e6900c..f7c71da42f15e28127492c8894a856a3a04da0e3 100644 (file)
@@ -247,7 +247,7 @@ config MCP4922
 
 config STX104
        tristate "Apex Embedded Systems STX104 DAC driver"
-       depends on X86 && ISA
+       depends on X86 && ISA_BUS_API
        help
          Say yes here to build support for the 2-channel DAC on the Apex
          Embedded Systems STX104 integrated analog PC/104 card. The base port
index 948f600e7059fb8837662a6aa01f3e72a3583100..69bde59098542daf480561c1bb113919736ccb2f 100644 (file)
@@ -525,7 +525,7 @@ static int ad5592r_alloc_channels(struct ad5592r_state *st)
 
        device_for_each_child_node(st->dev, child) {
                ret = fwnode_property_read_u32(child, "reg", &reg);
-               if (ret || reg > ARRAY_SIZE(st->channel_modes))
+               if (ret || reg >= ARRAY_SIZE(st->channel_modes))
                        continue;
 
                ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
index d67b17b6a7aab1abd0077e297c5e0f9769eee893..a5377044e42f0ccf128f1fd9bfb0ce02bba0a0a8 100644 (file)
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
 
 int st_gyro_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
 }
 
index 52a3c87c375ca62c027f66aa3de89d2c01c6fae7..a8012955a1f6d47130f703ced0df6a5e6c424407 100644 (file)
@@ -409,6 +409,7 @@ static const struct iio_info gyro_info = {
 static const struct iio_trigger_ops st_gyro_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
 #else
index 3be6d209a1595961ddd19dbcf8fc240f07617a77..11535911a5c697fa359aec0a4626f7a80b3d1e12 100644 (file)
@@ -165,10 +165,8 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
        struct am2315_sensor_data sensor_data;
 
        ret = am2315_read_data(data, &sensor_data);
-       if (ret < 0) {
-               mutex_unlock(&data->lock);
+       if (ret < 0)
                goto err;
-       }
 
        mutex_lock(&data->lock);
        if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
index fa47676131733744859f25b1bfa0f2183ac8f61d..a03832a5fc95aafd81b075710d32d57acf9f2677 100644 (file)
@@ -55,7 +55,7 @@ static const struct {
        },
        { /* IIO_HUMIDITYRELATIVE channel */
                .shift = 8,
-               .mask = 2,
+               .mask = 3,
        },
 };
 
@@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
                dev_err(&client->dev, "cannot read high byte measurement");
                return ret;
        }
-       val = ret << 6;
+       val = ret << 8;
 
        ret = i2c_smbus_read_byte(client);
        if (ret < 0) {
                dev_err(&client->dev, "cannot read low byte measurement");
                return ret;
        }
-       val |= ret >> 2;
+       val |= ret;
 
        return val;
 }
@@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
                return IIO_VAL_INT_PLUS_MICRO;
        case IIO_CHAN_INFO_SCALE:
                if (chan->type == IIO_TEMP) {
-                       *val = 165;
-                       *val2 = 65536 >> 2;
+                       *val = 165000;
+                       *val2 = 65536;
                        return IIO_VAL_FRACTIONAL;
                } else {
-                       *val = 0;
-                       *val2 = 10000;
-                       return IIO_VAL_INT_PLUS_MICRO;
+                       *val = 100;
+                       *val2 = 65536;
+                       return IIO_VAL_FRACTIONAL;
                }
                break;
        case IIO_CHAN_INFO_OFFSET:
-               *val = -3971;
-               *val2 = 879096;
+               *val = -15887;
+               *val2 = 515151;
                return IIO_VAL_INT_PLUS_MICRO;
        default:
                return -EINVAL;
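
A quick check of the new hdc100x constants against IIO's processed = (raw + offset) * scale convention, using the datasheet formulas T = raw / 2^16 * 165 degC - 40 degC and RH = raw / 2^16 * 100 %:

        temperature scale  = 165000 / 65536 mdegC per LSB
        temperature offset = -40000 / (165000 / 65536) = -15887.515151... LSB
                           -> *val = -15887, *val2 = 515151 (IIO_VAL_INT_PLUS_MICRO)
        humidity scale     = 100 / 65536 % per LSB

The old code also shifted the raw bytes as if the result were 14-bit right-justified; keeping the full 16-bit word is what makes the 65536 denominators above line up.
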
index 0bf92b06d7d87a1cfb2ff85ce9662abb5ec07fd3..b8a290ec984e33f79aaa8b546f543709e579b7b3 100644 (file)
@@ -209,11 +209,11 @@ static const struct  bmi160_scale_item bmi160_scale_table[] = {
 };
 
 static const struct bmi160_odr bmi160_accel_odr[] = {
-       {0x01, 0, 78125},
-       {0x02, 1, 5625},
-       {0x03, 3, 125},
-       {0x04, 6, 25},
-       {0x05, 12, 5},
+       {0x01, 0, 781250},
+       {0x02, 1, 562500},
+       {0x03, 3, 125000},
+       {0x04, 6, 250000},
+       {0x05, 12, 500000},
        {0x06, 25, 0},
        {0x07, 50, 0},
        {0x08, 100, 0},
@@ -229,7 +229,7 @@ static const struct bmi160_odr bmi160_gyro_odr[] = {
        {0x08, 100, 0},
        {0x09, 200, 0},
        {0x0A, 400, 0},
-       {0x0B, 8000, 0},
+       {0x0B, 800, 0},
        {0x0C, 1600, 0},
        {0x0D, 3200, 0},
 };
@@ -364,8 +364,8 @@ int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
 
        return regmap_update_bits(data->regmap,
                                  bmi160_regs[t].config,
-                                 bmi160_odr_table[t].tbl[i].bits,
-                                 bmi160_regs[t].config_odr_mask);
+                                 bmi160_regs[t].config_odr_mask,
+                                 bmi160_odr_table[t].tbl[i].bits);
 }
 
 static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
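
The last hunk above fixes a swapped argument pair: regmap_update_bits() takes the mask before the value, so passing (bits, mask) wrote the wrong bits of the config register. For reference:

        /* int regmap_update_bits(struct regmap *map, unsigned int reg,
         *                        unsigned int mask, unsigned int val); */
        regmap_update_bits(data->regmap, bmi160_regs[t].config,
                           bmi160_regs[t].config_odr_mask,   /* mask  */
                           bmi160_odr_table[t].tbl[i].bits); /* value */
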
index ae2806aafb7267c474f54ffac8bc33232739c469..0c52dfe649771db4a4a23e7e09365dbbc33932e1 100644 (file)
@@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
 
        /* Prevent the module from being removed whilst attached to a trigger */
        __module_get(pf->indio_dev->info->driver_module);
+
+       /* Get irq number */
        pf->irq = iio_trigger_get_irq(trig);
+       if (pf->irq < 0)
+               goto out_put_module;
+
+       /* Request irq */
        ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
                                   pf->type, pf->name,
                                   pf);
-       if (ret < 0) {
-               module_put(pf->indio_dev->info->driver_module);
-               return ret;
-       }
+       if (ret < 0)
+               goto out_put_irq;
 
+       /* Enable trigger in driver */
        if (trig->ops && trig->ops->set_trigger_state && notinuse) {
                ret = trig->ops->set_trigger_state(trig, true);
                if (ret < 0)
-                       module_put(pf->indio_dev->info->driver_module);
+                       goto out_free_irq;
        }
 
        return ret;
+
+out_free_irq:
+       free_irq(pf->irq, pf);
+out_put_irq:
+       iio_trigger_put_irq(trig, pf->irq);
+out_put_module:
+       module_put(pf->indio_dev->info->driver_module);
+       return ret;
 }
 
 static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
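
The rewritten attach path above is the usual kernel unwind ladder: each step that acquires a resource gets a matching out_* label, and a failure releases everything acquired so far in reverse order. A minimal skeleton of the shape (step_*/undo_* are hypothetical stand-ins):

        static int example_attach(void)
        {
                int ret;

                ret = step_a();         /* cf. __module_get() */
                if (ret)
                        return ret;

                ret = step_b();         /* cf. iio_trigger_get_irq() */
                if (ret)
                        goto out_undo_a;

                ret = step_c();         /* cf. request_threaded_irq() */
                if (ret)
                        goto out_undo_b;

                return 0;

        out_undo_b:
                undo_b();
        out_undo_a:
                undo_a();
                return ret;
        }
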
index b4dbb39129773467dad31c97eedeb33482a31ecf..651d57b8abbf910b3e94ad9e628e312b958cbb50 100644 (file)
@@ -1011,6 +1011,7 @@ static int apds9960_probe(struct i2c_client *client,
 
        iio_device_attach_buffer(indio_dev, buffer);
 
+       indio_dev->dev.parent = &client->dev;
        indio_dev->info = &apds9960_info;
        indio_dev->name = APDS9960_DRV_NAME;
        indio_dev->channels = apds9960_channels;
index 72b364e4aa727f5908fdd6ccf76902fcfb0ffe66..b54dcba05a8294d988d6bf5421cca21f01224159 100644 (file)
@@ -84,7 +84,7 @@ static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev,
        int ret;
 
        if (!readval)
-               bh1780_write(bh1780, (u8)reg, (u8)writeval);
+               return bh1780_write(bh1780, (u8)reg, (u8)writeval);
 
        ret = bh1780_read(bh1780, (u8)reg);
        if (ret < 0)
@@ -187,7 +187,7 @@ static int bh1780_probe(struct i2c_client *client,
 
        indio_dev->dev.parent = &client->dev;
        indio_dev->info = &bh1780_info;
-       indio_dev->name = id->name;
+       indio_dev->name = "bh1780";
        indio_dev->channels = bh1780_channels;
        indio_dev->num_channels = ARRAY_SIZE(bh1780_channels);
        indio_dev->modes = INDIO_DIRECT_MODE;
@@ -226,7 +226,8 @@ static int bh1780_remove(struct i2c_client *client)
 static int bh1780_runtime_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct bh1780_data *bh1780 = iio_priv(indio_dev);
        int ret;
 
        ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
@@ -241,7 +242,8 @@ static int bh1780_runtime_suspend(struct device *dev)
 static int bh1780_runtime_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct bh1780_data *bh1780 = iio_priv(indio_dev);
        int ret;
 
        ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
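
The bh1780 runtime-PM fix above works because probe stores the iio_dev, not the private data, as I2C clientdata; the private state has to be reached through iio_priv(). A sketch of the pairing, assuming the usual IIO allocation pattern:

        /* probe side: clientdata holds the iio_dev wrapper */
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*bh1780));
        i2c_set_clientdata(client, indio_dev);

        /* PM callback side: unwrap in two steps, as the hunks above now do */
        struct iio_dev *indio_dev = i2c_get_clientdata(client);
        struct bh1780_data *bh1780 = iio_priv(indio_dev);
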
index e01e58a9bd1419397dda85160e7cc625ab7fd6e7..f17cb2ea18f59c9ea4f368e26488e50d114b05ec 100644 (file)
@@ -147,7 +147,6 @@ static const struct iio_chan_spec max44000_channels[] = {
        {
                .type = IIO_PROXIMITY,
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .scan_index = MAX44000_SCAN_INDEX_PRX,
                .scan_type = {
                        .sign           = 'u',
index ecd3bd0a97693420c13bfae8fc583ff74854edf8..0a9e8fadfa9de8a66dd3d1fee8d82f81cc6b10a4 100644 (file)
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
 
 int st_magn_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
 }
 
index 62036d2a9956aa426cdde5ce71906e4e82663510..8250fc322c56754aa23ea4c877ff3414b1f58d81 100644 (file)
@@ -572,6 +572,7 @@ static const struct iio_info magn_info = {
 static const struct iio_trigger_ops st_magn_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops)
 #else
index 2f1498e12bb28978ca8bea9356bfac9338b407fe..724452d61846c67155120876af55921f56e54c6a 100644 (file)
@@ -879,8 +879,8 @@ static int bmp280_probe(struct i2c_client *client,
        if (ret < 0)
                return ret;
        if (chip_id != id->driver_data) {
-               dev_err(&client->dev, "bad chip id.  expected %x got %x\n",
-                       BMP280_CHIP_ID, chip_id);
+               dev_err(&client->dev, "bad chip id.  expected %lx got %x\n",
+                       id->driver_data, chip_id);
                return -EINVAL;
        }
 
index 2ff53f222352ca66d4057ff054732e6a0a581db6..99468d0a64e74ce3f5ab0701f6d38e9f77c8c49c 100644 (file)
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = {
 
 int st_press_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_press_buffer_setup_ops);
 }
 
index 9e9b72a8f18f78fe7d7084710b4854604b7fb0b8..92a118c3c4acf3bc1529c4350dde5154b41c4abb 100644 (file)
 #include <linux/iio/common/st_sensors.h>
 #include "st_pressure.h"
 
+#define MCELSIUS_PER_CELSIUS                   1000
+
+/* Default pressure sensitivity */
 #define ST_PRESS_LSB_PER_MBAR                  4096UL
 #define ST_PRESS_KPASCAL_NANO_SCALE            (100000000UL / \
                                                 ST_PRESS_LSB_PER_MBAR)
+
+/* Default temperature sensitivity */
 #define ST_PRESS_LSB_PER_CELSIUS               480UL
-#define ST_PRESS_CELSIUS_NANO_SCALE            (1000000000UL / \
-                                                ST_PRESS_LSB_PER_CELSIUS)
+#define ST_PRESS_MILLI_CELSIUS_OFFSET          42500UL
+
 #define ST_PRESS_NUMBER_DATA_CHANNELS          1
 
 /* FULLSCALE */
+#define ST_PRESS_FS_AVL_1100MB                 1100
 #define ST_PRESS_FS_AVL_1260MB                 1260
 
 #define ST_PRESS_1_OUT_XL_ADDR                 0x28
@@ -54,9 +60,6 @@
 #define ST_PRESS_LPS331AP_PW_MASK              0x80
 #define ST_PRESS_LPS331AP_FS_ADDR              0x23
 #define ST_PRESS_LPS331AP_FS_MASK              0x30
-#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL      0x00
-#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN     ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN     ST_PRESS_CELSIUS_NANO_SCALE
 #define ST_PRESS_LPS331AP_BDU_ADDR             0x20
 #define ST_PRESS_LPS331AP_BDU_MASK             0x04
 #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR                0x22
 #define ST_PRESS_LPS331AP_OD_IRQ_ADDR          0x22
 #define ST_PRESS_LPS331AP_OD_IRQ_MASK          0x40
 #define ST_PRESS_LPS331AP_MULTIREAD_BIT                true
-#define ST_PRESS_LPS331AP_TEMP_OFFSET          42500
 
 /* CUSTOM VALUES FOR LPS001WP SENSOR */
+
+/* LPS001WP pressure resolution */
+#define ST_PRESS_LPS001WP_LSB_PER_MBAR         16UL
+/* LPS001WP temperature resolution */
+#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS      64UL
+
 #define ST_PRESS_LPS001WP_WAI_EXP              0xba
 #define ST_PRESS_LPS001WP_ODR_ADDR             0x20
 #define ST_PRESS_LPS001WP_ODR_MASK             0x30
@@ -78,6 +86,8 @@
 #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL     0x03
 #define ST_PRESS_LPS001WP_PW_ADDR              0x20
 #define ST_PRESS_LPS001WP_PW_MASK              0x40
+#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
+       (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
 #define ST_PRESS_LPS001WP_BDU_ADDR             0x20
 #define ST_PRESS_LPS001WP_BDU_MASK             0x04
 #define ST_PRESS_LPS001WP_MULTIREAD_BIT                true
 #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL       0x04
 #define ST_PRESS_LPS25H_PW_ADDR                        0x20
 #define ST_PRESS_LPS25H_PW_MASK                        0x80
-#define ST_PRESS_LPS25H_FS_ADDR                        0x00
-#define ST_PRESS_LPS25H_FS_MASK                        0x00
-#define ST_PRESS_LPS25H_FS_AVL_1260_VAL                0x00
-#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN       ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN       ST_PRESS_CELSIUS_NANO_SCALE
 #define ST_PRESS_LPS25H_BDU_ADDR               0x20
 #define ST_PRESS_LPS25H_BDU_MASK               0x04
 #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR          0x23
 #define ST_PRESS_LPS25H_OD_IRQ_ADDR            0x22
 #define ST_PRESS_LPS25H_OD_IRQ_MASK            0x40
 #define ST_PRESS_LPS25H_MULTIREAD_BIT          true
-#define ST_PRESS_LPS25H_TEMP_OFFSET            42500
 #define ST_PRESS_LPS25H_OUT_XL_ADDR            0x28
 #define ST_TEMP_LPS25H_OUT_L_ADDR              0x2b
 
@@ -161,7 +165,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
                        .storagebits = 16,
                        .endianness = IIO_LE,
                },
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+               .info_mask_separate =
+                       BIT(IIO_CHAN_INFO_RAW) |
+                       BIT(IIO_CHAN_INFO_SCALE),
                .modified = 0,
        },
        {
@@ -177,7 +183,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
                },
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) |
-                       BIT(IIO_CHAN_INFO_OFFSET),
+                       BIT(IIO_CHAN_INFO_SCALE),
                .modified = 0,
        },
        IIO_CHAN_SOFT_TIMESTAMP(1)
@@ -212,11 +218,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .addr = ST_PRESS_LPS331AP_FS_ADDR,
                        .mask = ST_PRESS_LPS331AP_FS_MASK,
                        .fs_avl = {
+                               /*
+                                * Pressure and temperature sensitivity values
+                                * as defined in table 3 of the LPS331AP datasheet.
+                                */
                                [0] = {
                                        .num = ST_PRESS_FS_AVL_1260MB,
-                                       .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
-                                       .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
-                                       .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
+                                       .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+                                       .gain2 = ST_PRESS_LSB_PER_CELSIUS,
                                },
                        },
                },
@@ -261,7 +270,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
                },
                .fs = {
-                       .addr = 0,
+                       .fs_avl = {
+                               /*
+                                * Pressure and temperature resolution values
+                                * as defined in table 3 of the LPS001WP datasheet.
+                                */
+                               [0] = {
+                                       .num = ST_PRESS_FS_AVL_1100MB,
+                                       .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
+                                       .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
+                               },
+                       },
                },
                .bdu = {
                        .addr = ST_PRESS_LPS001WP_BDU_ADDR,
@@ -298,14 +317,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
                },
                .fs = {
-                       .addr = ST_PRESS_LPS25H_FS_ADDR,
-                       .mask = ST_PRESS_LPS25H_FS_MASK,
                        .fs_avl = {
+                               /*
+                                * Pressure and temperature sensitivity values
+                                * as defined in table 3 of the LPS25H datasheet.
+                                */
                                [0] = {
                                        .num = ST_PRESS_FS_AVL_1260MB,
-                                       .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
-                                       .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
-                                       .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
+                                       .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+                                       .gain2 = ST_PRESS_LSB_PER_CELSIUS,
                                },
                        },
                },
@@ -364,26 +384,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
 
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_SCALE:
-               *val = 0;
-
                switch (ch->type) {
                case IIO_PRESSURE:
+                       *val = 0;
                        *val2 = press_data->current_fullscale->gain;
-                       break;
+                       return IIO_VAL_INT_PLUS_NANO;
                case IIO_TEMP:
+                       *val = MCELSIUS_PER_CELSIUS;
                        *val2 = press_data->current_fullscale->gain2;
-                       break;
+                       return IIO_VAL_FRACTIONAL;
                default:
                        err = -EINVAL;
                        goto read_error;
                }
 
-               return IIO_VAL_INT_PLUS_NANO;
        case IIO_CHAN_INFO_OFFSET:
                switch (ch->type) {
                case IIO_TEMP:
-                       *val = 425;
-                       *val2 = 10;
+                       *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
+                              press_data->current_fullscale->gain2;
+                       *val2 = MCELSIUS_PER_CELSIUS;
                        break;
                default:
                        err = -EINVAL;
@@ -425,6 +445,7 @@ static const struct iio_info press_info = {
 static const struct iio_trigger_ops st_press_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops)
 #else
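
A quick check of the reworked temperature reporting above, for the LPS331AP/LPS25H sensitivity of gain2 = 480 LSB/degC (ST_PRESS_LSB_PER_CELSIUS) and the 42.5 degC datasheet offset:

        scale  = MCELSIUS_PER_CELSIUS / gain2 = 1000 / 480 mdegC per LSB
        offset = 42500 * 480 / 1000 = 20400 LSB
        processed(raw = 0) = (0 + 20400) * 1000 / 480 = 42500 mdegC = 42.5 degC

i.e. the offset is now expressed in raw LSB, so userspace applying (raw + offset) * scale lands on the datasheet formula instead of the previously hard-coded 42.5.
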
index f4d29d5dbd5f88d6e5edffef142e86db30076d44..e2f926cdcad2acdbdd8aec50f0f479f52d2a6584 100644 (file)
@@ -64,6 +64,7 @@ struct as3935_state {
        struct delayed_work work;
 
        u32 tune_cap;
+       u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
        u8 buf[2] ____cacheline_aligned;
 };
 
@@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
                .type           = IIO_PROXIMITY,
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) |
-                       BIT(IIO_CHAN_INFO_PROCESSED),
+                       BIT(IIO_CHAN_INFO_PROCESSED) |
+                       BIT(IIO_CHAN_INFO_SCALE),
                .scan_index     = 0,
                .scan_type = {
                        .sign           = 'u',
@@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
                /* storm out of range */
                if (*val == AS3935_DATA_MASK)
                        return -EINVAL;
-               *val *= 1000;
+
+               if (m == IIO_CHAN_INFO_PROCESSED)
+                       *val *= 1000;
+               break;
+       case IIO_CHAN_INFO_SCALE:
+               *val = 1000;
                break;
        default:
                return -EINVAL;
@@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
        ret = as3935_read(st, AS3935_DATA, &val);
        if (ret)
                goto err_read;
-       val &= AS3935_DATA_MASK;
-       val *= 1000;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
+       st->buffer[0] = val & AS3935_DATA_MASK;
+       iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
+                                          pf->timestamp);
 err_read:
        iio_trigger_notify_done(indio_dev->trig);
 
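The new 16-byte buffer above exists because iio_push_to_buffers_with_timestamp() appends an 8-byte-aligned s64 timestamp after the scan data; pushing a bare stack int, as the old code did, let the core write the timestamp past the end of the variable. Resulting layout for this single u8 channel:

        byte  0     : proximity data (u8)
        bytes 1..7  : padding so the timestamp is 8-byte aligned
        bytes 8..15 : s64 timestamp filled in by the core
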
index c2e257d97effb9813ef22b58ab580507dbf4134d..1a2984c28b9589410b381a6a0277aff188395907 100644 (file)
@@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
 {
        int ret = 0;
        struct net_device *old_net_dev;
+       enum ib_gid_type old_gid_type;
 
        /* in rdma_cap_roce_gid_table, this function should be protected by a
         * sleep-able lock.
@@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
        }
 
        old_net_dev = table->data_vec[ix].attr.ndev;
+       old_gid_type = table->data_vec[ix].attr.gid_type;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* if modify_gid failed, just delete the old gid */
@@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }
-       if (default_gid)
-               table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+
        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
+       if (default_gid) {
+               table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+               if (action == GID_TABLE_WRITE_ACTION_DEL)
+                       table->data_vec[ix].attr.gid_type = old_gid_type;
+       }
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);
@@ -405,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 
        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
-                       if (!del_gid(ib_dev, port, table, ix, false))
+                       if (!del_gid(ib_dev, port, table, ix,
+                                    !!(table->data_vec[ix].props &
+                                       GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;
 
        write_unlock_irq(&table->rwlock);
index 1d92e091e22ef0bdb64ee7433eb2cf5fbba9600c..c99525512b3434d24a7882d4f869f6e675da44b2 100644 (file)
@@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
 
        /* Check if the device started its remove_one */
-       spin_lock_irq(&cm.lock);
+       spin_lock_irqsave(&cm.lock, flags);
        if (!cm_dev->going_down) {
                queue_delayed_work(cm.wq, &work->work, 0);
        } else {
                kfree(work);
                ret = -ENODEV;
        }
-       spin_unlock_irq(&cm.lock);
+       spin_unlock_irqrestore(&cm.lock, flags);
 
 out:
        return ret;
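
The irqsave conversion above guards cm_establish() against callers that already have interrupts disabled: spin_unlock_irq() unconditionally re-enables interrupts, while the irqsave/irqrestore pair puts back exactly the state found at lock time. For reference:

        unsigned long flags;

        spin_lock_irq(&cm.lock);                 /* assumes IRQs were enabled */
        spin_unlock_irq(&cm.lock);               /* unconditionally re-enables */

        spin_lock_irqsave(&cm.lock, flags);      /* records current IRQ state */
        spin_unlock_irqrestore(&cm.lock, flags); /* restores exactly that state */
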
index f0c91ba3178a12030a8cc4e38db7a651633759b8..ad1b1adcf6f01894f1fc6f3f35eed308d55e269c 100644 (file)
@@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
                complete(&id_priv->comp);
 }
 
-static int cma_disable_callback(struct rdma_id_private *id_priv,
-                               enum rdma_cm_state state)
-{
-       mutex_lock(&id_priv->handler_mutex);
-       if (id_priv->state != state) {
-               mutex_unlock(&id_priv->handler_mutex);
-               return -EINVAL;
-       }
-       return 0;
-}
-
 struct rdma_cm_id *rdma_create_id(struct net *net,
                                  rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps,
@@ -1671,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        struct rdma_cm_event event;
        int ret = 0;
 
+       mutex_lock(&id_priv->handler_mutex);
        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-               cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
+            id_priv->state != RDMA_CM_CONNECT) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-               cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
-               return 0;
+            id_priv->state != RDMA_CM_DISCONNECT))
+               goto out;
 
        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
@@ -1870,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e
 
 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
-       struct rdma_id_private *listen_id, *conn_id;
+       struct rdma_id_private *listen_id, *conn_id = NULL;
        struct rdma_cm_event event;
        struct net_device *net_dev;
        int offset, ret;
@@ -1884,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                goto net_dev_put;
        }
 
-       if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
+       mutex_lock(&listen_id->handler_mutex);
+       if (listen_id->state != RDMA_CM_LISTEN) {
                ret = -ECONNABORTED;
-               goto net_dev_put;
+               goto err1;
        }
 
        memset(&event, 0, sizeof event);
@@ -1976,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
        struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
-       if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
-               return 0;
+       mutex_lock(&id_priv->handler_mutex);
+       if (id_priv->state != RDMA_CM_CONNECT)
+               goto out;
 
        memset(&event, 0, sizeof event);
        switch (iw_event->event) {
@@ -2029,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
                return ret;
        }
 
+out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
 }
@@ -2039,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
        struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
        struct rdma_cm_event event;
-       int ret;
+       int ret = -ECONNABORTED;
        struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
        listen_id = cm_id->context;
-       if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
-               return -ECONNABORTED;
+
+       mutex_lock(&listen_id->handler_mutex);
+       if (listen_id->state != RDMA_CM_LISTEN)
+               goto out;
 
        /* Create a new RDMA id for the new IW CM ID */
        new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
@@ -3216,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
        struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
        int ret = 0;
 
-       if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
-               return 0;
+       mutex_lock(&id_priv->handler_mutex);
+       if (id_priv->state != RDMA_CM_CONNECT)
+               goto out;
 
        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
@@ -3673,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
        struct rdma_id_private *id_priv;
        struct cma_multicast *mc = multicast->context;
        struct rdma_cm_event event;
-       int ret;
+       int ret = 0;
 
        id_priv = mc->id_priv;
-       if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
-           cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
-               return 0;
+       mutex_lock(&id_priv->handler_mutex);
+       if (id_priv->state != RDMA_CM_ADDR_BOUND &&
+           id_priv->state != RDMA_CM_ADDR_RESOLVED)
+               goto out;
 
        if (!status)
                status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
@@ -3720,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
                return 0;
        }
 
+out:
        mutex_unlock(&id_priv->handler_mutex);
        return 0;
 }
@@ -3878,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
        gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
                   rdma_start_port(id_priv->cma_dev->device)];
        if (addr->sa_family == AF_INET) {
-               if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+               if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+                       mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
                        err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
                                            true);
-               if (!err) {
-                       mc->igmp_joined = true;
-                       mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+                       if (!err)
+                               mc->igmp_joined = true;
                }
        } else {
                if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
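
The cma.c hunks above retire cma_disable_callback(), whose contract (returns 0 with handler_mutex held, nonzero with it dropped) was easy to misuse, in favour of an explicit lock/check/goto-out shape. A minimal sketch of the replacement pattern:

        mutex_lock(&id_priv->handler_mutex);
        if (id_priv->state != RDMA_CM_CONNECT)
                goto out;               /* stale event: fall through to unlock */

        /* ... handle the event with the mutex held throughout ... */

out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
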
index 5516fb0703442cafc0c917d3115b8b25e7c71b27..5c155fa91eec8380a463ad687a6aa977c10bcefd 100644 (file)
@@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device,
        if (err || port_attr->subnet_prefix)
                return err;
 
+       if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+               return 0;
+
        err = ib_query_gid(device, port_num, 0, &gid, NULL);
        if (err)
                return err;
@@ -1024,7 +1027,8 @@ static int __init ib_core_init(void)
                goto err_mad;
        }
 
-       if (ib_add_ibnl_clients()) {
+       ret = ib_add_ibnl_clients();
+       if (ret) {
                pr_warn("Couldn't register ibnl clients\n");
                goto err_sa;
        }
index 43e3fa27102b8cd40fb39dc6e4747deed82c0f75..1c41b95cefec0a9e84a7cb151c2765c4d016fd9a 100644 (file)
@@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
        if (!nlmsg_request) {
                pr_info("%s: Could not find a matching request (seq = %u)\n",
                                 __func__, msg_seq);
-                       return -EINVAL;
+               return -EINVAL;
        }
        pm_msg = nlmsg_request->req_buffer;
        local_sockaddr = (struct sockaddr_storage *)
index 82fb511112da745e3fdbf30fe35bcebfceee7491..2d49228f28b2b18ac537b1bb982127cf66138356 100644 (file)
@@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
                /* Now, check to see if there are any methods still in use */
                if (!check_method_table(method)) {
                        /* If not, release management method table */
-                        kfree(method);
-                        class->method_table[mgmt_class] = NULL;
-                        /* Any management classes left ? */
+                       kfree(method);
+                       class->method_table[mgmt_class] = NULL;
+                       /* Any management classes left ? */
                        if (!check_class_table(class)) {
                                /* If not, release management class table */
                                kfree(class);
index 5e573bb18660d68f9e5c051ce3c7859ce08dea03..a5793c8f15901483e24e24393173ea8dbee6b191 100644 (file)
@@ -889,9 +889,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
 static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                           u8 port_num)
 {
-       struct attribute_group *hsag = NULL;
+       struct attribute_group *hsag;
        struct rdma_hw_stats *stats;
-       int i = 0, ret;
+       int i, ret;
 
        stats = device->alloc_hw_stats(device, port_num);
 
@@ -899,19 +899,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                return;
 
        if (!stats->names || stats->num_counters <= 0)
-               goto err;
+               goto err_free_stats;
 
+       /*
+        * Two extra attribute elements here, one for the lifespan entry and
+        * one to NULL terminate the list for the sysfs core code
+        */
        hsag = kzalloc(sizeof(*hsag) +
-                      // 1 extra for the lifespan config entry
-                      sizeof(void *) * (stats->num_counters + 1),
+                      sizeof(void *) * (stats->num_counters + 2),
                       GFP_KERNEL);
        if (!hsag)
-               return;
+               goto err_free_stats;
 
        ret = device->get_hw_stats(device, stats, port_num,
                                   stats->num_counters);
        if (ret != stats->num_counters)
-               goto err;
+               goto err_free_hsag;
 
        stats->timestamp = jiffies;
 
@@ -922,10 +925,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
                if (!hsag->attrs[i])
                        goto err;
+               sysfs_attr_init(hsag->attrs[i]);
        }
 
        /* treat an error here as non-fatal */
        hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
+       if (hsag->attrs[i])
+               sysfs_attr_init(hsag->attrs[i]);
 
        if (port) {
                struct kobject *kobj = &port->kobj;
@@ -946,10 +952,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
        return;
 
 err:
-       kfree(stats);
        for (; i >= 0; i--)
                kfree(hsag->attrs[i]);
+err_free_hsag:
        kfree(hsag);
+err_free_stats:
+       kfree(stats);
        return;
 }
 
index 1a8babb8ee3c4d328af7e08063cef8f147086a10..825021d1008b9a13ee5a3b02ed0a1dd7ce80d0ee 100644 (file)
@@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file,
        struct ib_srq                   *srq = NULL;
        struct ib_qp                    *qp;
        char                            *buf;
-       struct ib_qp_init_attr          attr;
+       struct ib_qp_init_attr          attr = {};
        struct ib_uverbs_ex_create_qp_resp resp;
        int                             ret;
 
index 1d7d4cf442e3c9646b6824e943e7e649c873d104..6298f54b413756a5bf0f19891a080739ec6300ef 100644 (file)
@@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
                ah_attr->grh.dgid = sgid;
 
                if (!rdma_cap_eth_ah(device, port_num)) {
-                       ret = ib_find_cached_gid_by_port(device, &dgid,
-                                                        IB_GID_TYPE_IB,
-                                                        port_num, NULL,
-                                                        &gid_index);
-                       if (ret)
-                               return ret;
+                       if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
+                               ret = ib_find_cached_gid_by_port(device, &dgid,
+                                                                IB_GID_TYPE_IB,
+                                                                port_num, NULL,
+                                                                &gid_index);
+                               if (ret)
+                                       return ret;
+                       } else {
+                               gid_index = 0;
+                       }
                }
 
                ah_attr->grh.sgid_index = (u8) gid_index;
index 6e7050ab9e16049039406b8cbb207cdfced18109..14d7eeb09be6545f5f21144f0f11ea41c53203a4 100644 (file)
@@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
        const struct cpumask *node_mask,
                *proc_mask = tsk_cpus_allowed(current);
        struct cpu_mask_set *set = &dd->affinity->proc;
-       char buf[1024];
 
        /*
         * check whether process/context affinity has already
         * been set
         */
        if (cpumask_weight(proc_mask) == 1) {
-               scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
-               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
-                         current->pid, current->comm, buf);
+               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+                         current->pid, current->comm,
+                         cpumask_pr_args(proc_mask));
                /*
                 * Mark the pre-set CPU as used. This is atomic so we don't
                 * need the lock
@@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
                cpumask_set_cpu(cpu, &set->used);
                goto done;
        } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
-               scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
-               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
-                         current->pid, current->comm, buf);
+               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+                         current->pid, current->comm,
+                         cpumask_pr_args(proc_mask));
                goto done;
        }
 
@@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
        cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
                                  &dd->affinity->rcv_intr.mask :
                                  &dd->affinity->rcv_intr.used));
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
-       hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
+       hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
+                 cpumask_pr_args(intrs));
 
        /*
         * If we don't have a NUMA node requested, preference is towards
@@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
        if (node == -1)
                node = dd->node;
        node_mask = cpumask_of_node(node);
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
-       hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
+       hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
+                 cpumask_pr_args(node_mask));
 
        /* diff will hold all unused cpus */
        cpumask_andnot(diff, &set->mask, &set->used);
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
-       hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
+       hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
 
        /* get cpumask of available CPUs on preferred NUMA */
        cpumask_and(mask, diff, node_mask);
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
-       hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
+       hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
 
        /*
         * At first, we don't want to place processes on the same
@@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
                cpumask_andnot(diff, &set->mask, &set->used);
                cpumask_andnot(mask, diff, node_mask);
        }
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
-       hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
+       hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
+                 cpumask_pr_args(mask));
 
        cpu = cpumask_first(mask);
        if (cpu >= nr_cpu_ids) /* empty */
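
The affinity hunks above drop a 1 KiB stack buffer by printing the cpumask directly: the kernel's %*pbl specifier consumes the (nr_cpu_ids, bits) pair that cpumask_pr_args() expands to and renders a range list such as 0-3,8. For reference:

        /* #define cpumask_pr_args(maskp)  nr_cpu_ids, cpumask_bits(maskp) */
        hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
                  cpumask_pr_args(intrs));
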
index 3b876da745a1dc60d2b589b5fbe756cdc33419aa..f5de851780555a5c9f06d91c2eb9bb2ff14c666e 100644 (file)
@@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *);
 static void dc_start(struct hfi1_devdata *);
 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
                           unsigned int *np);
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
 
 /*
  * Error interrupt table entry.  This is used as input to the interrupt
@@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work)
        }
 
        reset_neighbor_info(ppd);
-       if (ppd->mgmt_allowed)
-               remove_full_mgmt_pkey(ppd);
 
        /* disable the port */
        clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -7070,12 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
                            __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
        ppd->pkeys[2] = FULL_MGMT_P_KEY;
        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+       hfi1_event_pkey_change(ppd->dd, ppd->port);
 }
 
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
 {
-       ppd->pkeys[2] = 0;
-       (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+       if (ppd->pkeys[2] != 0) {
+               ppd->pkeys[2] = 0;
+               (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+               hfi1_event_pkey_change(ppd->dd, ppd->port);
+       }
 }
 
 /*
@@ -7832,8 +7834,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
                         * save first 2 flits in the packet that caused
                         * the error
                         */
-                        dd->err_info_rcvport.packet_flit1 = hdr0;
-                        dd->err_info_rcvport.packet_flit2 = hdr1;
+                       dd->err_info_rcvport.packet_flit1 = hdr0;
+                       dd->err_info_rcvport.packet_flit2 = hdr1;
                }
                switch (info) {
                case 1:
@@ -9168,6 +9170,13 @@ int start_link(struct hfi1_pportdata *ppd)
                return 0;
        }
 
+       /*
+        * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
+        * pkey table can be configured properly if the HFI unit is connected
+        * to a switch port with MgmtAllowed=NO
+        */
+       clear_full_mgmt_pkey(ppd);
+
        return set_link_state(ppd, HLS_DN_POLL);
 }
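
Two things happen in the chip.c hunks above: hfi1_event_pkey_change() is now raised whenever slot 2 of the pkey table is written, and clear_full_mgmt_pkey() only acts (and only fires the event) when the slot actually held a key, so the unconditional call added to start_link() stays idempotent. A runnable userspace model of that guarded-clear pattern, with illustrative values (0xffff standing in for FULL_MGMT_P_KEY):

    #include <stdio.h>

    static unsigned short pkeys[4] = { 0x7fff, 0, 0xffff, 0 };

    static void notify_pkey_change(void)
    {
            puts("pkey change event dispatched");
    }

    static void clear_full_mgmt_pkey(void)
    {
            if (pkeys[2] != 0) {    /* guard: mutate and notify at most once */
                    pkeys[2] = 0;
                    notify_pkey_change();
            }
    }

    int main(void)
    {
            clear_full_mgmt_pkey();         /* clears slot 2, one event */
            clear_full_mgmt_pkey();         /* no-op, no duplicate event */
            return 0;
    }
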
 
@@ -9777,7 +9786,7 @@ static void set_send_length(struct hfi1_pportdata *ppd)
        u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
                              & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
                SEND_LEN_CHECK1_LEN_VL15_SHIFT;
-       int i;
+       int i, j;
        u32 thres;
 
        for (i = 0; i < ppd->vls_supported; i++) {
@@ -9801,7 +9810,10 @@ static void set_send_length(struct hfi1_pportdata *ppd)
                            sc_mtu_to_threshold(dd->vld[i].sc,
                                                dd->vld[i].mtu,
                                                dd->rcd[0]->rcvhdrqentsize));
-               sc_set_cr_threshold(dd->vld[i].sc, thres);
+               for (j = 0; j < INIT_SC_PER_VL; j++)
+                       sc_set_cr_threshold(
+                                       pio_select_send_context_vl(dd, j, i),
+                                       thres);
        }
        thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
                    sc_mtu_to_threshold(dd->vld[15].sc,
@@ -11906,7 +11918,7 @@ static void update_synth_timer(unsigned long opaque)
                hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
        }
 
-mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+       mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
 }
 
 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
index 7a5b0e676cc7a4b1c1d9a65c76d66ba2f23a1f32..c702a009608f27a62b06b7ad4eb2d54914fab03b 100644 (file)
@@ -203,6 +203,9 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 
        switch (cmd) {
        case HFI1_IOCTL_ASSIGN_CTXT:
+               if (uctxt)
+                       return -EINVAL;
+
                if (copy_from_user(&uinfo,
                                   (struct hfi1_user_info __user *)arg,
                                   sizeof(uinfo)))
index 5cc492e5776d8d87f3833136ed6d33b05f700da0..eed971ccd2a1e88e7180f2eae5aba92da5448c96 100644 (file)
@@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
                dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
                                  (void *)dd->rcvhdrtail_dummy_kvaddr,
                                  dd->rcvhdrtail_dummy_physaddr);
-                                 dd->rcvhdrtail_dummy_kvaddr = NULL;
+               dd->rcvhdrtail_dummy_kvaddr = NULL;
        }
 
        for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
@@ -1383,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        int ret = 0, j, pidx, initfail;
-       struct hfi1_devdata *dd = NULL;
+       struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
        struct hfi1_pportdata *ppd;
 
        /* First, lock the non-writable module parameters */
index 219029576ba0b746646b0bfc9402279d40649c9a..fca07a1d6c284b90238543777e85462cc2249651 100644 (file)
@@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp)
        memset(data, 0, size);
 }
 
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+{
+       struct ib_event event;
+
+       event.event = IB_EVENT_PKEY_CHANGE;
+       event.device = &dd->verbs_dev.rdi.ibdev;
+       event.element.port_num = port;
+       ib_dispatch_event(&event);
+}
+
 static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
 {
        struct ib_mad_send_buf *send_buf;
@@ -1418,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
        }
 
        if (changed) {
-               struct ib_event event;
-
                (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
-
-               event.event = IB_EVENT_PKEY_CHANGE;
-               event.device = &dd->verbs_dev.rdi.ibdev;
-               event.element.port_num = port;
-               ib_dispatch_event(&event);
+               hfi1_event_pkey_change(dd, port);
        }
+
        return 0;
 }
 
index 55ee0867533324bd9b325ffbac1562abc22767db..8b734aaae88adf48be52a1a2e3d17ca4f800b7ce 100644 (file)
@@ -434,4 +434,6 @@ struct sc2vlnt {
                    COUNTER_MASK(1, 3) | \
                    COUNTER_MASK(1, 4))
 
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+
 #endif                         /* _HFI1_MAD_H */
index d5edb1afbb8f8b8ed98135bfc9b961e00d535b4e..d4022450b73f7a9c6d8d528fbe44b3dd2fa0e00a 100644 (file)
@@ -995,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
                /* counter is reset if occupancy count changes */
                if (reg != reg_prev)
                        loop = 0;
-               if (loop > 500) {
+               if (loop > 50000) {
                        /* timed out - bounce the link */
                        dd_dev_err(dd,
                                   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
@@ -1797,6 +1797,21 @@ static void pio_map_rcu_callback(struct rcu_head *list)
        pio_map_free(m);
 }
 
+/*
+ * Set credit return threshold for the kernel send context
+ */
+static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
+{
+       u32 thres;
+
+       thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
+                                           50),
+                   sc_mtu_to_threshold(dd->kernel_send_context[scontext],
+                                       dd->vld[i].mtu,
+                                       dd->rcd[0]->rcvhdrqentsize));
+       sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
+}
+
 /*
  * pio_map_init - called when #vls change
  * @dd: hfi1_devdata
@@ -1872,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
                        if (!newmap->map[i])
                                goto bail;
                        newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
-                       /* assign send contexts */
+                       /*
+                        * assign send contexts and
+                        * adjust credit return threshold
+                        */
                        for (j = 0; j < sz; j++) {
-                               if (dd->kernel_send_context[scontext])
+                               if (dd->kernel_send_context[scontext]) {
                                        newmap->map[i]->ksc[j] =
                                        dd->kernel_send_context[scontext];
+                                       set_threshold(dd, scontext, i);
+                               }
                                if (++scontext >= first_scontext +
                                                  vl_scontexts[i])
                                        /* wrap back to first send context */
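
The two pio.c hunks above work together: set_send_length() computes one threshold per VL, and the new set_threshold() helper plus the call added in pio_map_init() push it to every kernel send context mapped to that VL instead of only the VL's first context. A runnable toy model of the shape of the fix (counts and values are illustrative; the real thresholds come from sc_percent_to_threshold()/sc_mtu_to_threshold()):

    #include <stdio.h>

    #define NUM_VLS   4
    #define SC_PER_VL 2

    static unsigned int cr_threshold[NUM_VLS][SC_PER_VL];

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            for (int vl = 0; vl < NUM_VLS; vl++) {
                    /* lower of the percent-based and MTU-derived values */
                    unsigned int thres = min_u32(50, 8 + 4 * vl);

                    /* every context of the VL, not just context 0 */
                    for (int sc = 0; sc < SC_PER_VL; sc++)
                            cr_threshold[vl][sc] = thres;
            }
            printf("vl0/sc1 threshold = %u\n", cr_threshold[0][1]);
            return 0;
    }
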
index 2441669f0817dbec51c7fbcdc847c0bff9f48bfb..9fb561682c661b92e2a09beca2b53daf9f8c5b5e 100644 (file)
@@ -579,7 +579,8 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
 
        if (ppd->qsfp_info.cache_valid) {
                if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
-                       sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
+                       snprintf(lenstr, sizeof(lenstr), "%dM ",
+                                cache[QSFP_MOD_LEN_OFFS]);
 
                power_byte = cache[QSFP_MOD_PWR_OFFS];
                sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
index 79b2952c0dfb31a50f0f08e4a74d3f60b754e861..4cfb13771897ca35b91e476c17afabed48a9e5ce 100644 (file)
@@ -214,19 +214,6 @@ const char *print_u32_array(
        return ret;
 }
 
-const char *print_u64_array(
-       struct trace_seq *p,
-       u64 *arr, int len)
-{
-       int i;
-       const char *ret = trace_seq_buffer_ptr(p);
-
-       for (i = 0; i < len; i++)
-               trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
-       trace_seq_putc(p, 0);
-       return ret;
-}
-
 __hfi1_trace_fn(PKT);
 __hfi1_trace_fn(PROC);
 __hfi1_trace_fn(SDMA);
index 29f4795f866cd19185c680d28f95302e206f62e4..47ffd273ecbd7745d25067f4f8268301d89dde25 100644 (file)
@@ -183,7 +183,7 @@ struct user_sdma_iovec {
        struct sdma_mmu_node *node;
 };
 
-#define SDMA_CACHE_NODE_EVICT BIT(0)
+#define SDMA_CACHE_NODE_EVICT 0
 
 struct sdma_mmu_node {
        struct mmu_rb_node rb;
@@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req,
                 */
                SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
                         req->tidoffset, req->tidoffset / req->omfactor,
-                        !!(req->omfactor - KDETH_OM_SMALL));
+                        req->omfactor != KDETH_OM_SMALL);
                KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
                          req->tidoffset / req->omfactor);
                KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
-                         !!(req->omfactor - KDETH_OM_SMALL));
+                         req->omfactor != KDETH_OM_SMALL);
        }
 done:
        trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
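
The SDMA_CACHE_NODE_EVICT change from BIT(0) to 0 is consistent with the flag being consumed by bit-number interfaces such as set_bit()/test_bit(), which take a bit position rather than a mask; BIT(0) equals 1, so it would have addressed bit 1. A runnable demonstration of the off-by-one:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    static void set_bit_num(unsigned int nr, unsigned long *word)
    {
            *word |= 1UL << nr;     /* same contract as the kernel's set_bit() */
    }

    int main(void)
    {
            unsigned long flags = 0;

            set_bit_num(BIT(0), &flags);            /* wrong: sets bit 1 */
            printf("with BIT(0): 0x%lx\n", flags);  /* 0x2 */

            flags = 0;
            set_bit_num(0, &flags);                 /* right: sets bit 0 */
            printf("with 0:      0x%lx\n", flags);  /* 0x1 */
            return 0;
    }

The !!(req->omfactor - KDETH_OM_SMALL) to req->omfactor != KDETH_OM_SMALL rewrites in the same file are behavior-preserving; the comparison simply states the intent directly.
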
index bc95c4112c615eeec6fef748088f74dae0939fa1..d8fb056526f8ba079bc7f57feffa63fd0ffa3415 100644 (file)
@@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
 
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                struct rvt_qp *qp)
+       __must_hold(&qp->s_lock)
 {
        struct verbs_txreq *tx = ERR_PTR(-EBUSY);
-       unsigned long flags;
 
-       spin_lock_irqsave(&qp->s_lock, flags);
        write_seqlock(&dev->iowait_lock);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                struct hfi1_qp_priv *priv;
@@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
        }
 out:
        write_sequnlock(&dev->iowait_lock);
-       spin_unlock_irqrestore(&qp->s_lock, flags);
        return tx;
 }
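
The locking here moves from __get_txreq() into its callers, and the __must_hold() annotation documents the new contract for sparse. Under "make C=1" the kernel's compiler headers expand these context annotations roughly as below; with an ordinary compile they expand to nothing:

    #define __must_hold(x)  __attribute__((context(x, 1, 1)))  /* held on entry and exit */
    #define __acquires(x)   __attribute__((context(x, 0, 1)))  /* taken inside */
    #define __releases(x)   __attribute__((context(x, 1, 0)))  /* dropped inside */

    /* so callers are now expected to do, in effect:
     *
     *      spin_lock_irqsave(&qp->s_lock, flags);
     *      tx = __get_txreq(dev, qp);
     *      spin_unlock_irqrestore(&qp->s_lock, flags);
     */
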
 
index 1cf69b2fe4a52ef68f1e0dd1b9e954fe74db94dc..a1d6e0807f97161f5336120ab7e6d0ffdb175b4e 100644 (file)
@@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
                                            struct rvt_qp *qp)
+       __must_hold(&qp->s_lock)
 {
        struct verbs_txreq *tx;
        struct hfi1_qp_priv *priv = qp->priv;
index 8b95320345583802c9263cfd308a877af54bc014..b738acdb9b027705f11257c8ab25cbaeba652159 100644 (file)
 
 #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
 #define IW_CFG_FPM_QP_COUNT            32768
+#define I40IW_MAX_PAGES_PER_FMR                512
+#define I40IW_MIN_PAGES_PER_FMR                1
 
 #define I40IW_MTU_TO_MSS               40
 #define I40IW_DEFAULT_MSS              1460
index 02a735b64208c83f35e00e3dd29b4be1bba5b068..33959ed14563e0f43e5209320ba0fd5ff0338abb 100644 (file)
@@ -79,6 +79,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
        props->atomic_cap = IB_ATOMIC_NONE;
        props->max_map_per_fmr = 1;
+       props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
        return 0;
 }
 
@@ -1527,7 +1528,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
        mutex_lock(&iwdev->pbl_mutex);
        status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
        mutex_unlock(&iwdev->pbl_mutex);
-       if (!status)
+       if (status)
                goto err1;
 
        if (palloc->level != I40IW_LEVEL_1)
@@ -2149,6 +2150,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
                        struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
                        struct i40iw_fast_reg_stag_info info;
 
+                       memset(&info, 0, sizeof(info));
                        info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
                        info.access_rights |= i40iw_get_user_access(flags);
                        info.stag_key = reg_wr(ib_wr)->key & 0xff;
@@ -2158,10 +2160,14 @@ static int i40iw_post_send(struct ib_qp *ibqp,
                        info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
                        info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
                        info.total_len = iwmr->ibmr.length;
+                       info.reg_addr_pa = *(u64 *)palloc->level1.addr;
                        info.first_pm_pbl_index = palloc->level1.idx;
                        info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
                        info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
 
+                       if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
+                               info.chunk_size = 1;
+
                        if (page_shift == 21)
                                info.page_size = 1; /* 2M page */
 
@@ -2327,13 +2333,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq,
 {
        struct i40iw_cq *iwcq;
        struct i40iw_cq_uk *ukcq;
-       enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+       unsigned long flags;
+       enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
 
        iwcq = (struct i40iw_cq *)ibcq;
        ukcq = &iwcq->sc_cq.cq_uk;
-       if (notify_flags == IB_CQ_NEXT_COMP)
-               cq_notify = IW_CQ_COMPL_EVENT;
+       if (notify_flags == IB_CQ_SOLICITED)
+               cq_notify = IW_CQ_COMPL_SOLICITED;
+       spin_lock_irqsave(&iwcq->lock, flags);
        ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+       spin_unlock_irqrestore(&iwcq->lock, flags);
        return 0;
 }
 
index 105246fba2e7c381958c2e5e74b3a4af7b1441a8..5fc62336273132a80eebe1e8f3045758edd12d72 100644 (file)
@@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 
        ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
        ah->av.ib.g_slid  = ah_attr->src_path_bits;
+       ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
        if (ah_attr->ah_flags & IB_AH_GRH) {
                ah->av.ib.g_slid   |= 0x80;
                ah->av.ib.gid_index = ah_attr->grh.sgid_index;
@@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
                       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
                        --ah->av.ib.stat_rate;
        }
-       ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 
        return &ah->ibah;
 }
index d68f506c1922e8d18af269a4fcde33fd0e3ae5d5..9c2e53d28f985740c3d22f9c13be776e58010567 100644 (file)
@@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
                tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
        spin_unlock(&tun_qp->tx_lock);
        if (ret)
-               goto out;
+               goto end;
 
        tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
        if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
        wr.wr.send_flags = IB_SEND_SIGNALED;
 
        ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
-       if (ret)
-               ib_destroy_ah(ah);
+       if (!ret)
+               return 0;
+ out:
+       spin_lock(&tun_qp->tx_lock);
+       tun_qp->tx_ix_tail++;
+       spin_unlock(&tun_qp->tx_lock);
+       tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+       ib_destroy_ah(ah);
        return ret;
 }
 
@@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
 
        ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+       if (!ret)
+               return 0;
+
+       spin_lock(&sqp->tx_lock);
+       sqp->tx_ix_tail++;
+       spin_unlock(&sqp->tx_lock);
+       sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-       if (ret)
-               ib_destroy_ah(ah);
+       ib_destroy_ah(ah);
        return ret;
 }
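
Both mad.c hunks install the same error path: a tunnel/wire slot is reserved by advancing the head index under the lock, and when ib_post_send() fails the tail index is advanced as well, returning the slot before the AH is destroyed. A runnable model of the reserve-then-roll-back bookkeeping:

    #include <assert.h>
    #include <stdbool.h>

    struct ring {
            unsigned int head;      /* advanced to reserve a slot */
            unsigned int tail;      /* advanced when a slot is retired */
    };

    static bool post_send(void)
    {
            return false;           /* simulate a post failure */
    }

    static int send_one(struct ring *r)
    {
            r->head++;              /* reserve */
            if (!post_send()) {
                    r->tail++;      /* error: give the slot back */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct ring r = { 0, 0 };

            send_one(&r);
            assert(r.head - r.tail == 0);   /* nothing leaked in flight */
            return 0;
    }
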
 
index b01ef6eee6e838b550758c8e4865bab7c1fceb09..42a46078d7d52755109784e769f29c2e5532bbca 100644 (file)
@@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
                else
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
-       if (dev->steering_support ==  MLX4_STEERING_MODE_DEVICE_MANAGED)
-               props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
        }
+       if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+               props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
 
        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
 
@@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
        int is_bonded = mlx4_is_bonded(dev);
 
+       if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+               return ERR_PTR(-EINVAL);
+
        if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
            (flow_attr->type != IB_FLOW_ATTR_NORMAL))
                return ERR_PTR(-EOPNOTSUPP);
index 6c5ac5d8f32ffcfafbb4dd7a4b9202b4049b95b5..29acda249612dd444ee42c37494674cc4e482790 100644 (file)
@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
        u32                     max_pages;
        struct mlx4_mr          mmr;
        struct ib_umem         *umem;
-       void                    *pages_alloc;
+       size_t                  page_map_size;
 };
 
 struct mlx4_ib_mw {
index 631272172a0b9793c0074e6ab9c00a4b9f2e4665..5d73989d977135d9d560add27e074417ceaca258 100644 (file)
@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
 {
-       int size = max_pages * sizeof(u64);
-       int add_size;
        int ret;
 
-       add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+       /* Ensure that size is aligned to DMA cacheline
+        * requirements.
+        * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+        * so page_map_size will never exceed PAGE_SIZE.
+        */
+       mr->page_map_size = roundup(max_pages * sizeof(u64),
+                                   MLX4_MR_PAGES_ALIGN);
 
-       mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-       if (!mr->pages_alloc)
+       /* Prevent cross page boundary allocation. */
+       mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+       if (!mr->pages)
                return -ENOMEM;
 
-       mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
        mr->page_map = dma_map_single(device->dma_device, mr->pages,
-                                     size, DMA_TO_DEVICE);
+                                     mr->page_map_size, DMA_TO_DEVICE);
 
        if (dma_mapping_error(device->dma_device, mr->page_map)) {
                ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
        }
 
        return 0;
-err:
-       kfree(mr->pages_alloc);
 
+err:
+       free_page((unsigned long)mr->pages);
        return ret;
 }
 
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;
-               int size = mr->max_pages * sizeof(u64);
 
                dma_unmap_single(device->dma_device, mr->page_map,
-                                size, DMA_TO_DEVICE);
-               kfree(mr->pages_alloc);
+                                mr->page_map_size, DMA_TO_DEVICE);
+               free_page((unsigned long)mr->pages);
                mr->pages = NULL;
        }
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
        mr->npages = 0;
 
        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-                                  sizeof(u64) * mr->max_pages,
-                                  DMA_TO_DEVICE);
+                                  mr->page_map_size, DMA_TO_DEVICE);
 
        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-                                     sizeof(u64) * mr->max_pages,
-                                     DMA_TO_DEVICE);
+                                     mr->page_map_size, DMA_TO_DEVICE);
 
        return rc;
 }
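
The mr.c rework rests on two points visible in the hunks: get_zeroed_page() hands back a whole page-aligned page, so the descriptor array can no longer straddle a page boundary, and the length is computed once into mr->page_map_size so that map, sync and unmap all use the same value. The resulting pairing, condensed (kernel fragment, not standalone):

    mr->page_map_size = roundup(max_pages * sizeof(u64), MLX4_MR_PAGES_ALIGN);

    mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);      /* page-aligned, zeroed */
    mr->page_map = dma_map_single(dev, mr->pages,
                                  mr->page_map_size, DMA_TO_DEVICE);
    /* ... ib_dma_sync_single_for_cpu()/_for_device() with the same size ... */
    dma_unmap_single(dev, mr->page_map, mr->page_map_size, DMA_TO_DEVICE);
    free_page((unsigned long)mr->pages);
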
index 81b0e1fbec1d96ddce580e4021f2560589cc22b2..8db8405c1e99e72a1e50206182bd17ad1219d47d 100644 (file)
@@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
                        sizeof (struct mlx4_wqe_raddr_seg);
        case MLX4_IB_QPT_RC:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
-                       sizeof (struct mlx4_wqe_atomic_seg) +
+                       sizeof (struct mlx4_wqe_masked_atomic_seg) +
                        sizeof (struct mlx4_wqe_raddr_seg);
        case MLX4_IB_QPT_SMI:
        case MLX4_IB_QPT_GSI:
@@ -1191,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
        {
                err = create_qp_common(to_mdev(pd->device), pd, init_attr,
                                       udata, 0, &qp, gfp);
-               if (err)
+               if (err) {
+                       kfree(qp);
                        return ERR_PTR(err);
+               }
 
                qp->ibqp.qp_num = qp->mqp.qpn;
                qp->xrcdn = xrcdn;
index dabcc65bd65e3b6a93adb47b9b53bbd67d08e15c..9c0e67bd2ba7326cf711e3eba0cf2e6bbd630d3e 100644 (file)
@@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
        int eqn;
        int err;
 
-       if (entries < 0)
+       if (entries < 0 ||
+           (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
                return ERR_PTR(-EINVAL);
 
        if (check_cq_create_flags(attr->flags))
@@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                return -ENOSYS;
        }
 
-       if (entries < 1)
+       if (entries < 1 ||
+           entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+               mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+                            entries,
+                            1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
                return -EINVAL;
+       }
 
        entries = roundup_pow_of_two(entries + 1);
-       if (entries >  (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+       if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
                return -EINVAL;
 
        if (entries == ibcq->cqe + 1)
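
Create and resize now agree on the same bound: the requested entry count is checked against 1 << log_max_cq_sz before the +1/round-up adjustment. A runnable sketch of the check (the log_max_cq_sz value is illustrative):

    #include <stdio.h>

    static unsigned int roundup_pow_of_two_u(unsigned int v)
    {
            unsigned int r = 1;

            while (r < v)
                    r <<= 1;
            return r;
    }

    static int check_cq_entries(int entries, unsigned int log_max_cq_sz)
    {
            if (entries < 1 || entries > (1 << log_max_cq_sz))
                    return -1;      /* -EINVAL in the driver */

            entries = roundup_pow_of_two_u(entries + 1);
            if (entries > (1 << log_max_cq_sz) + 1)
                    return -1;
            return entries;
    }

    int main(void)
    {
            printf("%d\n", check_cq_entries(100, 16));      /* 128 */
            printf("%d\n", check_cq_entries(1 << 20, 16));  /* -1 */
            return 0;
    }
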
index 1534af1130588f03b5be1f4fae333d4277a02486..364aab9f3c9e23a1963aa22c082b864fb0125914 100644 (file)
@@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
        pma_cnt_ext->port_xmit_data =
                cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
                                         transmitted_ib_multicast.octets) >> 2);
-       pma_cnt_ext->port_xmit_data =
+       pma_cnt_ext->port_rcv_data =
                cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
                                         received_ib_multicast.octets) >> 2);
        pma_cnt_ext->port_xmit_packets =
index c72797cd9e4f199302ededac309b133d4ddd1b27..b48ad85315dc671da59f0453e8d1393a3c3d1a43 100644 (file)
@@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
            MLX5_CAP_ETH(dev->mdev, scatter_fcs))
                props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
 
+       if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+               props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
        props->vendor_part_id      = mdev->pdev->device;
        props->hw_ver              = mdev->pdev->revision;
 
@@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
        gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
-       resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+       if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+               resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
        resp.cache_line_size = L1_CACHE_BYTES;
        resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
        resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
@@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        if (field_avail(typeof(resp), cqe_version, udata->outlen))
                resp.response_length += sizeof(resp.cqe_version);
 
-       if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+       /*
+        * We don't want to expose information from the PCI bar that is located
+        * after 4096 bytes, so if the arch only supports larger pages, let's
+        * pretend we don't support reading the HCA's core clock. This is also
+        * forced by the mmap function.
+        */
+       if (PAGE_SIZE <= 4096 &&
+           field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
                resp.comp_mask |=
                        MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
                resp.hca_core_clock_offset =
@@ -1798,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
 {
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-       return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+       return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
                       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
@@ -1866,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
                break;
 
        case MLX5_DEV_EVENT_PORT_DOWN:
+       case MLX5_DEV_EVENT_PORT_INITIALIZED:
                ibev.event = IB_EVENT_PORT_ERR;
                port = (u8)param;
                break;
 
-       case MLX5_DEV_EVENT_PORT_INITIALIZED:
-               /* not used by ULPs */
-               return;
-
        case MLX5_DEV_EVENT_LID_CHANGE:
                ibev.event = IB_EVENT_LID_CHANGE;
                port = (u8)param;
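
The hca_core_clock_offset hunk shows the extensible-ABI idiom this driver uses: a field in the response is only populated when field_avail() says the caller's output buffer reaches it, and resp.response_length records how much was actually written. mlx5 defines the macro essentially as below; the structure here is a cut-down stand-in. Runnable sketch:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define field_avail(type, fld, sz) \
            (offsetof(type, fld) + sizeof(((type *)0)->fld) <= (sz))

    struct resp_v2 {
            unsigned int qp_tab_size;       /* original field */
            unsigned int cqe_version;       /* added in a later ABI rev */
    };

    int main(void)
    {
            struct resp_v2 resp;
            size_t outlen = sizeof(unsigned int);   /* old caller: v1-sized buffer */

            memset(&resp, 0, sizeof(resp));
            resp.qp_tab_size = 256;
            if (field_avail(struct resp_v2, cqe_version, outlen))
                    resp.cqe_version = 1;           /* skipped for old callers */

            printf("cqe_version = %u\n", resp.cqe_version); /* 0 */
            return 0;
    }

The PAGE_SIZE <= 4096 condition in the same hunk follows the comment above it: on architectures with larger pages, an mmap of the clock page would expose more than the first 4K of the BAR, so the offset is simply not reported there.
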
index 504117657d41ffccd9d65467b748bede40fb1054..ce0a7ab35a227c569deae7f6b322b4c84eb5dcec 100644 (file)
@@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                qp->rq.max_gs = 0;
                qp->rq.wqe_cnt = 0;
                qp->rq.wqe_shift = 0;
+               cap->max_recv_wr = 0;
+               cap->max_recv_sge = 0;
        } else {
                if (ucmd) {
                        qp->rq.wqe_cnt = ucmd->rq_wqe_count;
@@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                         const struct ib_ah_attr *ah,
                         struct mlx5_qp_path *path, u8 port, int attr_mask,
-                        u32 path_flags, const struct ib_qp_attr *attr)
+                        u32 path_flags, const struct ib_qp_attr *attr,
+                        bool alt)
 {
        enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
        int err;
 
        if (attr_mask & IB_QP_PKEY_INDEX)
-               path->pkey_index = attr->pkey_index;
+               path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
+                                                    attr->pkey_index);
 
        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >=
@@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                                          ah->grh.sgid_index);
                path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
        } else {
-               path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
-               path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
-                                                                       0;
+               path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+               path->fl_free_ar |=
+                       (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
                path->rlid = cpu_to_be16(ah->dlid);
                path->grh_mlid = ah->src_path_bits & 0x7f;
                if (ah->ah_flags & IB_AH_GRH)
@@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        path->port = port;
 
        if (attr_mask & IB_QP_TIMEOUT)
-               path->ackto_lt = attr->timeout << 3;
+               path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
 
        if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
                return modify_raw_packet_eth_prio(dev->mdev,
@@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
 
        if (attr_mask & IB_QP_PKEY_INDEX)
-               context->pri_path.pkey_index = attr->pkey_index;
+               context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
 
        /* todo implement counter_index functionality */
 
@@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        if (attr_mask & IB_QP_AV) {
                err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
                                    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
-                                   attr_mask, 0, attr);
+                                   attr_mask, 0, attr, false);
                if (err)
                        goto out;
        }
@@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        if (attr_mask & IB_QP_ALT_PATH) {
                err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
                                    &context->alt_path,
-                                   attr->alt_port_num, attr_mask, 0, attr);
+                                   attr->alt_port_num,
+                                   attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+                                   0, attr, true);
                if (err)
                        goto out;
        }
@@ -3326,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
                        return MLX5_FENCE_MODE_SMALL_AND_FENCE;
                else
                        return fence;
-
-       } else {
-               return 0;
+       } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+               return MLX5_FENCE_MODE_FENCE;
        }
+
+       return 0;
 }
 
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
@@ -4013,11 +4020,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
                to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
                to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
-               qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
+               qp_attr->alt_pkey_index =
+                       be16_to_cpu(context->alt_path.pkey_index);
                qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
        }
 
-       qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
+       qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
        qp_attr->port_num = context->pri_path.port;
 
        /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
@@ -4079,17 +4087,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
 
        if (!ibqp->uobject) {
-               qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
+               qp_attr->cap.max_send_wr  = qp->sq.max_post;
                qp_attr->cap.max_send_sge = qp->sq.max_gs;
+               qp_init_attr->qp_context = ibqp->qp_context;
        } else {
                qp_attr->cap.max_send_wr  = 0;
                qp_attr->cap.max_send_sge = 0;
        }
 
-       /* We don't support inline sends for kernel QPs (yet), and we
-        * don't know what userspace's value should be.
-        */
-       qp_attr->cap.max_inline_data = 0;
+       qp_init_attr->qp_type = ibqp->qp_type;
+       qp_init_attr->recv_cq = ibqp->recv_cq;
+       qp_init_attr->send_cq = ibqp->send_cq;
+       qp_init_attr->srq = ibqp->srq;
+       qp_attr->cap.max_inline_data = qp->max_inline_data;
 
        qp_init_attr->cap            = qp_attr->cap;
 
index ff946d5f59e4071606ebc00fff43df5a56dde2f6..382466a90da7331e4492fab59d60bf904db36df1 100644 (file)
@@ -2178,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
 
        switch (cmd.type) {
        case QIB_CMD_ASSIGN_CTXT:
+               if (rcd) {
+                       ret = -EINVAL;
+                       goto bail;
+               }
+
                ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
                if (ret)
                        goto bail;
index 7209fbc03ccb90a7360f403fe99303b58c7932ee..a0b6ebee4d8a047e2fdffb8bbd831e5c36107fec 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
 #include <linux/iommu.h>
 #include <linux/workqueue.h>
 #include <linux/list.h>
@@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
        int i;
        int flags;
        dma_addr_t pa;
-       DEFINE_DMA_ATTRS(attrs);
-
-       if (dmasync)
-               dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
        if (!can_do_mlock())
                return -EPERM;
index 5fa4d4d81ee0cfcc5389897c904e343ff4903897..41ba7e9cadaab29895fec78c0284ddfed2558949 100644 (file)
@@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                        /* wrap to first map page, invert bit 0 */
                        offset = qpt->incr | ((offset & 1) ^ 1);
                }
-               /* there can be no bits at shift and below */
-               WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+               /* there can be no set bits in low-order QoS bits */
+               WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
                qpn = mk_qpn(qpt, map, offset);
        }
 
@@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
  */
 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                  enum ib_qp_type type)
+       __releases(&qp->s_lock)
+       __releases(&qp->s_hlock)
+       __releases(&qp->r_lock)
+       __acquires(&qp->r_lock)
+       __acquires(&qp->s_hlock)
+       __acquires(&qp->s_lock)
 {
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
@@ -570,12 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
-       if (qp->s_ack_queue)
-               memset(
-                       qp->s_ack_queue,
-                       0,
-                       rvt_max_atomic(rdi) *
-                               sizeof(*qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
@@ -699,8 +699,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                 * initialization that is needed.
                 */
                priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
-               if (!priv)
+               if (IS_ERR(priv)) {
+                       ret = priv;
                        goto bail_qp;
+               }
                qp->priv = priv;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
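
The WARN_ON fix in alloc_qpn() is a classic mask construction error: the low qos_shift bits are covered by BIT(qos_shift) - 1, while the old qos_shift - 1 only coincides with that for a shift of 1. Runnable check:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    int main(void)
    {
            unsigned int qos_shift = 3;

            printf("old expression: 0x%lx\n",
                   (unsigned long)(qos_shift - 1));         /* 0x2 */
            printf("low-bit mask:   0x%lx\n",
                   BIT(qos_shift) - 1);                     /* 0x7 */
            return 0;
    }
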
index e1cc2cc42f2550e1a2113b8c471f4456b182dd14..30c4fda7a05a6552e5a26d5dc1e52af200959a17 100644 (file)
@@ -501,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
                            !rdi->driver_f.quiesce_qp ||
                            !rdi->driver_f.notify_error_qp ||
                            !rdi->driver_f.mtu_from_qp ||
-                           !rdi->driver_f.mtu_to_path_mtu ||
-                           !rdi->driver_f.shut_down_port ||
-                           !rdi->driver_f.cap_mask_chg)
+                           !rdi->driver_f.mtu_to_path_mtu)
                                return -EINVAL;
                break;
 
index bab7db6fa9abf8c045b9e1b7e7a55f8996bf3745..4f7d9b48df643c7ef5f69b80f870a0f15c6efe87 100644 (file)
@@ -94,6 +94,7 @@ enum {
        IPOIB_NEIGH_TBL_FLUSH     = 12,
        IPOIB_FLAG_DEV_ADDR_SET   = 13,
        IPOIB_FLAG_DEV_ADDR_CTRL  = 14,
+       IPOIB_FLAG_GOING_DOWN     = 15,
 
        IPOIB_MAX_BACKOFF_SECONDS = 16,
 
index b2f42835d76d51cfed64266d6f884f46de3a11d1..951d9abcca8b283e652368c7c9c96f1fa25f3df4 100644 (file)
@@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 {
        struct net_device *dev = to_net_dev(d);
        int ret;
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+       if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
+               return -EPERM;
 
        if (!rtnl_trylock())
                return restart_syscall();
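
IPOIB_FLAG_GOING_DOWN is set in ipoib_remove_one() before the device is torn down, and the configuration entry points (set_mode here, the vlan add/delete paths further down) test it and return -EPERM, so a sysfs write racing with removal cannot touch a half-destroyed interface. A runnable model of the guard:

    #include <stdio.h>

    #define FLAG_GOING_DOWN (1UL << 15)     /* bit 15, as in the enum above */

    static unsigned long priv_flags;

    static int set_mode(void)
    {
            if (priv_flags & FLAG_GOING_DOWN)
                    return -1;              /* -EPERM in the driver */
            return 0;                       /* apply the change */
    }

    int main(void)
    {
            printf("before removal: %d\n", set_mode());     /* 0 */
            priv_flags |= FLAG_GOING_DOWN;                  /* removal starts */
            printf("during removal: %d\n", set_mode());     /* -1 */
            return 0;
    }
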
index 45c40a17d6a6ffc60cac0cab92bc24c689f75c83..dc6d241b9406e9661394c7e985fda4b0eacbfa6b 100644 (file)
@@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
        if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
                return false;
 
-       netif_addr_lock(priv->dev);
+       netif_addr_lock_bh(priv->dev);
 
        /* The subnet prefix may have changed, update it now so we won't have
         * to do it later
@@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
 
        search_gid.global.interface_id = priv->local_gid.global.interface_id;
 
-       netif_addr_unlock(priv->dev);
+       netif_addr_unlock_bh(priv->dev);
 
        err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
                          priv->dev, &port, &index);
 
-       netif_addr_lock(priv->dev);
+       netif_addr_lock_bh(priv->dev);
 
        if (search_gid.global.interface_id !=
            priv->local_gid.global.interface_id)
@@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
        }
 
 out:
-       netif_addr_unlock(priv->dev);
+       netif_addr_unlock_bh(priv->dev);
 
        return ret;
 }
index 2d7c1634664826a83f1f4efe42b1004c3b23d09f..5f58c41ef787d22692920855f397c1bbcb079e6e 100644 (file)
@@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
                                neigh = NULL;
                                goto out_unlock;
                        }
-                       neigh->alive = jiffies;
+
+                       if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+                               neigh->alive = jiffies;
                        goto out_unlock;
                }
        }
@@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
        struct ipoib_dev_priv *child_priv;
        struct net_device *netdev = priv->dev;
 
-       netif_addr_lock(netdev);
+       netif_addr_lock_bh(netdev);
 
        memcpy(&priv->local_gid.global.interface_id,
               &gid->global.interface_id,
@@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
        memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
        clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
 
-       netif_addr_unlock(netdev);
+       netif_addr_unlock_bh(netdev);
 
        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                down_read(&priv->vlan_rwsem);
@@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
        union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
        int ret = 0;
 
-       netif_addr_lock(dev);
+       netif_addr_lock_bh(dev);
 
        /* Make sure the QPN, reserved and subnet prefix match the current
         * lladdr, it also makes sure the lladdr is unicast.
@@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
            gid->global.interface_id == 0)
                ret = -EINVAL;
 
-       netif_addr_unlock(dev);
+       netif_addr_unlock_bh(dev);
 
        return ret;
 }
@@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
                ib_unregister_event_handler(&priv->event_handler);
                flush_workqueue(ipoib_workqueue);
 
+               /* mark interface in the middle of destruction */
+               set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+
                rtnl_lock();
                dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
                rtnl_unlock();
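
All the netif_addr_lock() call sites in this series move to the _bh variants. The usual reason for such a conversion, which these hunks are consistent with, is that the same lock is also taken from bottom-half context; a process-context holder that leaves BHs enabled can then deadlock against a softirq arriving on the same CPU. Illustrative fragment (kernel context assumed):

    #include <linux/netdevice.h>

    static void touch_dev_addr(struct net_device *dev)
    {
            netif_addr_lock_bh(dev);        /* disable BHs, then lock */
            /* ... read or update dev->dev_addr / priv->local_gid ... */
            netif_addr_unlock_bh(dev);
    }
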
index 82fbc9442608f6e10fb0b851a08baf32cfacc5c8..d3394b6add24a0303dd51710d72f86087a36c7fe 100644 (file)
@@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
                return;
        }
        priv->local_lid = port_attr.lid;
-       netif_addr_lock(dev);
+       netif_addr_lock_bh(dev);
 
        if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
-               netif_addr_unlock(dev);
+               netif_addr_unlock_bh(dev);
                return;
        }
-       netif_addr_unlock(dev);
+       netif_addr_unlock_bh(dev);
 
        spin_lock_irq(&priv->lock);
        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
index 64a35595eab83783ade2451f928832548fcb7271..a2f9f29c6ab589e746170ac6463dd6c7781cd20e 100644 (file)
@@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
        ppriv = netdev_priv(pdev);
 
+       if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+               return -EPERM;
+
        snprintf(intf_name, sizeof intf_name, "%s.%04x",
                 ppriv->dev->name, pkey);
        priv = ipoib_intf_alloc(intf_name);
@@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
        ppriv = netdev_priv(pdev);
 
+       if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+               return -EPERM;
+
        if (!rtnl_trylock())
                return restart_syscall();
 
index 646de170ec12e44a9eb20661fa4614e54dd0afe2..3322ed750172ea78f2f57cc1ad08f6f9946eca63 100644 (file)
@@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 {
        unsigned int sg_offset = 0;
 
-       state->desc = req->indirect_desc;
        state->fr.next = req->fr_list;
        state->fr.end = req->fr_list + ch->target->mr_per_cmd;
        state->sg = scat;
@@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
        struct scatterlist *sg;
        int i;
 
-       state->desc = req->indirect_desc;
        for_each_sg(scat, sg, count, i) {
                srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
                             ib_sg_dma_len(dev->dev, sg),
@@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
                                   target->indirect_size, DMA_TO_DEVICE);
 
        memset(&state, 0, sizeof(state));
+       state.desc = req->indirect_desc;
        if (dev->use_fast_reg)
                ret = srp_map_sg_fr(&state, ch, req, scat, count);
        else if (dev->use_fmr)
@@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device)
        int mr_page_shift, p;
        u64 max_pages_per_mr;
 
-       srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+       srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
        if (!srp_dev)
                return;
 
@@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device)
                                                   IB_ACCESS_REMOTE_WRITE);
                if (IS_ERR(srp_dev->global_mr))
                        goto err_pd;
-       } else {
-               srp_dev->global_mr = NULL;
        }
 
        for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
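
Two small initialization fixes sit in this srp.c section: state.desc is now set once in srp_map_data() instead of in each mapping backend, and srp_add_one() switches to kzalloc(), which is what allows dropping the explicit srp_dev->global_mr = NULL branch, since a zeroed allocation already provides that default. A runnable model of the zero-by-construction idea (calloc() standing in for kzalloc()):

    #include <stdio.h>
    #include <stdlib.h>

    struct srp_device_model {
            void *global_mr;        /* stays NULL unless the MR path runs */
            int use_fast_reg;
    };

    int main(void)
    {
            struct srp_device_model *d = calloc(1, sizeof(*d));

            if (!d)
                    return 1;
            /* suppose the global-MR registration branch was skipped */
            printf("global_mr = %p\n", d->global_mr);       /* (nil) */
            free(d);
            return 0;
    }
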
index e68b20cba70b344d7b33512a6d9c868fb3d2ff28..4a4155640d516252fba99c5728bb794e005218cf 100644 (file)
@@ -1638,8 +1638,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
         */
        qp_init->cap.max_send_wr = srp_sq_size / 2;
        qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
-       qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd,
-                                       sdev->device->attrs.max_sge);
+       qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
        qp_init->port_num = ch->sport->port;
 
        ch->qp = ib_create_qp(sdev->pd, qp_init);
index fee6bfd7ca218f138e38f58318bbc56f12402f5b..389030487da7eecb521342e8a83859b39b3523a6 100644 (file)
@@ -106,6 +106,7 @@ enum {
        SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
 
        SRPT_DEF_SG_TABLESIZE = 128,
+       SRPT_DEF_SG_PER_WQE = 16,
 
        MIN_SRPT_SQ_SIZE = 16,
        DEF_SRPT_SQ_SIZE = 4096,
index 94b68213c50dae945b9c0789afe8b6465dfd5411..5f6b3bcab0782be2d29d465390b1b4bd179bf433 100644 (file)
@@ -1941,6 +1941,7 @@ static struct iommu_ops arm_smmu_ops = {
        .attach_dev             = arm_smmu_attach_dev,
        .map                    = arm_smmu_map,
        .unmap                  = arm_smmu_unmap,
+       .map_sg                 = default_iommu_map_sg,
        .iova_to_phys           = arm_smmu_iova_to_phys,
        .add_device             = arm_smmu_add_device,
        .remove_device          = arm_smmu_remove_device,
index a644d0cec2d8275d202fd3d871673b7787dd258c..10700945994eed2e470c92d267c0bb5723ed0d1a 100644 (file)
@@ -3222,11 +3222,6 @@ static int __init init_dmars(void)
                        }
                }
 
-               iommu_flush_write_buffer(iommu);
-               iommu_set_root_entry(iommu);
-               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -3235,6 +3230,18 @@ static int __init init_dmars(void)
 #endif
        }
 
+       /*
+        * Now that qi is enabled on all iommus, set the root entry and flush
+        * caches. This is required on some Intel X58 chipsets, otherwise the
+        * flush_context function will loop forever and the boot hangs.
+        */
+       for_each_active_iommu(iommu, drhd) {
+               iommu_flush_write_buffer(iommu);
+               iommu_set_root_entry(iommu);
+               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+       }
+
        if (iommu_pass_through)
                iommu_identity_mapping |= IDENTMAP_ALL;
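
Per the comment in the hunk, the root-entry programming and cache flushes move out of the per-IOMMU setup loop into a second pass that runs only after queued invalidation is enabled everywhere, because flush_context can otherwise spin forever on some X58 chipsets. A runnable toy of the two-phase ordering (the dependency itself is taken from the comment above):

    #include <stdio.h>

    #define NUNITS 3

    static int qi_enabled[NUNITS];

    static void set_root_and_flush(int i)
    {
            printf("unit %d: root entry set, caches flushed (qi=%d)\n",
                   i, qi_enabled[i]);
    }

    int main(void)
    {
            for (int i = 0; i < NUNITS; i++)        /* phase 1 on every unit */
                    qi_enabled[i] = 1;
            for (int i = 0; i < NUNITS; i++)        /* phase 2 only afterwards */
                    set_root_and_flush(i);
            return 0;
    }
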
 
index c7d6156ff5360873b1394c746ba62b05d06113f5..25b4627cb57fa18e66d32548f18d8d22669a5bcb 100644 (file)
@@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
        dte_addr = virt_to_phys(rk_domain->dt);
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
-               rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+               rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
        }
 
index 3b5e10aa48ab48443210edde5c0fc4350b54d294..8a4adbeb2b8cd694a08e18bfb3d2df8d31b7e937 100644 (file)
@@ -746,6 +746,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
                /* verify that it doesn't conflict with an IPI irq */
                if (test_bit(spec->hwirq, ipi_resrv))
                        return -EBUSY;
+
+               hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
+
+               return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+                                                    &gic_level_irq_controller,
+                                                    NULL);
        } else {
                base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
                if (base_hwirq == gic_shared_intrs) {
@@ -867,10 +873,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
                                                    &gic_level_irq_controller,
                                                    NULL);
                if (ret)
-                       return ret;
+                       goto error;
        }
 
        return 0;
+
+error:
+       irq_domain_free_irqs_parent(d, virq, nr_irqs);
+       return ret;
 }
 
 void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
index 3495d5d6547f65a6fe507f063dce397bc424496f..3bce44893021365a999724a9aff0cf17d462a95f 100644 (file)
@@ -53,11 +53,12 @@ static void led_timer_function(unsigned long data)
 
        if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
                led_set_brightness_nosleep(led_cdev, LED_OFF);
+               led_cdev->flags &= ~LED_BLINK_SW;
                return;
        }
 
        if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
-               led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+               led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW);
                return;
        }
 
@@ -151,6 +152,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
                return;
        }
 
+       led_cdev->flags |= LED_BLINK_SW;
        mod_timer(&led_cdev->blink_timer, jiffies + 1);
 }
 
@@ -219,6 +221,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev)
        del_timer_sync(&led_cdev->blink_timer);
        led_cdev->blink_delay_on = 0;
        led_cdev->blink_delay_off = 0;
+       led_cdev->flags &= ~LED_BLINK_SW;
 }
 EXPORT_SYMBOL_GPL(led_stop_software_blink);
 
@@ -226,10 +229,10 @@ void led_set_brightness(struct led_classdev *led_cdev,
                        enum led_brightness brightness)
 {
        /*
-        * In case blinking is on delay brightness setting
+        * If software blink is active, delay brightness setting
         * until the next timer tick.
         */
-       if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
+       if (led_cdev->flags & LED_BLINK_SW) {
                /*
                 * If we need to disable soft blinking delegate this to the
                 * work queue task to avoid problems in case we are called
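
The LED_BLINK_SW flag replaces an indirect test: blink_delay_on/blink_delay_off stay nonzero after a one-shot blink completes, so keying the brightness deferral off them deferred writes even when no timer was running; the flag is set when the software-blink timer is armed and cleared on every path that stops it. Runnable model:

    #include <stdbool.h>
    #include <stdio.h>

    #define LED_BLINK_SW (1 << 0)

    static unsigned int flags;
    static unsigned int delay_on = 500, delay_off = 500;    /* stale config */

    static bool must_defer(void)
    {
            return flags & LED_BLINK_SW;    /* new test: is the timer live? */
    }

    int main(void)
    {
            /* one-shot blink finished: timer stopped, config left behind */
            flags &= ~LED_BLINK_SW;

            printf("old test defers: %d\n", delay_on || delay_off); /* 1, wrong */
            printf("new test defers: %d\n", must_defer());          /* 0 */
            return 0;
    }
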
index 410c39c62dc7f1c3bdfa3b5dd305bbef031aef25..c9f386213e9ef16ae006e2270e2615e4bb60253e 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
+#include <linux/suspend.h>
 #include "../leds.h"
 
 static int panic_heartbeats;
@@ -154,6 +155,30 @@ static struct led_trigger heartbeat_led_trigger = {
        .deactivate = heartbeat_trig_deactivate,
 };
 
+static int heartbeat_pm_notifier(struct notifier_block *nb,
+                                unsigned long pm_event, void *unused)
+{
+       int rc;
+
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+       case PM_HIBERNATION_PREPARE:
+       case PM_RESTORE_PREPARE:
+               led_trigger_unregister(&heartbeat_led_trigger);
+               break;
+       case PM_POST_SUSPEND:
+       case PM_POST_HIBERNATION:
+       case PM_POST_RESTORE:
+               rc = led_trigger_register(&heartbeat_led_trigger);
+               if (rc)
+                       pr_err("could not re-register heartbeat trigger\n");
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
 static int heartbeat_reboot_notifier(struct notifier_block *nb,
                                     unsigned long code, void *unused)
 {
@@ -168,6 +193,10 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
+static struct notifier_block heartbeat_pm_nb = {
+       .notifier_call = heartbeat_pm_notifier,
+};
+
 static struct notifier_block heartbeat_reboot_nb = {
        .notifier_call = heartbeat_reboot_notifier,
 };
@@ -184,12 +213,14 @@ static int __init heartbeat_trig_init(void)
                atomic_notifier_chain_register(&panic_notifier_list,
                                               &heartbeat_panic_nb);
                register_reboot_notifier(&heartbeat_reboot_nb);
+               register_pm_notifier(&heartbeat_pm_nb);
        }
        return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
+       unregister_pm_notifier(&heartbeat_pm_nb);
        unregister_reboot_notifier(&heartbeat_reboot_nb);
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &heartbeat_panic_nb);
index b73c6e7d28e4b9b0f337355aa75ce09d56c7e8f7..6f2c8522e14aaf74e0ed7eef7a1cdf60ee335b9b 100644 (file)
@@ -61,21 +61,36 @@ static int mcb_probe(struct device *dev)
        struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
        struct mcb_device *mdev = to_mcb_device(dev);
        const struct mcb_device_id *found_id;
+       struct module *carrier_mod;
+       int ret;
 
        found_id = mcb_match_id(mdrv->id_table, mdev);
        if (!found_id)
                return -ENODEV;
 
-       return mdrv->probe(mdev, found_id);
+       carrier_mod = mdev->dev.parent->driver->owner;
+       if (!try_module_get(carrier_mod))
+               return -EINVAL;
+
+       get_device(dev);
+       ret = mdrv->probe(mdev, found_id);
+       if (ret)
+               module_put(carrier_mod);
+
+       return ret;
 }
 
 static int mcb_remove(struct device *dev)
 {
        struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
        struct mcb_device *mdev = to_mcb_device(dev);
+       struct module *carrier_mod;
 
        mdrv->remove(mdev);
 
+       carrier_mod = mdev->dev.parent->driver->owner;
+       module_put(carrier_mod);
+
        put_device(&mdev->dev);
 
        return 0;
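
mcb_probe() now pins the carrier module for as long as a driver is bound to one of its devices: try_module_get() fails if the carrier is already unloading, a failed probe drops the reference immediately, and mcb_remove() drops it otherwise, so every successful probe is matched by exactly one put. Condensed shape of the pairing (kernel fragment; bind_hardware() is an assumed stand-in for the driver's probe hook):

    #include <linux/device.h>
    #include <linux/module.h>

    static int bind_hardware(struct device *dev);   /* assumed driver hook */

    static int probe_one(struct device *dev, struct module *carrier_mod)
    {
            int ret;

            if (!try_module_get(carrier_mod))       /* carrier unloading */
                    return -EINVAL;

            get_device(dev);
            ret = bind_hardware(dev);
            if (ret)
                    module_put(carrier_mod);        /* failed: unpin now */
            return ret;                             /* success: unpin in remove() */
    }
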
index d7723ce772b3af6b43804a4996656f9b7b58bc34..c04bc6afb965e59025169333713e0e9d46ddba2c 100644 (file)
@@ -1274,8 +1274,6 @@ struct uvc_xu_control_mapping32 {
 static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
                        const struct uvc_xu_control_mapping32 __user *up)
 {
-       struct uvc_menu_info __user *umenus;
-       struct uvc_menu_info __user *kmenus;
        compat_caddr_t p;
 
        if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1292,17 +1290,7 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 
        if (__get_user(p, &up->menu_info))
                return -EFAULT;
-       umenus = compat_ptr(p);
-       if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus)))
-               return -EFAULT;
-
-       kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus));
-       if (kmenus == NULL)
-               return -EFAULT;
-       kp->menu_info = kmenus;
-
-       if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus)))
-               return -EFAULT;
+       kp->menu_info = compat_ptr(p);
 
        return 0;
 }
@@ -1310,10 +1298,6 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
                        struct uvc_xu_control_mapping32 __user *up)
 {
-       struct uvc_menu_info __user *umenus;
-       struct uvc_menu_info __user *kmenus = kp->menu_info;
-       compat_caddr_t p;
-
        if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
            __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
            __put_user(kp->menu_count, &up->menu_count))
@@ -1322,16 +1306,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
        if (__clear_user(up->reserved, sizeof(up->reserved)))
                return -EFAULT;
 
-       if (kp->menu_count == 0)
-               return 0;
-
-       if (get_user(p, &up->menu_info))
-               return -EFAULT;
-       umenus = compat_ptr(p);
-
-       if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus)))
-               return -EFAULT;
-
        return 0;
 }
 
@@ -1346,8 +1320,6 @@ struct uvc_xu_control_query32 {
 static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
                        const struct uvc_xu_control_query32 __user *up)
 {
-       u8 __user *udata;
-       u8 __user *kdata;
        compat_caddr_t p;
 
        if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1361,17 +1333,7 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
 
        if (__get_user(p, &up->data))
                return -EFAULT;
-       udata = compat_ptr(p);
-       if (!access_ok(VERIFY_READ, udata, kp->size))
-               return -EFAULT;
-
-       kdata = compat_alloc_user_space(kp->size);
-       if (kdata == NULL)
-               return -EFAULT;
-       kp->data = kdata;
-
-       if (copy_in_user(kdata, udata, kp->size))
-               return -EFAULT;
+       kp->data = compat_ptr(p);
 
        return 0;
 }
@@ -1379,26 +1341,10 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
 static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
                        struct uvc_xu_control_query32 __user *up)
 {
-       u8 __user *udata;
-       u8 __user *kdata = kp->data;
-       compat_caddr_t p;
-
        if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
            __copy_to_user(up, kp, offsetof(typeof(*up), data)))
                return -EFAULT;
 
-       if (kp->size == 0)
-               return 0;
-
-       if (get_user(p, &up->data))
-               return -EFAULT;
-       udata = compat_ptr(p);
-       if (!access_ok(VERIFY_READ, udata, kp->size))
-               return -EFAULT;
-
-       if (copy_in_user(udata, kdata, kp->size))
-               return -EFAULT;
-
        return 0;
 }
 
@@ -1408,47 +1354,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
 static long uvc_v4l2_compat_ioctl32(struct file *file,
                     unsigned int cmd, unsigned long arg)
 {
+       struct uvc_fh *handle = file->private_data;
        union {
                struct uvc_xu_control_mapping xmap;
                struct uvc_xu_control_query xqry;
        } karg;
        void __user *up = compat_ptr(arg);
-       mm_segment_t old_fs;
        long ret;
 
        switch (cmd) {
        case UVCIOC_CTRL_MAP32:
-               cmd = UVCIOC_CTRL_MAP;
                ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
+               if (ret)
+                       return ret;
+               ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
+               if (ret)
+                       return ret;
+               ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
+               if (ret)
+                       return ret;
+
                break;
 
        case UVCIOC_CTRL_QUERY32:
-               cmd = UVCIOC_CTRL_QUERY;
                ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
+               if (ret)
+                       return ret;
+               ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
+               if (ret)
+                       return ret;
+               ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
+               if (ret)
+                       return ret;
                break;
 
        default:
                return -ENOIOCTLCMD;
        }
 
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       ret = video_ioctl2(file, cmd, (unsigned long)&karg);
-       set_fs(old_fs);
-
-       if (ret < 0)
-               return ret;
-
-       switch (cmd) {
-       case UVCIOC_CTRL_MAP:
-               ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
-               break;
-
-       case UVCIOC_CTRL_QUERY:
-               ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
-               break;
-       }
-
        return ret;
 }
 #endif
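
The rewritten compat handler above no longer bounces through set_fs(KERNEL_DS), video_ioctl2() and compat_alloc_user_space() buffers; it converts the 32-bit layout into a kernel-space copy, calls the native handler directly, and converts the result back. A minimal sketch of that three-step pattern (the command number, argument type and helpers below are illustrative, not the driver's real API):

#include <linux/compat.h>
#include <linux/fs.h>

struct my_native_arg { u32 id; u32 value; };    /* hypothetical layout */

static int my_get_compat(struct my_native_arg *kp, void __user *up);
static int my_put_compat(const struct my_native_arg *kp, void __user *up);
static int my_native_handler(void *priv, struct my_native_arg *kp);

#define MY_IOC32 0x1234                 /* hypothetical 32-bit ioctl number */

static long my_compat_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg)
{
        struct my_native_arg karg;      /* kernel copy, never __user */
        void __user *up = compat_ptr(arg);
        long ret;

        switch (cmd) {
        case MY_IOC32:
                ret = my_get_compat(&karg, up);         /* 32-bit -> native */
                if (ret)
                        return ret;
                ret = my_native_handler(file->private_data, &karg);
                if (ret)
                        return ret;
                return my_put_compat(&karg, up);        /* native -> 32-bit */
        default:
                return -ENOIOCTLCMD;
        }
}
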
index ca94bded33864377f585099b2024f0c7477c8424..8bef4331bd511261acacf1b58acbeb56749a90ff 100644
@@ -1,7 +1,7 @@
 /*
  * Media Controller ancillary functions
  *
- * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
  * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
  * Copyright (C) 2006-2010 Nokia Corporation
  * Copyright (c) 2016 Intel Corporation.
index af4884ba6b7cafa6fe5b2eeda1bee00e59fdd2a3..15508df24e5d36fe1e6c7337de88a3fe9dd66797 100644
@@ -398,7 +398,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
        gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
                           GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
        gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
-                          GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
+                          GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
        gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
                           GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
                           p->cycle2cyclesamecsen);
index eed254da63a8c79ebb95ff711aef5b4e8926f0d9..641c1a5666879b9715f5ee0924db447411a2e653 100644
@@ -730,7 +730,7 @@ static void mei_cl_wake_all(struct mei_cl *cl)
        /* synchronized under device mutex */
        if (waitqueue_active(&cl->wait)) {
                cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
-               wake_up_interruptible(&cl->wait);
+               wake_up(&cl->wait);
        }
 }
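
The one-character mei change above matters because wake_up_interruptible() wakes only TASK_INTERRUPTIBLE sleepers; a waiter that went to sleep with plain wait_event() would never be woken. A self-contained sketch of the difference:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_done;                    /* locking elided for brevity */

static void waiter_uninterruptible(void)
{
        wait_event(my_wq, my_done);     /* sleeps in TASK_UNINTERRUPTIBLE */
}

static int waiter_interruptible(void)
{
        return wait_event_interruptible(my_wq, my_done);
}

static void waker(void)
{
        my_done = true;
        /* wake_up_interruptible() would skip the TASK_UNINTERRUPTIBLE
         * sleeper above, leaving it stuck; wake_up() wakes both. */
        wake_up(&my_wq);
}
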
 
index 16baeb51b2bdfe2b37d5179b9816ba6493622e99..ef3618299494f4973fd528ae72325490eb622b2c 100644
@@ -1147,11 +1147,17 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
  */
 static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
 {
-       struct kstat stat;
        int err, minor;
+       struct path path;
+       struct kstat stat;
 
        /* Probably this is an MTD character device node path */
-       err = vfs_stat(mtd_dev, &stat);
+       err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+       if (err)
+               return ERR_PTR(err);
+
+       err = vfs_getattr(&path, &stat);
+       path_put(&path);
        if (err)
                return ERR_PTR(err);
 
@@ -1160,6 +1166,7 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
                return ERR_PTR(-EINVAL);
 
        minor = MINOR(stat.rdev);
+
        if (minor & 1)
                /*
                 * Just do not think the "/dev/mtdrX" devices support is need,
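
Both UBI call sites in this series replace vfs_stat(), whose pathname argument is a __user pointer, with kern_path() plus vfs_getattr(), which accept kernel-space strings. A sketch of the lookup as a standalone helper (this kernel's two-argument vfs_getattr(); later kernels add mask and flags parameters):

#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/stat.h>

/* Sketch: stat a kernel-space pathname. vfs_stat() expects a __user
 * pointer, so kernel callers resolve the path themselves. */
static int kstat_kernel_path(const char *pathname, struct kstat *stat)
{
        struct path path;
        int err;

        err = kern_path(pathname, LOOKUP_FOLLOW, &path);
        if (err)
                return err;

        err = vfs_getattr(&path, stat);
        path_put(&path);        /* drop the reference kern_path() took */
        return err;
}
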
index 5780dd1ba79de96f21d4f08f7c12c5426b0d1c3b..ebf517271d2925452d911eb8df9a9d973b05926c 100644
@@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
        int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
        struct ubi_volume *vol = ubi->volumes[idx];
        struct ubi_vid_hdr *vid_hdr;
+       uint32_t crc;
 
        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
        if (!vid_hdr)
@@ -599,14 +600,8 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
                goto out_put;
        }
 
-       vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-       err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
-       if (err) {
-               up_read(&ubi->fm_eba_sem);
-               goto write_error;
-       }
+       ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
 
-       data_size = offset + len;
        mutex_lock(&ubi->buf_mutex);
        memset(ubi->peb_buf + offset, 0xFF, len);
 
@@ -621,6 +616,19 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
 
        memcpy(ubi->peb_buf + offset, buf, len);
 
+       data_size = offset + len;
+       crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+       vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+       vid_hdr->copy_flag = 1;
+       vid_hdr->data_size = cpu_to_be32(data_size);
+       vid_hdr->data_crc = cpu_to_be32(crc);
+       err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+       if (err) {
+               mutex_unlock(&ubi->buf_mutex);
+               up_read(&ubi->fm_eba_sem);
+               goto write_error;
+       }
+
        err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
        if (err) {
                mutex_unlock(&ubi->buf_mutex);
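
The reordering in recover_peb() above fills the payload buffer first, then computes the CRC over the whole data area and stamps copy_flag, data_size and data_crc into the VID header before anything is written, so the recovered copy is self-describing. A condensed sketch of that ordering, assuming UBI's internal helpers as used in the hunk (drivers/mtd/ubi/ubi.h):

#include <linux/crc32.h>

static int write_recovered_copy(struct ubi_device *ubi,
                                struct ubi_vid_hdr *vid_hdr,
                                int pnum, int offset, int len)
{
        int data_size = offset + len;
        uint32_t crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
        int err;

        /* Stamp the header only after the payload is final. */
        vid_hdr->sqnum     = cpu_to_be64(ubi_next_sqnum(ubi));
        vid_hdr->copy_flag = 1;                 /* mark as re-written copy */
        vid_hdr->data_size = cpu_to_be32(data_size);
        vid_hdr->data_crc  = cpu_to_be32(crc);

        err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
        if (err)
                return err;
        return ubi_io_write_data(ubi, ubi->peb_buf, pnum, 0, data_size);
}
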
index 348dbbcbedc8fc3e3c93f9257cf1c4ccb7abddf8..a9e2cef7c95c24c0deb4d6500d7da7f00424db7b 100644
@@ -302,6 +302,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
 struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
 {
        int error, ubi_num, vol_id;
+       struct path path;
        struct kstat stat;
 
        dbg_gen("open volume %s, mode %d", pathname, mode);
@@ -309,7 +310,12 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
        if (!pathname || !*pathname)
                return ERR_PTR(-EINVAL);
 
-       error = vfs_stat(pathname, &stat);
+       error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+       if (error)
+               return ERR_PTR(error);
+
+       error = vfs_getattr(&path, &stat);
+       path_put(&path);
        if (error)
                return ERR_PTR(error);
 
index c5fe915870ad4184dbf1c909f8d8714a2008e9c5..a59d55e25d5f02cbc69d903daaa577e21bafc10f 100644
@@ -12895,52 +12895,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
        return rc;
 }
 
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
 {
        struct bnx2x_vlan_entry *vlan;
        int rc = 0;
 
-       if (!bp->vlan_cnt) {
-               DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
-               return 0;
-       }
-
+       /* Configure all non-configured entries */
        list_for_each_entry(vlan, &bp->vlan_reg, link) {
-               /* Prepare for cleanup in case of errors */
-               if (rc) {
-                       vlan->hw = false;
-                       continue;
-               }
-
-               if (!vlan->hw)
+               if (vlan->hw)
                        continue;
 
-               DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+               if (bp->vlan_cnt >= bp->vlan_credit)
+                       return -ENOBUFS;
 
                rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
                if (rc) {
-                       BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
-                       vlan->hw = false;
-                       rc = -EINVAL;
-                       continue;
+                       BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+                       return rc;
                }
+
+               DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+               vlan->hw = true;
+               bp->vlan_cnt++;
        }
 
-       return rc;
+       return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+       bool need_accept_any_vlan;
+
+       need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+       if (bp->accept_any_vlan != need_accept_any_vlan) {
+               bp->accept_any_vlan = need_accept_any_vlan;
+               DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+                  bp->accept_any_vlan ? "raised" : "cleared");
+               if (set_rx_mode) {
+                       if (IS_PF(bp))
+                               bnx2x_set_rx_mode_inner(bp);
+                       else
+                               bnx2x_vfpf_storm_rx_mode(bp);
+               }
+       }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+       struct bnx2x_vlan_entry *vlan;
+
+       /* The hw forgot all entries after reload */
+       list_for_each_entry(vlan, &bp->vlan_reg, link)
+               vlan->hw = false;
+       bp->vlan_cnt = 0;
+
+       /* Don't set rx mode here. Our caller will do it. */
+       bnx2x_vlan_configure(bp, false);
+
+       return 0;
 }
 
 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
        struct bnx2x *bp = netdev_priv(dev);
        struct bnx2x_vlan_entry *vlan;
-       bool hw = false;
-       int rc = 0;
-
-       if (!netif_running(bp->dev)) {
-               DP(NETIF_MSG_IFUP,
-                  "Ignoring VLAN configuration the interface is down\n");
-               return -EFAULT;
-       }
 
        DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
 
@@ -12948,93 +12967,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        if (!vlan)
                return -ENOMEM;
 
-       bp->vlan_cnt++;
-       if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
-               DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
-               bp->accept_any_vlan = true;
-               if (IS_PF(bp))
-                       bnx2x_set_rx_mode_inner(bp);
-               else
-                       bnx2x_vfpf_storm_rx_mode(bp);
-       } else if (bp->vlan_cnt <= bp->vlan_credit) {
-               rc = __bnx2x_vlan_configure_vid(bp, vid, true);
-               hw = true;
-       }
-
        vlan->vid = vid;
-       vlan->hw = hw;
+       vlan->hw = false;
+       list_add_tail(&vlan->link, &bp->vlan_reg);
 
-       if (!rc) {
-               list_add(&vlan->link, &bp->vlan_reg);
-       } else {
-               bp->vlan_cnt--;
-               kfree(vlan);
-       }
-
-       DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+       if (netif_running(dev))
+               bnx2x_vlan_configure(bp, true);
 
-       return rc;
+       return 0;
 }
 
 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
        struct bnx2x *bp = netdev_priv(dev);
        struct bnx2x_vlan_entry *vlan;
+       bool found = false;
        int rc = 0;
 
-       if (!netif_running(bp->dev)) {
-               DP(NETIF_MSG_IFUP,
-                  "Ignoring VLAN configuration the interface is down\n");
-               return -EFAULT;
-       }
-
        DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
 
-       if (!bp->vlan_cnt) {
-               BNX2X_ERR("Unable to kill VLAN %d\n", vid);
-               return -EINVAL;
-       }
-
        list_for_each_entry(vlan, &bp->vlan_reg, link)
-               if (vlan->vid == vid)
+               if (vlan->vid == vid) {
+                       found = true;
                        break;
+               }
 
-       if (vlan->vid != vid) {
+       if (!found) {
                BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
                return -EINVAL;
        }
 
-       if (vlan->hw)
+       if (netif_running(dev) && vlan->hw) {
                rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+               DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+               bp->vlan_cnt--;
+       }
 
        list_del(&vlan->link);
        kfree(vlan);
 
-       bp->vlan_cnt--;
-
-       if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
-               /* Configure all non-configured entries */
-               list_for_each_entry(vlan, &bp->vlan_reg, link) {
-                       if (vlan->hw)
-                               continue;
-
-                       rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
-                       if (rc) {
-                               BNX2X_ERR("Unable to config VLAN %d\n",
-                                         vlan->vid);
-                               continue;
-                       }
-                       DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
-                          vlan->vid);
-                       vlan->hw = true;
-               }
-               DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
-               bp->accept_any_vlan = false;
-               if (IS_PF(bp))
-                       bnx2x_set_rx_mode_inner(bp);
-               else
-                       bnx2x_vfpf_storm_rx_mode(bp);
-       }
+       if (netif_running(dev))
+               bnx2x_vlan_configure(bp, true);
 
        DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
 
index 72a2efff8e494c7f077b2ee1311bdd54e857ca03..c777cde85ce416d98f3959178c193c0cca4b6c1c 100644
@@ -286,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;
 
+               tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
+               wmb();  /* Sync is_push and byte queue before pushing data */
 
                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
@@ -298,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                         push_len);
                }
 
-               tx_buf->is_push = 1;
                goto tx_done;
        }
 
@@ -1112,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
        if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
 
-       if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
-               netdev_features_t features = skb->dev->features;
+       if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+           (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                u16 vlan_proto = tpa_info->metadata >>
                        RX_CMP_FLAGS2_METADATA_TPID_SFT;
+               u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
 
-               if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-                    vlan_proto == ETH_P_8021Q) ||
-                   ((features & NETIF_F_HW_VLAN_STAG_RX) &&
-                    vlan_proto == ETH_P_8021AD)) {
-                       __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
-                                              tpa_info->metadata &
-                                              RX_CMP_FLAGS2_METADATA_VID_MASK);
-               }
+               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
        }
 
        skb_checksum_none_assert(skb);
@@ -1277,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 
        skb->protocol = eth_type_trans(skb, dev);
 
-       if (rxcmp1->rx_cmp_flags2 &
-           cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
-               netdev_features_t features = skb->dev->features;
+       if ((rxcmp1->rx_cmp_flags2 &
+            cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+           (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+               u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
                u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
 
-               if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-                    vlan_proto == ETH_P_8021Q) ||
-                   ((features & NETIF_F_HW_VLAN_STAG_RX) &&
-                    vlan_proto == ETH_P_8021AD))
-                       __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
-                                              meta_data &
-                                              RX_CMP_FLAGS2_METADATA_VID_MASK);
+               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
        }
 
        skb_checksum_none_assert(skb);
@@ -5466,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 
        if (!bnxt_rfs_capable(bp))
                features &= ~NETIF_F_NTUPLE;
+
+       /* Both CTAG and STAG VLAN acceleration on the RX side have to be
+        * turned on or off together.
+        */
+       if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+           (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+               if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+                       features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+                                     NETIF_F_HW_VLAN_STAG_RX);
+               else
+                       features |= NETIF_F_HW_VLAN_CTAG_RX |
+                                   NETIF_F_HW_VLAN_STAG_RX;
+       }
+
        return features;
 }
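
The bnxt_fix_features() hunk above forces CTAG and STAG RX VLAN acceleration to be toggled as a pair, snapping requests to the device's current state. A simplified variant of that lock-step pattern (unlike the hunk, this sketch only intervenes on mixed requests and leaves a both-off request alone):

#include <linux/netdevice.h>

static netdev_features_t pair_fix_features(struct net_device *dev,
                                           netdev_features_t features)
{
        const netdev_features_t pair =
                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;

        if ((features & pair) != pair && (features & pair) != 0) {
                if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
                        features &= ~pair;      /* currently on: both off */
                else
                        features |= pair;       /* currently off: both on */
        }
        return features;
}
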
 
index a2cdfc1261dc77092049575f5519d7160a1c8f29..50812a1d67bdf8aab1ca8cb5fe560c7c5e2cb778 100644
@@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
        CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
        CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+       CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
        CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
        CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
        CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
index 41b01064510098c00a66d86bb49cf2f3fe02f9c3..4edb98c3c6c70a9c620caa7928e8e5d555db1242 100644
@@ -1195,7 +1195,7 @@ static int ethoc_probe(struct platform_device *pdev)
        priv->mdio = mdiobus_alloc();
        if (!priv->mdio) {
                ret = -ENOMEM;
-               goto free;
+               goto free2;
        }
 
        priv->mdio->name = "ethoc-mdio";
@@ -1208,7 +1208,7 @@ static int ethoc_probe(struct platform_device *pdev)
        ret = mdiobus_register(priv->mdio);
        if (ret) {
                dev_err(&netdev->dev, "failed to register MDIO bus\n");
-               goto free;
+               goto free2;
        }
 
        ret = ethoc_mdio_probe(netdev);
@@ -1241,9 +1241,10 @@ static int ethoc_probe(struct platform_device *pdev)
 error:
        mdiobus_unregister(priv->mdio);
        mdiobus_free(priv->mdio);
-free:
+free2:
        if (priv->clk)
                clk_disable_unprepare(priv->clk);
+free:
        free_netdev(netdev);
 out:
        return ret;
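
The ethoc fix above introduces the free2 label so that failures after clk_prepare_enable() disable the clock before free_netdev() runs, restoring strict reverse-order unwinding. The canonical skeleton of that pattern, with illustrative acquire/release helpers:

static int acquire_a(void);     /* e.g. alloc_etherdev() */
static int acquire_b(void);     /* e.g. clk_prepare_enable() */
static int acquire_c(void);     /* e.g. mdiobus_register() */
static void release_a(void);
static void release_b(void);

static int my_probe(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                goto out;
        ret = acquire_b();
        if (ret)
                goto free_a;
        ret = acquire_c();
        if (ret)
                goto free_b;    /* NOT free_a: b must be undone first */
        return 0;

free_b:
        release_b();            /* falls through in reverse order */
free_a:
        release_a();
out:
        return ret;
}
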
index 3c0255e98535f3c43948ae0b7e116643e2e39287..fea0f330ddbdeb2a5cf99b81f30e46919d066f62 100644
@@ -2416,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
                return -EOPNOTSUPP;
 
        if (ec->rx_max_coalesced_frames > 255) {
-               pr_err("Rx coalesced frames exceed hardware limiation");
+               pr_err("Rx coalesced frames exceed hardware limitation\n");
                return -EINVAL;
        }
 
        if (ec->tx_max_coalesced_frames > 255) {
-               pr_err("Tx coalesced frame exceed hardware limiation");
+               pr_err("Tx coalesced frame exceed hardware limitation\n");
                return -EINVAL;
        }
 
        cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
        if (cycle > 0xFFFF) {
-               pr_err("Rx coalesed usec exceeed hardware limiation");
+               pr_err("Rx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
        cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
        if (cycle > 0xFFFF) {
-               pr_err("Rx coalesed usec exceeed hardware limiation");
+               pr_err("Rx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
index 7615e0668acbf9b223b2087ed89646146dc6e827..2e6785b6e8bee1d971d739e160abe1ce17da9ca0 100644
@@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                                 tx_queue->tx_ring_size);
 
        if (likely(!nr_frags)) {
-               lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+               if (likely(!do_tstamp))
+                       lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
        } else {
                u32 lstatus_start = lstatus;
 
index c984462fad2a26fa267faab7707181a19c1d1814..4763252bbf8557ac98c73876e685d4b7413e3f43 100644
@@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
 static void mtk_phy_link_adjust(struct net_device *dev)
 {
        struct mtk_mac *mac = netdev_priv(dev);
+       u16 lcl_adv = 0, rmt_adv = 0;
+       u8 flowctrl;
        u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
                  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
                  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
        if (mac->phy_dev->link)
                mcr |= MAC_MCR_FORCE_LINK;
 
-       if (mac->phy_dev->duplex)
+       if (mac->phy_dev->duplex) {
                mcr |= MAC_MCR_FORCE_DPX;
 
-       if (mac->phy_dev->pause)
-               mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+               if (mac->phy_dev->pause)
+                       rmt_adv = LPA_PAUSE_CAP;
+               if (mac->phy_dev->asym_pause)
+                       rmt_adv |= LPA_PAUSE_ASYM;
+
+               if (mac->phy_dev->advertising & ADVERTISED_Pause)
+                       lcl_adv |= ADVERTISE_PAUSE_CAP;
+               if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+                       lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+               flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+               if (flowctrl & FLOW_CTRL_TX)
+                       mcr |= MAC_MCR_FORCE_TX_FC;
+               if (flowctrl & FLOW_CTRL_RX)
+                       mcr |= MAC_MCR_FORCE_RX_FC;
+
+               netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+                         flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+                         flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+       }
 
        mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
 
@@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        u32 val, ge_mode;
 
        np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+       if (!np && of_phy_is_fixed_link(mac->of_node))
+               if (!of_phy_register_fixed_link(mac->of_node))
+                       np = of_node_get(mac->of_node);
        if (!np)
                return -ENODEV;
 
        switch (of_get_phy_mode(np)) {
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII:
                ge_mode = 0;
                break;
@@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        mac->phy_dev->autoneg = AUTONEG_ENABLE;
        mac->phy_dev->speed = 0;
        mac->phy_dev->duplex = 0;
-       mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+       mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+                                  SUPPORTED_Asym_Pause;
        mac->phy_dev->advertising = mac->phy_dev->supported |
                                    ADVERTISED_Autoneg;
        phy_start_aneg(mac->phy_dev);
@@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
        return 0;
 
 err_free_bus:
-       kfree(eth->mii_bus);
+       mdiobus_free(eth->mii_bus);
 
 err_put_node:
        of_node_put(mii_np);
@@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
 
        mdiobus_unregister(eth->mii_bus);
        of_node_put(eth->mii_bus->dev.of_node);
-       kfree(eth->mii_bus);
+       mdiobus_free(eth->mii_bus);
 }
 
 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
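
Instead of forcing both flow-control directions whenever the PHY reports pause, the mtk hunk above builds local and link-partner advertisement masks and lets mii_resolve_flowctrl_fdx() decide each direction independently. The resolution step in isolation ('phy' stands in for the driver's phy_device pointer):

#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>

static u8 resolve_pause(struct phy_device *phy)
{
        u16 lcl_adv = 0, rmt_adv = 0;

        /* What the link partner advertised... */
        if (phy->pause)
                rmt_adv = LPA_PAUSE_CAP;
        if (phy->asym_pause)
                rmt_adv |= LPA_PAUSE_ASYM;

        /* ...versus what we advertised ourselves. */
        if (phy->advertising & ADVERTISED_Pause)
                lcl_adv |= ADVERTISE_PAUSE_CAP;
        if (phy->advertising & ADVERTISED_Asym_Pause)
                lcl_adv |= ADVERTISE_PAUSE_ASYM;

        /* Returns a FLOW_CTRL_TX / FLOW_CTRL_RX bitmask. */
        return mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
}
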
index fd4392999eeefcd9a2f77b05583b04d04a10d663..f5c8d5db25a8cebc367c752c4333bc839b72505a 100644
@@ -3192,10 +3192,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        flush_workqueue(priv->wq);
        if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
                netif_device_detach(netdev);
-               mutex_lock(&priv->state_lock);
-               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-                       mlx5e_close_locked(netdev);
-               mutex_unlock(&priv->state_lock);
+               mlx5e_close(netdev);
        } else {
                unregister_netdev(netdev);
        }
index 229ab16fb8d3a5c92afec2ece0a30e2de9a46236..b000ddc29553bb40e297f075c1a3f7606487a87e 100644
@@ -317,7 +317,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        while ((sq->pc & wq->sz_m1) > sq->edge)
                mlx5e_send_nop(sq, false);
 
-       sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+       if (bf)
+               sq->bf_budget--;
 
        sq->stats.packets++;
        sq->stats.bytes += num_bytes;
index b84a6918a7006253c503cb924c347f2123854781..aebbd6ccb9fe29fb90d46518f854252236d35934 100644
@@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                   0, &dest);
-       if (IS_ERR_OR_NULL(flow_rule)) {
+       if (IS_ERR(flow_rule)) {
                pr_warn(
                        "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
                         dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
@@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 
        table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
        fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
-       if (IS_ERR_OR_NULL(fdb)) {
+       if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create FDB Table err %d\n", err);
                goto out;
@@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
        eth_broadcast_addr(dmac);
        g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create flow group err(%d)\n", err);
                goto out;
@@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
        eth_zero_addr(dmac);
        dmac[0] = 0x01;
        g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
                goto out;
@@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
        g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
                goto out;
@@ -529,7 +529,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
                }
        }
 
-       kfree(flow_group_in);
+       kvfree(flow_group_in);
        return err;
 }
 
@@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
                                        esw_fdb_set_vport_rule(esw,
                                                               mac,
                                                               vport_idx);
+                       iter_vaddr->mc_promisc = true;
                        break;
                case MLX5_ACTION_DEL:
                        if (!iter_vaddr)
@@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
                return;
 
        acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-       if (IS_ERR_OR_NULL(acl)) {
+       if (IS_ERR(acl)) {
                err = PTR_ERR(acl);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
                         vport->vport, err);
@@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
        vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(vlan_grp)) {
+       if (IS_ERR(vlan_grp)) {
                err = PTR_ERR(vlan_grp);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
                         vport->vport, err);
@@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
        drop_grp = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(drop_grp)) {
+       if (IS_ERR(drop_grp)) {
                err = PTR_ERR(drop_grp);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
                         vport->vport, err);
@@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        vport->egress.drop_grp = drop_grp;
        vport->egress.allowed_vlans_grp = vlan_grp;
 out:
-       kfree(flow_group_in);
+       kvfree(flow_group_in);
        if (err && !IS_ERR_OR_NULL(vlan_grp))
                mlx5_destroy_flow_group(vlan_grp);
        if (err && !IS_ERR_OR_NULL(acl))
@@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
                return;
 
        acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-       if (IS_ERR_OR_NULL(acl)) {
+       if (IS_ERR(acl)) {
                err = PTR_ERR(acl);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
                         vport->vport, err);
@@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
                         vport->vport, err);
@@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
                         vport->vport, err);
@@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
                         vport->vport, err);
@@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
                         vport->vport, err);
@@ -1259,7 +1260,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
                        mlx5_destroy_flow_table(vport->ingress.acl);
        }
 
-       kfree(flow_group_in);
+       kvfree(flow_group_in);
 }
 
 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+       if (IS_ERR(vport->ingress.allow_rule)) {
                err = PTR_ERR(vport->ingress.allow_rule);
                pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
                        vport->vport, err);
@@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_DROP,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+       if (IS_ERR(vport->ingress.drop_rule)) {
                err = PTR_ERR(vport->ingress.drop_rule);
                pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
                        vport->vport, err);
@@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+       if (IS_ERR(vport->egress.allowed_vlan)) {
                err = PTR_ERR(vport->egress.allowed_vlan);
                pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
                        vport->vport, err);
@@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_DROP,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+       if (IS_ERR(vport->egress.drop_rule)) {
                err = PTR_ERR(vport->egress.drop_rule);
                pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
                        vport->vport, err);
@@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 
        /* Sync with current vport context */
        vport->enabled_events = enable_events;
-       esw_vport_change_handle_locked(vport);
-
        vport->enabled = true;
 
        /* only PF is trusted by default */
        vport->trusted = (vport_num) ? false : true;
-
-       arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+       esw_vport_change_handle_locked(vport);
 
        esw->enabled_vports++;
        esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
@@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
        (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
 
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+       ((u8 *)node_guid)[7] = mac[0];
+       ((u8 *)node_guid)[6] = mac[1];
+       ((u8 *)node_guid)[5] = mac[2];
+       ((u8 *)node_guid)[4] = 0xff;
+       ((u8 *)node_guid)[3] = 0xfe;
+       ((u8 *)node_guid)[2] = mac[3];
+       ((u8 *)node_guid)[1] = mac[4];
+       ((u8 *)node_guid)[0] = mac[5];
+}
+
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               int vport, u8 mac[ETH_ALEN])
 {
-       int err = 0;
        struct mlx5_vport *evport;
+       u64 node_guid;
+       int err = 0;
 
        if (!ESW_ALLOWED(esw))
                return -EPERM;
@@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                return err;
        }
 
+       node_guid_gen_from_mac(&node_guid, mac);
+       err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+       if (err)
+               mlx5_core_warn(esw->dev,
+                              "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+                              vport, err);
+
        mutex_lock(&esw->state_lock);
        if (evport->enabled)
                err = esw_vport_ingress_config(esw, evport);
        mutex_unlock(&esw->state_lock);
-
        return err;
 }
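
Two recurring fixes in this eswitch file: mlx5_create_flow_table()/_group() and mlx5_add_flow_rule() return ERR_PTR() values and never NULL, so IS_ERR() is the correct check (IS_ERR_OR_NULL() would turn a NULL into err = 0), and buffers obtained from mlx5_vzalloc() must be freed with kvfree(), not kfree(). The new node_guid_gen_from_mac() expands a MAC into an EUI-64-style GUID; a shift-based version that is equivalent, on little-endian hosts, to the byte stores above (note the u/l bit is not flipped here, unlike a true modified EUI-64):

#include <linux/etherdevice.h>

/* OUI, then the 0xFF 0xFE filler, then the NIC-specific half. */
static u64 mac_to_node_guid(const u8 mac[ETH_ALEN])
{
        return ((u64)mac[0] << 56) | ((u64)mac[1] << 48) |
               ((u64)mac[2] << 40) | (0xffULL << 32) |
               (0xfeULL << 24) | ((u64)mac[3] << 16) |
               ((u64)mac[4] << 8) | (u64)mac[5];
}
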
 
index 8b5f0b2c0d5cd5d8f62cf6560ee9fa498800ac03..e912a3d2505ed39bee13717224e1c41111a29cda 100644
@@ -1292,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
                                       ft->id);
                        return err;
                }
-               root->root_ft = new_root_ft;
        }
+       root->root_ft = new_root_ft;
        return 0;
 }
 
@@ -1767,6 +1767,9 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev)
 
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
+       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return;
+
        cleanup_root_ns(dev);
        cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
        cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
@@ -1828,29 +1831,36 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
        int err = 0;
 
+       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return 0;
+
        err = mlx5_init_fc_stats(dev);
        if (err)
                return err;
 
-       if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+       if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+           MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
                err = init_root_ns(dev);
                if (err)
                        goto err;
        }
+
        if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
-               err = init_fdb_root_ns(dev);
-               if (err)
-                       goto err;
-       }
-       if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-               err = init_egress_acl_root_ns(dev);
-               if (err)
-                       goto err;
-       }
-       if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-               err = init_ingress_acl_root_ns(dev);
-               if (err)
-                       goto err;
+               if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+                       err = init_fdb_root_ns(dev);
+                       if (err)
+                               goto err;
+               }
+               if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+                       err = init_egress_acl_root_ns(dev);
+                       if (err)
+                               goto err;
+               }
+               if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+                       err = init_ingress_acl_root_ns(dev);
+                       if (err)
+                               goto err;
+               }
        }
 
        return 0;
index b720a274220d95793f5803620d18db8fcae2073b..b82d65802d964a2d03d9e2a99c2719c7642d72d3 100644
@@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);
        else
-               *xrcdn = be32_to_cpu(out.xrcdn);
+               *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
 
        return err;
 }
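
The qp.c change above masks the byte-swapped response down to the 24-bit xrcdn field; the top byte of the 32-bit word is reserved and may contain garbage. As a one-line helper:

#include <linux/kernel.h>       /* be32_to_cpu() */

static inline u32 xrcdn_from_out(__be32 raw)
{
        return be32_to_cpu(raw) & 0xffffff;     /* keep bits 0..23 only */
}
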
index b69dadcfb897a25a51b89a9caed4d1454d603d8e..daf44cd4c566f45649b9efe1e4417b4e868c9048 100644
@@ -508,6 +508,44 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
 
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+                                   u32 vport, u64 node_guid)
+{
+       int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+       void *nic_vport_context;
+       u8 *guid;
+       void *in;
+       int err;
+
+       if (!vport)
+               return -EINVAL;
+       if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+               return -EACCES;
+       if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+               return -ENOTSUPP;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(modify_nic_vport_context_in, in,
+                field_select.node_guid, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+       MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+       nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+                                        in, nic_vport_context);
+       guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
+                           node_guid);
+       MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+       err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+       kvfree(in);
+
+       return err;
+}
+
 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
                                        u16 *qkey_viol_cntr)
 {
index 4a7273771028db08cc4e1bd9e76df1d05fd73785..6f9e3ddff4a8df98dd95818db7e722aaf8cb76a9 100644
@@ -247,15 +247,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 }
 
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                                   u8 swid)
 {
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pspa_pl[MLXSW_REG_PSPA_LEN];
 
-       mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+       mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
 }
 
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+       return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+                                       swid);
+}
+
 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     bool enable)
 {
@@ -305,9 +313,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
 }
 
-static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
-                                          u8 local_port, u8 *p_module,
-                                          u8 *p_width, u8 *p_lane)
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+                                        u8 local_port, u8 *p_module,
+                                        u8 *p_width, u8 *p_lane)
 {
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int err;
@@ -322,16 +330,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
        return 0;
 }
 
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
-                                        u8 local_port, u8 *p_module,
-                                        u8 *p_width)
-{
-       u8 lane;
-
-       return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
-                                              p_width, &lane);
-}
-
 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u8 module, u8 width, u8 lane)
 {
@@ -949,17 +947,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
                                            size_t len)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       u8 module, width, lane;
+       u8 module = mlxsw_sp_port->mapping.module;
+       u8 width = mlxsw_sp_port->mapping.width;
+       u8 lane = mlxsw_sp_port->mapping.lane;
        int err;
 
-       err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
-                                             mlxsw_sp_port->local_port,
-                                             &module, &width, &lane);
-       if (err) {
-               netdev_err(dev, "Failed to retrieve module information\n");
-               return err;
-       }
-
        if (!mlxsw_sp_port->split)
                err = snprintf(name, len, "p%d", module + 1);
        else
@@ -1681,8 +1673,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
        return 0;
 }
 
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                                 bool split, u8 module, u8 width)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                               bool split, u8 module, u8 width, u8 lane)
 {
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct net_device *dev;
@@ -1697,6 +1689,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
        mlxsw_sp_port->local_port = local_port;
        mlxsw_sp_port->split = split;
+       mlxsw_sp_port->mapping.module = module;
+       mlxsw_sp_port->mapping.width = width;
+       mlxsw_sp_port->mapping.lane = lane;
        bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
        mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
        if (!mlxsw_sp_port->active_vlans) {
@@ -1839,28 +1834,6 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        return err;
 }
 
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                               bool split, u8 module, u8 width, u8 lane)
-{
-       int err;
-
-       err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
-                                      lane);
-       if (err)
-               return err;
-
-       err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
-                                    width);
-       if (err)
-               goto err_port_create;
-
-       return 0;
-
-err_port_create:
-       mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
-       return err;
-}
-
 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
 {
        struct net_device *dev = mlxsw_sp_port->dev;
@@ -1909,8 +1882,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
 
 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 {
+       u8 module, width, lane;
        size_t alloc_size;
-       u8 module, width;
        int i;
        int err;
 
@@ -1921,13 +1894,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 
        for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
                err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
-                                                   &width);
+                                                   &width, &lane);
                if (err)
                        goto err_port_module_info_get;
                if (!width)
                        continue;
                mlxsw_sp->port_to_module[i] = module;
-               err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+               err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+                                          lane);
                if (err)
                        goto err_port_create;
        }
@@ -1948,12 +1922,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
        return local_port - offset;
 }
 
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+                                     u8 module, unsigned int count)
+{
+       u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+       int err, i;
+
+       for (i = 0; i < count; i++) {
+               err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+                                              width, i * width);
+               if (err)
+                       goto err_port_module_map;
+       }
+
+       for (i = 0; i < count; i++) {
+               err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+               if (err)
+                       goto err_port_swid_set;
+       }
+
+       for (i = 0; i < count; i++) {
+               err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+                                          module, width, i * width);
+               if (err)
+                       goto err_port_create;
+       }
+
+       return 0;
+
+err_port_create:
+       for (i--; i >= 0; i--)
+               mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+       i = count;
+err_port_swid_set:
+       for (i--; i >= 0; i--)
+               __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+                                        MLXSW_PORT_SWID_DISABLED_PORT);
+       i = count;
+err_port_module_map:
+       for (i--; i >= 0; i--)
+               mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+       return err;
+}
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+                                        u8 base_port, unsigned int count)
+{
+       u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+       int i;
+
+       /* Split by four means we need to re-create two ports, otherwise
+        * only one.
+        */
+       count = count / 2;
+
+       for (i = 0; i < count; i++) {
+               local_port = base_port + i * 2;
+               module = mlxsw_sp->port_to_module[local_port];
+
+               mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+                                        0);
+       }
+
+       for (i = 0; i < count; i++)
+               __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+       for (i = 0; i < count; i++) {
+               local_port = base_port + i * 2;
+               module = mlxsw_sp->port_to_module[local_port];
+
+               mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+                                    width, 0);
+       }
+}
+
 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                               unsigned int count)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_port *mlxsw_sp_port;
-       u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
        u8 module, cur_width, base_port;
        int i;
        int err;
@@ -1965,18 +2012,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                return -EINVAL;
        }
 
+       module = mlxsw_sp_port->mapping.module;
+       cur_width = mlxsw_sp_port->mapping.width;
+
        if (count != 2 && count != 4) {
                netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
                return -EINVAL;
        }
 
-       err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
-                                           &cur_width);
-       if (err) {
-               netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
-               return err;
-       }
-
        if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
                netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
                return -EINVAL;
@@ -2001,25 +2044,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
        for (i = 0; i < count; i++)
                mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 
-       for (i = 0; i < count; i++) {
-               err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
-                                          module, width, i * width);
-               if (err) {
-                       dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
-                       goto err_port_create;
-               }
+       err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+               goto err_port_split_create;
        }
 
        return 0;
 
-err_port_create:
-       for (i--; i >= 0; i--)
-               mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
-       for (i = 0; i < count / 2; i++) {
-               module = mlxsw_sp->port_to_module[base_port + i * 2];
-               mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
-                                    module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
-       }
+err_port_split_create:
+       mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
        return err;
 }
 
@@ -2027,10 +2061,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_port *mlxsw_sp_port;
-       u8 module, cur_width, base_port;
+       u8 cur_width, base_port;
        unsigned int count;
        int i;
-       int err;
 
        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
@@ -2044,12 +2077,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
                return -EINVAL;
        }
 
-       err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
-                                           &cur_width);
-       if (err) {
-               netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
-               return err;
-       }
+       cur_width = mlxsw_sp_port->mapping.width;
        count = cur_width == 1 ? 4 : 2;
 
        base_port = mlxsw_sp_cluster_base_port_get(local_port);
@@ -2061,14 +2089,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
        for (i = 0; i < count; i++)
                mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 
-       for (i = 0; i < count / 2; i++) {
-               module = mlxsw_sp->port_to_module[base_port + i * 2];
-               err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
-                                          module, MLXSW_PORT_MODULE_MAX_WIDTH,
-                                          0);
-               if (err)
-                       dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
-       }
+       mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
 
        return 0;
 }
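
The error labels in the split path above unwind in reverse order of creation; the "i = count;" assignments let a failure in a later stage fall through and fully unwind each earlier stage, which had already completed for every port. A minimal standalone sketch of the same staged-unwind pattern, with hypothetical stage names:

#include <stdio.h>

/* hypothetical two-stage setup; stage_b() fails on item 2 to show the unwind */
static int stage_a(int i) { printf("a(%d)\n", i); return 0; }
static int stage_b(int i) { printf("b(%d)\n", i); return i == 2 ? -1 : 0; }
static void undo_a(int i) { printf("undo a(%d)\n", i); }
static void undo_b(int i) { printf("undo b(%d)\n", i); }

static int create_all(int count)
{
        int i, err;

        for (i = 0; i < count; i++) {
                err = stage_a(i);
                if (err)
                        goto err_stage_a;
        }
        for (i = 0; i < count; i++) {
                err = stage_b(i);
                if (err)
                        goto err_stage_b;
        }
        return 0;

err_stage_b:
        for (i--; i >= 0; i--)
                undo_b(i);
        i = count;      /* stage a completed for every item: unwind all of it */
err_stage_a:
        for (i--; i >= 0; i--)
                undo_a(i);
        return err;
}

int main(void)
{
        return create_all(4) ? 1 : 0;
}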
index e2c022d3e2f3bf19df2084049920f40b7eac4c26..13b30eaa13d471180d473459aa8fe38606a0b8f1 100644 (file)
@@ -229,6 +229,11 @@ struct mlxsw_sp_port {
                struct ieee_maxrate *maxrate;
                struct ieee_pfc *pfc;
        } dcb;
+       struct {
+               u8 module;
+               u8 width;
+               u8 lane;
+       } mapping;
        /* 802.1Q bridge VLANs */
        unsigned long *active_vlans;
        unsigned long *untagged_vlans;
index 753064679bdeba5d38fa903f0350480b96c71978..61cc6869fa6508f549d59bc9a525e5e3901d73a7 100644 (file)
@@ -1105,6 +1105,39 @@ static int qed_get_port_type(u32 media_type)
        return port_type;
 }
 
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+                            struct qed_mcp_link_params *params,
+                            struct qed_mcp_link_state *link,
+                            struct qed_mcp_link_capabilities *link_caps)
+{
+       void *p;
+
+       if (!IS_PF(hwfn->cdev)) {
+               qed_vf_get_link_params(hwfn, params);
+               qed_vf_get_link_state(hwfn, link);
+               qed_vf_get_link_caps(hwfn, link_caps);
+
+               return 0;
+       }
+
+       p = qed_mcp_get_link_params(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(params, p, sizeof(*params));
+
+       p = qed_mcp_get_link_state(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(link, p, sizeof(*link));
+
+       p = qed_mcp_get_link_capabilities(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(link_caps, p, sizeof(*link_caps));
+
+       return 0;
+}
+
 static void qed_fill_link(struct qed_hwfn *hwfn,
                          struct qed_link_output *if_link)
 {
@@ -1116,15 +1149,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
        memset(if_link, 0, sizeof(*if_link));
 
        /* Prepare source inputs */
-       if (IS_PF(hwfn->cdev)) {
-               memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
-               memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
-               memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
-                      sizeof(link_caps));
-       } else {
-               qed_vf_get_link_params(hwfn, &params);
-               qed_vf_get_link_state(hwfn, &link);
-               qed_vf_get_link_caps(hwfn, &link_caps);
+       if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+               dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+               return;
        }
 
        /* Set the link parameters to pass to protocol driver */
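
qed_fill_link() previously memcpy'd straight from the three MCP getters, which can return NULL before the management-firmware data is populated; the new helper bails with -ENXIO on the first missing source so the caller warns once and returns. A standalone sketch of the same consolidation, with hypothetical getter names:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct params { int speed; };

/* hypothetical getter; may return NULL when the firmware data isn't there */
static struct params *get_params(int ok)
{
        static struct params p = { .speed = 40000 };
        return ok ? &p : NULL;
}

static int get_link_data(struct params *out, int ok)
{
        struct params *p = get_params(ok);

        if (!p)
                return -ENXIO;  /* caller warns once instead of crashing */
        memcpy(out, p, sizeof(*out));
        return 0;
}

int main(void)
{
        struct params params;

        if (get_link_data(&params, 1))
                fprintf(stderr, "no link data available\n");
        else
                printf("speed %d\n", params.speed);

        if (get_link_data(&params, 0))
                fprintf(stderr, "no link data available\n");
        return 0;
}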
index c8667c65e685b3be8709a0daa188a1e07d389e22..c90b2b6ad96937feb7f04ef1a8123a73bca061d4 100644 (file)
 #include "qed_vf.h"
 #define QED_VF_ARRAY_LENGTH (3)
 
+#ifdef CONFIG_QED_SRIOV
 #define IS_VF(cdev)             ((cdev)->b_is_vf)
 #define IS_PF(cdev)             (!((cdev)->b_is_vf))
-#ifdef CONFIG_QED_SRIOV
 #define IS_PF_SRIOV(p_hwfn)     (!!((p_hwfn)->cdev->p_iov_info))
 #else
+#define IS_VF(cdev)             (0)
+#define IS_PF(cdev)             (1)
 #define IS_PF_SRIOV(p_hwfn)     (0)
 #endif
 #define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))
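
Moving IS_VF()/IS_PF() inside the #ifdef gives non-SRIOV builds compile-time constants, so VF-only branches are dropped entirely (and the qede PCI table below stops claiming a VF device id the build cannot drive). A sketch of the same shape with a hypothetical CONFIG_FEATURE_X switch:

#include <stdio.h>

/* #define CONFIG_FEATURE_X 1 */        /* hypothetical config switch */

#ifdef CONFIG_FEATURE_X
#define IS_X(dev)       ((dev)->is_x)
#else
#define IS_X(dev)       (0)     /* constant: dead branches fold away */
#endif

struct dev { int is_x; };

int main(void)
{
        struct dev d = { .is_x = 1 };

        (void)d;        /* d is unreferenced when IS_X() folds to constant 0 */
        if (IS_X(&d))
                printf("feature-x path\n");
        else
                printf("base path\n");
        return 0;
}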
index 5d00d1404bfc1b3e0e6ef76af0f392dff28fbe2b..5733d188822348c45c5a4f23cd245a0e3cfc8188 100644 (file)
@@ -87,7 +87,9 @@ static const struct pci_device_id qede_pci_tbl[] = {
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
        { 0 }
 };
 
index 7f295c4d7b80cb866c3f2f10f71aeeaefaf3caa5..2a9228a6e4a08cea22ba5bed9e1eae24cc25f591 100644 (file)
@@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
 
        case MC_CMD_MEDIA_XFP:
        case MC_CMD_MEDIA_SFP_PLUS:
-               result |= SUPPORTED_FIBRE;
-               break;
-
        case MC_CMD_MEDIA_QSFP_PLUS:
                result |= SUPPORTED_FIBRE;
+               if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+                       result |= SUPPORTED_1000baseT_Full;
+               if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+                       result |= SUPPORTED_10000baseT_Full;
                if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
                        result |= SUPPORTED_40000baseCR4_Full;
                break;
index 4f7283d05588b53be530270b481cc100d05eb63a..44da877d2483806daf07c2c57760774141d635bf 100644 (file)
@@ -156,7 +156,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
                struct netdev_hw_addr *ha;
 
                netdev_for_each_uc_addr(ha, dev) {
-                       dwmac4_set_umac_addr(ioaddr, ha->addr, reg);
+                       dwmac4_set_umac_addr(hw, ha->addr, reg);
                        reg++;
                }
        }
index eac45d0c75e2c0cc3855f69767737bc9fb7c440f..a473c182c91d0e4f981640ee285486e4a48e95d8 100644 (file)
@@ -3450,8 +3450,6 @@ int stmmac_resume(struct device *dev)
        if (!netif_running(ndev))
                return 0;
 
-       spin_lock_irqsave(&priv->lock, flags);
-
        /* The Power Down bit in the PM register is cleared
         * automatically as soon as a magic packet or a Wake-up frame
         * is received. Anyway, it's better to manually clear
@@ -3459,7 +3457,9 @@ int stmmac_resume(struct device *dev)
         * from other devices (e.g. a serial console).
         */
        if (device_may_wakeup(priv->device)) {
+               spin_lock_irqsave(&priv->lock, flags);
                priv->hw->mac->pmt(priv->hw, 0);
+               spin_unlock_irqrestore(&priv->lock, flags);
                priv->irq_wake = 0;
        } else {
                pinctrl_pm_select_default_state(priv->device);
@@ -3473,6 +3473,8 @@ int stmmac_resume(struct device *dev)
 
        netif_device_attach(ndev);
 
+       spin_lock_irqsave(&priv->lock, flags);
+
        priv->cur_rx = 0;
        priv->dirty_rx = 0;
        priv->dirty_tx = 0;
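
The resume path previously held the spinlock across calls that may sleep (pinctrl selection, netif attach); the fix takes it only around the PMT register write and the ring-state reset. A pthread sketch of the same lock-scope narrowing, with hypothetical names (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int cur_rx, dirty_rx;    /* shared ring state */

static void pmt_clear(void) { printf("clear PMT\n"); }  /* hypothetical hw op */
static void attach_netdev(void) { printf("attach\n"); } /* may sleep: no lock */

static void resume(void)
{
        pthread_mutex_lock(&lock);
        pmt_clear();            /* only the hw register write is locked */
        pthread_mutex_unlock(&lock);

        attach_netdev();        /* sleeping work happens unlocked */

        pthread_mutex_lock(&lock);
        cur_rx = 0;             /* ring reset needs the lock again */
        dirty_rx = 0;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        resume();
        return 0;
}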
index 4b08a2f52b3e6074218fb6ba876a639855818d3d..e6bb0ecb12c74cb88204635d3ec2712816597a44 100644 (file)
@@ -1339,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
        if (priv->coal_intvl != 0) {
                struct ethtool_coalesce coal;
 
-               coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+               coal.rx_coalesce_usecs = priv->coal_intvl;
                cpsw_set_coalesce(ndev, &coal);
        }
 
index db8022ae415bd234c19c8348dc6f90697d6ad840..08885bc8d6db6a11a775b30d3a39b4e9bc826f8e 100644 (file)
@@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                                rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
 
                                segCnt = rcdlro->segCnt;
-                               BUG_ON(segCnt <= 1);
+                               WARN_ON_ONCE(segCnt == 0);
                                mss = rcdlro->mss;
                                if (unlikely(segCnt <= 1))
                                        segCnt = 0;
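
BUG_ON() halts the machine on a condition the device can apparently produce; downgrading it to a one-shot warning keeps the system alive while the segCnt <= 1 fallback above handles the packet. A sketch of the same defensive posture, with hypothetical names:

#include <stdio.h>

/* warn once instead of aborting, in the spirit of WARN_ON_ONCE() */
#define WARN_ONCE(cond) do {                                    \
        static int warned;                                      \
        if ((cond) && !warned) {                                \
                warned = 1;                                     \
                fprintf(stderr, "warning: %s\n", #cond);        \
        }                                                       \
} while (0)

static int handle(unsigned int seg_cnt)
{
        WARN_ONCE(seg_cnt == 0);        /* unexpected, but survivable */
        if (seg_cnt <= 1)
                seg_cnt = 0;            /* treat as a non-LRO packet */
        return seg_cnt;
}

int main(void)
{
        printf("%d %d %d\n", handle(0), handle(1), handle(4));
        return 0;
}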
index c4825392d64b6c8de17822532d113689f88d8bb7..3d2b64e63408d69ae86fb15b2b18368ab01b5025 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.8.0-k"
 
 /* a 32-bit int; each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040700
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040800
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index d0631b6cfd5302ee7d73c25937f29f8184e0be83..62f475e31077ca1fa1df15863f02ee0911c46997 100644 (file)
@@ -2540,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                           const u8 *mac, struct station_info *sinfo)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_scb_val_le scb_val;
        s32 err = 0;
        struct brcmf_sta_info_le sta_info_le;
        u32 sta_flags;
        u32 is_tdls_peer;
        s32 total_rssi;
        s32 count_rssi;
+       int rssi;
        u32 i;
 
        brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
@@ -2629,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                        sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
                        total_rssi /= count_rssi;
                        sinfo->signal = total_rssi;
+               } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+                       &ifp->vif->sme_state)) {
+                       memset(&scb_val, 0, sizeof(scb_val));
+                       err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+                                                    &scb_val, sizeof(scb_val));
+                       if (err) {
+                               brcmf_err("Could not get rssi (%d)\n", err);
+                               goto done;
+                       } else {
+                               rssi = le32_to_cpu(scb_val.val);
+                               sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+                               sinfo->signal = rssi;
+                               brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+                       }
                }
        }
 done:
index 68f1ce02f4bf83d8d787e256cb5d74628cc4040e..2b9a2bc429d6fc23a49aecad27e36e14ac5408ce 100644 (file)
@@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
                brcmu_pkt_buf_free_skb(skb);
                return;
        }
+
+       skb->protocol = eth_type_trans(skb, ifp->ndev);
        brcmf_netif_rx(ifp, skb);
 }
 
index 9ed0ed1bf51481e4458dbeae2d105731bbf0f575..4dd5adcdd29bafae3f1432b87758ea2468189dbe 100644 (file)
@@ -2776,6 +2776,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
        if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
            !info->attrs[HWSIM_ATTR_FLAGS] ||
            !info->attrs[HWSIM_ATTR_COOKIE] ||
+           !info->attrs[HWSIM_ATTR_SIGNAL] ||
            !info->attrs[HWSIM_ATTR_TX_INFO])
                goto out;
 
index 0f48048b865407fe8d984b1057bd262ccd28dc7f..3a0faa8fe9d46deef63514afeba5dbbb9ecd7d14 100644 (file)
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m);
 void rtl_addr_delay(u32 addr)
 {
        if (addr == 0xfe)
-               msleep(50);
+               mdelay(50);
        else if (addr == 0xfd)
                msleep(5);
        else if (addr == 0xfc)
@@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
                rtl_addr_delay(addr);
        } else {
                rtl_set_rfreg(hw, rfpath, addr, mask, data);
-               usleep_range(1, 2);
+               udelay(1);
        }
 }
 EXPORT_SYMBOL(rtl_rfreg_delay);
@@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
                rtl_addr_delay(addr);
        } else {
                rtl_set_bbreg(hw, addr, MASKDWORD, data);
-               usleep_range(1, 2);
+               udelay(1);
        }
 }
 EXPORT_SYMBOL(rtl_bb_delay);
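
msleep() and usleep_range() may schedule and therefore must not be called from atomic context, which these register-programming paths can run in; mdelay() and udelay() busy-wait instead. A userspace illustration of the two delay styles (nanosleep versus a spin on the monotonic clock):

#include <stdio.h>
#include <time.h>

/* sleeping delay: yields the CPU; illegal in atomic context in the kernel */
static void sleep_ms(long ms)
{
        struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
        nanosleep(&ts, NULL);
}

/* busy-wait delay: burns CPU but never schedules, like mdelay() */
static void busy_ms(long ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000L +
                 (now.tv_nsec - start.tv_nsec) / 1000000L < ms);
}

int main(void)
{
        sleep_ms(50);
        busy_ms(50);
        printf("done\n");
        return 0;
}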
index 78dca3193ca4ccc61df9784ff7699834c61e08b2..befac5b19490ee91e13dec00c471931005b871b7 100644 (file)
@@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
 static void nvme_dev_unmap(struct nvme_dev *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
+       int bars;
+
        if (dev->bar)
                iounmap(dev->bar);
-       pci_release_regions(to_pci_dev(dev->dev));
+
+       bars = pci_select_bars(pdev, IORESOURCE_MEM);
+       pci_release_selected_regions(pdev, bars);
 }
 
 static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 
        return 0;
   release:
-       pci_release_regions(pdev);
+       pci_release_selected_regions(pdev, bars);
        return -ENODEV;
 }
 
index 14f2f8c7c2607e603337b30a37117195f95f0880..33daffc4392c646a25455ebe5bf7f2ecdcc261b9 100644 (file)
@@ -395,7 +395,7 @@ static int unflatten_dt_nodes(const void *blob,
                              struct device_node **nodepp)
 {
        struct device_node *root;
-       int offset = 0, depth = 0;
+       int offset = 0, depth = 0, initial_depth = 0;
 #define FDT_MAX_DEPTH  64
        unsigned int fpsizes[FDT_MAX_DEPTH];
        struct device_node *nps[FDT_MAX_DEPTH];
@@ -405,11 +405,22 @@ static int unflatten_dt_nodes(const void *blob,
        if (nodepp)
                *nodepp = NULL;
 
+       /*
+        * We're unflattening a device sub-tree if @dad is valid. There may
+        * be multiple nodes in the first level of depth, so we need to set
+        * @depth to 1 to make fdt_next_node() happy, as it bails out
+        * immediately when a negative @depth is found. Otherwise, the
+        * device nodes other than the first one won't be unflattened
+        * successfully.
+        */
+       if (dad)
+               depth = initial_depth = 1;
+
        root = dad;
        fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
        nps[depth] = dad;
+
        for (offset = 0;
-            offset >= 0 && depth >= 0;
+            offset >= 0 && depth >= initial_depth;
             offset = fdt_next_node(blob, offset, &depth)) {
                if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
                        continue;
index e7bfc175b8e1e9a0a171a3173191861399d21b1d..6ec743faabe8af781294eb2e275fb8eb16fd4344 100644 (file)
@@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 EXPORT_SYMBOL_GPL(of_irq_to_resource);
 
 /**
- * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
  * @dev: pointer to device tree node
- * @index: zero-based index of the irq
- *
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created.
+ * @index: zero-based index of the IRQ
  *
+ * Returns the Linux IRQ number on success, or 0 on IRQ mapping failure, or
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
+ * of any other failure.
  */
 int of_irq_get(struct device_node *dev, int index)
 {
@@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index)
 EXPORT_SYMBOL_GPL(of_irq_get);
 
 /**
- * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
  * @dev: pointer to device tree node
- * @name: irq name
+ * @name: IRQ name
  *
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created, or error code in case of any other failure.
+ * Returns the Linux IRQ number on success, or 0 on IRQ mapping failure, or
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
+ * of any other failure.
  */
 int of_irq_get_byname(struct device_node *dev, const char *name)
 {
index ed01c0172e4a5f8b45e0a382b789f0cd20d60987..216648233874fabdb3cd7d7a5c06072485af1508 100644 (file)
@@ -127,8 +127,15 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
        }
 
        /* Need to adjust the alignment to satisfy the CMA requirement */
-       if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
-               align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+       if (IS_ENABLED(CONFIG_CMA)
+           && of_flat_dt_is_compatible(node, "shared-dma-pool")
+           && of_get_flat_dt_prop(node, "reusable", NULL)
+           && !of_get_flat_dt_prop(node, "no-map", NULL)) {
+               unsigned long order =
+                       max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+
+               align = max(align, (phys_addr_t)PAGE_SIZE << order);
+       }
 
        prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
        if (prop) {
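
The alignment bump now applies only to reusable, mappable shared-dma-pool regions, and equals PAGE_SIZE shifted by the larger of MAX_ORDER - 1 and pageblock_order. A worked computation with assumed x86-64-style values (both constants are config-dependent):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define MAX_ORDER       11      /* assumed; config-dependent */
#define PAGEBLOCK_ORDER 9       /* assumed; config-dependent */

int main(void)
{
        unsigned long order = MAX_ORDER - 1 > PAGEBLOCK_ORDER ?
                              MAX_ORDER - 1 : PAGEBLOCK_ORDER;
        unsigned long align = PAGE_SIZE << order;

        /* 4096 << 10 = 4 MiB: CMA areas must be aligned this coarsely */
        printf("order %lu, align %lu MiB\n", order, align >> 20);
        return 0;
}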
index dfbab61a1b473d72cb6e631c6738d1c60fb99d96..1fa3a3219c454e23e0081b7073acd342909aacda 100644 (file)
@@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
                else
                        pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
                                              *(u16 *)buf);
-               buf += 2;
+               buf += 4;
        }
-       len += 2;
+       len += 4;
 
        /*
         * If we have any Low Priority VCs and a VC Arbitration Table Offset
index 1b8304e1efaa687d71226e62de7c56ac19340bd0..140436a046c03f992c3fa3fd71ed485bfdb7d109 100644 (file)
@@ -1010,8 +1010,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
                if (!ret)
                        ret = init_fn(pmu);
        } else {
-               ret = probe_current_pmu(pmu, probe_table);
                cpumask_setall(&pmu->supported_cpus);
+               ret = probe_current_pmu(pmu, probe_table);
        }
 
        if (ret) {
index cc093ebfda9435a28ec627ca1f022b877c42b342..8b851f718123a1110016cfc462385b85a9da3bac 100644 (file)
@@ -233,8 +233,12 @@ static inline int __is_running(const struct exynos_mipi_phy_desc *data,
                        struct exynos_mipi_video_phy *state)
 {
        u32 val;
+       int ret;
+
+       ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
+       if (ret)
+               return 0;
 
-       regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
        return val & data->resetn_val;
 }
 
index 0a477d24cf763326abace4ae8d20e1a834bfee8a..bf46844dc387c69f9c52d9cec3da2f5f0c7f16fe 100644 (file)
@@ -293,11 +293,18 @@ static int ti_pipe3_init(struct phy *x)
                ret = ti_pipe3_dpll_wait_lock(phy);
        }
 
-       /* Program the DPLL only if not locked */
+       /* SATA has issues if re-programmed when locked */
        val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
-       if (!(val & PLL_LOCK))
-               if (ti_pipe3_dpll_program(phy))
-                       return -EINVAL;
+       if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node,
+                                                       "ti,phy-pipe3-sata"))
+               return ret;
+
+       /* Program the DPLL */
+       ret = ti_pipe3_dpll_program(phy);
+       if (ret) {
+               ti_pipe3_disable_clocks(phy);
+               return -EINVAL;
+       }
 
        return ret;
 }
index 6b6af6cba45401ee98d1c7b967796157d2f9fcdc..d9b10a39a2cf77c6f67b3655b3a1f56143f0083e 100644 (file)
@@ -463,7 +463,8 @@ static int twl4030_phy_power_on(struct phy *phy)
        twl4030_usb_set_mode(twl, twl->usb_mode);
        if (twl->usb_mode == T2_USB_MODE_ULPI)
                twl4030_i2c_access(twl, 0);
-       schedule_delayed_work(&twl->id_workaround_work, 0);
+       twl->linkstat = MUSB_UNKNOWN;
+       schedule_delayed_work(&twl->id_workaround_work, HZ);
 
        return 0;
 }
@@ -537,6 +538,7 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
        struct twl4030_usb *twl = _twl;
        enum musb_vbus_id_status status;
        bool status_changed = false;
+       int err;
 
        status = twl4030_usb_linkstat(twl);
 
@@ -567,7 +569,9 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
                        pm_runtime_mark_last_busy(twl->dev);
                        pm_runtime_put_autosuspend(twl->dev);
                }
-               musb_mailbox(status);
+               err = musb_mailbox(status);
+               if (err)
+                       twl->linkstat = MUSB_UNKNOWN;
        }
 
        /* don't schedule during sleep - irq works right then */
@@ -595,7 +599,8 @@ static int twl4030_phy_init(struct phy *phy)
        struct twl4030_usb *twl = phy_get_drvdata(phy);
 
        pm_runtime_get_sync(twl->dev);
-       schedule_delayed_work(&twl->id_workaround_work, 0);
+       twl->linkstat = MUSB_UNKNOWN;
+       schedule_delayed_work(&twl->id_workaround_work, HZ);
        pm_runtime_mark_last_busy(twl->dev);
        pm_runtime_put_autosuspend(twl->dev);
 
@@ -763,7 +768,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
        if (cable_present(twl->linkstat))
                pm_runtime_put_noidle(twl->dev);
        pm_runtime_mark_last_busy(twl->dev);
-       pm_runtime_put_sync_suspend(twl->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
+       pm_runtime_put_sync(twl->dev);
        pm_runtime_disable(twl->dev);
 
        /* autogate 60MHz ULPI clock,
index d5bf9fae2ddd425cf32570b668a2c0a5b3aec59f..a87439ee4cdd1da81d40926560c8d9ca89b1285f 100644 (file)
@@ -391,4 +391,12 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
 }
 EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map);
 
+void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
+                                struct pinctrl_map *map,
+                                unsigned num_maps)
+{
+       pinctrl_utils_free_map(pctldev, map, num_maps);
+}
+EXPORT_SYMBOL_GPL(pinconf_generic_dt_free_map);
+
 #endif
index c06bb85c283912590d306e3336e28419f32a0c7c..3ec0025d19e7e0e069a793cfe11e5452c2661831 100644 (file)
@@ -103,7 +103,6 @@ config DELL_SMBIOS
 
 config DELL_LAPTOP
        tristate "Dell Laptop Extras"
-       depends on X86
        depends on DELL_SMBIOS
        depends on DMI
        depends on BACKLIGHT_CLASS_DEVICE
@@ -505,7 +504,7 @@ config THINKPAD_ACPI_HOTKEY_POLL
 
 config SENSORS_HDAPS
        tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
-       depends on INPUT && X86
+       depends on INPUT
        select INPUT_POLLDEV
        default n
        help
@@ -749,7 +748,7 @@ config TOSHIBA_WMI
 
 config ACPI_CMPC
        tristate "CMPC Laptop Extras"
-       depends on X86 && ACPI
+       depends on ACPI
        depends on RFKILL || RFKILL=n
        select INPUT
        select BACKLIGHT_CLASS_DEVICE
@@ -848,7 +847,7 @@ config INTEL_IMR
 
 config INTEL_PMC_CORE
        bool "Intel PMC Core driver"
-       depends on X86 && PCI
+       depends on PCI
        ---help---
          The Intel Platform Controller Hub for Intel Core SoCs provides access
          to Power Management Controller registers via a PCI interface. This
@@ -860,7 +859,7 @@ config INTEL_PMC_CORE
 
 config IBM_RTL
        tristate "Device driver to enable PRTL support"
-       depends on X86 && PCI
+       depends on PCI
        ---help---
         Enable support for IBM Premium Real Time Mode (PRTM).
         This module will allow you to enter and exit PRTM in the BIOS via
@@ -894,7 +893,6 @@ config XO15_EBOOK
 
 config SAMSUNG_LAPTOP
        tristate "Samsung Laptop driver"
-       depends on X86
        depends on RFKILL || RFKILL = n
        depends on ACPI_VIDEO || ACPI_VIDEO = n
        depends on BACKLIGHT_CLASS_DEVICE
index 4a23fbc66b715bcc4fce2a6ddccb2c8b743ae7b2..d1a091b93192c7dc4a20d8dbf9901d65d918e1e1 100644 (file)
@@ -567,6 +567,7 @@ static void ideapad_sysfs_exit(struct ideapad_private *priv)
 static const struct key_entry ideapad_keymap[] = {
        { KE_KEY, 6,  { KEY_SWITCHVIDEOMODE } },
        { KE_KEY, 7,  { KEY_CAMERA } },
+       { KE_KEY, 8,  { KEY_MICMUTE } },
        { KE_KEY, 11, { KEY_F16 } },
        { KE_KEY, 13, { KEY_WLAN } },
        { KE_KEY, 16, { KEY_PROG1 } },
@@ -809,6 +810,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
                                break;
                        case 13:
                        case 11:
+                       case 8:
                        case 7:
                        case 6:
                                ideapad_input_report(priv, vpc_bit);
index c3bfa1fe95bf3fb9c61d36c1f42012547ee07580..b65ce7519411a87f2d279360df1b8fe252efce0b 100644 (file)
@@ -2043,6 +2043,7 @@ static int hotkey_autosleep_ack;
 
 static u32 hotkey_orig_mask;           /* events the BIOS had enabled */
 static u32 hotkey_all_mask;            /* all events supported in fw */
+static u32 hotkey_adaptive_all_mask;   /* all adaptive events supported in fw */
 static u32 hotkey_reserved_mask;       /* events better left disabled */
 static u32 hotkey_driver_mask;         /* events needed by the driver */
 static u32 hotkey_user_mask;           /* events visible to userspace */
@@ -2742,6 +2743,17 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
 
 static DEVICE_ATTR_RO(hotkey_all_mask);
 
+/* sysfs hotkey adaptive_all_mask -------------------------------------- */
+static ssize_t hotkey_adaptive_all_mask_show(struct device *dev,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+                       hotkey_adaptive_all_mask | hotkey_source_mask);
+}
+
+static DEVICE_ATTR_RO(hotkey_adaptive_all_mask);
+
 /* sysfs hotkey recommended_mask --------------------------------------- */
 static ssize_t hotkey_recommended_mask_show(struct device *dev,
                                            struct device_attribute *attr,
@@ -2985,6 +2997,7 @@ static struct attribute *hotkey_attributes[] __initdata = {
        &dev_attr_wakeup_hotunplug_complete.attr,
        &dev_attr_hotkey_mask.attr,
        &dev_attr_hotkey_all_mask.attr,
+       &dev_attr_hotkey_adaptive_all_mask.attr,
        &dev_attr_hotkey_recommended_mask.attr,
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
        &dev_attr_hotkey_source_mask.attr,
@@ -3321,20 +3334,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        if (!tp_features.hotkey)
                return 1;
 
-       /*
-        * Check if we have an adaptive keyboard, like on the
-        * Lenovo Carbon X1 2014 (2nd Gen).
-        */
-       if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
-               if ((hkeyv >> 8) == 2) {
-                       tp_features.has_adaptive_kbd = true;
-                       res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
-                                       &adaptive_kbd_attr_group);
-                       if (res)
-                               goto err_exit;
-               }
-       }
-
        quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
                                     ARRAY_SIZE(tpacpi_hotkey_qtable));
 
@@ -3357,30 +3356,70 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
           A30, R30, R31, T20-22, X20-21, X22-24.  Detected by checking
           for HKEY interface version 0x100 */
        if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
-               if ((hkeyv >> 8) != 1) {
-                       pr_err("unknown version of the HKEY interface: 0x%x\n",
-                              hkeyv);
-                       pr_err("please report this to %s\n", TPACPI_MAIL);
-               } else {
+               vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+                           "firmware HKEY interface version: 0x%x\n",
+                           hkeyv);
+
+               switch (hkeyv >> 8) {
+               case 1:
                        /*
                         * MHKV 0x100 in A31, R40, R40e,
                         * T4x, X31, and later
                         */
-                       vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
-                               "firmware HKEY interface version: 0x%x\n",
-                               hkeyv);
 
                        /* Paranoia check AND init hotkey_all_mask */
                        if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
                                        "MHKA", "qd")) {
-                               pr_err("missing MHKA handler, "
-                                      "please report this to %s\n",
+                               pr_err("missing MHKA handler, please report this to %s\n",
                                       TPACPI_MAIL);
                                /* Fallback: pre-init for FN+F3,F4,F12 */
                                hotkey_all_mask = 0x080cU;
                        } else {
                                tp_features.hotkey_mask = 1;
                        }
+                       break;
+
+               case 2:
+                       /*
+                        * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016)
+                        */
+
+                       /* Paranoia check AND init hotkey_all_mask */
+                       if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+                                       "MHKA", "dd", 1)) {
+                               pr_err("missing MHKA handler, please report this to %s\n",
+                                      TPACPI_MAIL);
+                               /* Fallback: pre-init for FN+F3,F4,F12 */
+                               hotkey_all_mask = 0x080cU;
+                       } else {
+                               tp_features.hotkey_mask = 1;
+                       }
+
+                       /*
+                        * Check if we have an adaptive keyboard, like on the
+                        * Lenovo Carbon X1 2014 (2nd Gen).
+                        */
+                       if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,
+                                      "MHKA", "dd", 2)) {
+                               if (hotkey_adaptive_all_mask != 0) {
+                                       tp_features.has_adaptive_kbd = true;
+                                       res = sysfs_create_group(
+                                               &tpacpi_pdev->dev.kobj,
+                                               &adaptive_kbd_attr_group);
+                                       if (res)
+                                               goto err_exit;
+                               }
+                       } else {
+                               tp_features.has_adaptive_kbd = false;
+                               hotkey_adaptive_all_mask = 0x0U;
+                       }
+                       break;
+
+               default:
+                       pr_err("unknown version of the HKEY interface: 0x%x\n",
+                              hkeyv);
+                       pr_err("please report this to %s\n", TPACPI_MAIL);
+                       break;
                }
        }
 
index dba3843c53b8d2162d10100d2473be79014d4b4b..ed337a8c34ab4bb231814136fe5cd9628962eff1 100644 (file)
@@ -457,7 +457,8 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
 {
        int err;
 
-       if (!pwm)
+       if (!pwm || !state || !state->period ||
+           state->duty_cycle > state->period)
                return -EINVAL;
 
        if (!memcmp(state, &pwm->state, sizeof(*state)))
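
pwm_apply_state() now rejects a NULL state, a zero period, and a duty cycle longer than the period before touching hardware; the sysfs store path further below now also checks its return value. A minimal sketch of the same validation, with a hypothetical state struct:

#include <errno.h>
#include <stdio.h>

struct pwm_state_s { unsigned int period, duty_cycle; };        /* hypothetical */

static int apply_state(const struct pwm_state_s *state)
{
        if (!state || !state->period || state->duty_cycle > state->period)
                return -EINVAL; /* reject nonsense before touching hardware */
        return 0;
}

int main(void)
{
        struct pwm_state_s ok = { 1000, 500 }, bad = { 1000, 2000 };

        printf("%d %d %d\n", apply_state(&ok), apply_state(&bad),
               apply_state(NULL));
        return 0;
}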
index f994c7eaf41c6c3d67a0c688247561dadee49863..14fc011faa32fee990c7ab9253d5faabe67465d4 100644 (file)
@@ -272,7 +272,7 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
        chip->chip.of_pwm_n_cells = 3;
        chip->chip.can_sleep = 1;
 
-       ret = pwmchip_add(&chip->chip);
+       ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED);
        if (ret) {
                clk_disable_unprepare(hlcdc->periph_clk);
                return ret;
index d98599249a05688137325fcf906415e14aa7aed4..01695d48dd543152624b9ed40a9c3afd9de45d0d 100644 (file)
@@ -152,7 +152,7 @@ static ssize_t enable_store(struct device *child,
                goto unlock;
        }
 
-       pwm_apply_state(pwm, &state);
+       ret = pwm_apply_state(pwm, &state);
 
 unlock:
        mutex_unlock(&export->lock);
index 56a17ec5b5efe2432937351a7f45d441a9e48722..526bf23dcb49a543032c6ac17da76c2e09009545 100644 (file)
@@ -140,6 +140,19 @@ static const struct regulator_ops rpm_smps_ldo_ops = {
        .enable = rpm_reg_enable,
        .disable = rpm_reg_disable,
        .is_enabled = rpm_reg_is_enabled,
+       .list_voltage = regulator_list_voltage_linear_range,
+
+       .get_voltage = rpm_reg_get_voltage,
+       .set_voltage = rpm_reg_set_voltage,
+
+       .set_load = rpm_reg_set_load,
+};
+
+static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
+       .enable = rpm_reg_enable,
+       .disable = rpm_reg_disable,
+       .is_enabled = rpm_reg_is_enabled,
+       .list_voltage = regulator_list_voltage_linear_range,
 
        .get_voltage = rpm_reg_get_voltage,
        .set_voltage = rpm_reg_set_voltage,
@@ -247,7 +260,7 @@ static const struct regulator_desc pm8941_nldo = {
 static const struct regulator_desc pm8941_lnldo = {
        .fixed_uV = 1740000,
        .n_voltages = 1,
-       .ops = &rpm_smps_ldo_ops,
+       .ops = &rpm_smps_ldo_ops_fixed,
 };
 
 static const struct regulator_desc pm8941_switch = {
index 572816e30095aebac1025e5a97343c4272cde2d5..c139890c1514401b38ff02367c67c2475f3d2739 100644 (file)
@@ -94,11 +94,14 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
                int ramp_delay)
 {
        struct tps51632_chip *tps = rdev_get_drvdata(rdev);
-       int bit = ramp_delay/6000;
+       int bit;
        int ret;
 
-       if (bit)
-               bit--;
+       if (ramp_delay == 0)
+               bit = 0;
+       else
+               bit = DIV_ROUND_UP(ramp_delay, 6000) - 1;
+
        ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit));
        if (ret < 0)
                dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret);
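
The old computation truncated ramp_delay / 6000 and then decremented, so a request between supported steps (assumed here to be 6000 * (bit + 1)) selected the next step down; DIV_ROUND_UP picks the next step up, and the zero case is made explicit. A worked comparison:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* slew steps assumed to be multiples of 6000; bit n selects 6000 * (n + 1) */
static int old_bit(int ramp) { int b = ramp / 6000; return b ? b - 1 : 0; }
static int new_bit(int ramp) { return ramp ? DIV_ROUND_UP(ramp, 6000) - 1 : 0; }

int main(void)
{
        int ramps[] = { 0, 6000, 7000, 12000 };

        /* 7000 is the interesting case: old picks 6000, new picks 12000 */
        for (unsigned int i = 0; i < sizeof(ramps) / sizeof(ramps[0]); i++)
                printf("ramp %5d: old bit %d (%5d), new bit %d (%5d)\n",
                       ramps[i],
                       old_bit(ramps[i]), 6000 * (old_bit(ramps[i]) + 1),
                       new_bit(ramps[i]), 6000 * (new_bit(ramps[i]) + 1));
        return 0;
}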
index d4c285688ce98b8222dcc778ee646f6581e48df3..3ddc85e6efd65c845fa3a306c832ad2b1a48ac1d 100644 (file)
@@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
                } else {
                        struct scsi_cmnd *SCp;
 
-                       SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
+                       SCp = SDp->current_cmnd;
                        if(unlikely(SCp == NULL)) {
                                sdev_printk(KERN_ERR, SDp,
                                        "no saved request for untagged cmd\n");
@@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
                       slot->tag, slot);
        } else {
                slot->tag = SCSI_NO_TAG;
-               /* must populate current_cmnd for scsi_host_find_tag to work */
+               /* save current command for reselection */
                SCp->device->current_cmnd = SCp;
        }
        /* sanity check: some of the commands generated by the mid-layer
index 3408578b08d6adb23fe38c6a8c84c066236bc4ad..ff41c310c900a5760bf33423a5db4362c5dfc25b 100644 (file)
@@ -230,6 +230,7 @@ static struct {
        {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
        {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
        {"Promise", "", NULL, BLIST_SPARSELUN},
+       {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
        {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
        {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
        {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
index a8b610eaa0caa48bb83a36ccb0c514f6dc217d2d..106a6adbd6f12d0a86906239345ec3423eab88cf 100644 (file)
@@ -1128,7 +1128,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
  */
 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
 {
-       scmd->device->host->host_failed--;
        scmd->eh_eflags = 0;
        list_move_tail(&scmd->eh_entry, done_q);
 }
@@ -2227,6 +2226,9 @@ int scsi_error_handler(void *data)
                else
                        scsi_unjam_host(shost);
 
+               /* All scmds have been handled */
+               shost->host_failed = 0;
+
                /*
                 * Note - if the above fails completely, the action is to take
                 * individual devices offline and flush the queue of any
index f459dff30512efefabac12c33f18d34f5b46c541..60bff78e9ead8703e1e912307c504330d1a76d1f 100644 (file)
@@ -2867,10 +2867,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
        if (sdkp->opt_xfer_blocks &&
            sdkp->opt_xfer_blocks <= dev_max &&
            sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-           sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
-               rw_max = q->limits.io_opt =
-                       sdkp->opt_xfer_blocks * sdp->sector_size;
-       else
+           logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
+               q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+               rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+       } else
                rw_max = BLK_DEF_MAX_SECTORS;
 
        /* Combine with controller limits */
index 654630bb7d0edeb48438654e47d8a60151d2c87b..765a6f1ac1b7320edc860b1eb6baa30e7e1beac4 100644 (file)
@@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
        return blocks << (ilog2(sdev->sector_size) - 9);
 }
 
+static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
+{
+       return blocks * sdev->sector_size;
+}
+
 /*
  * A DIF-capable target device can be formatted with different
  * protection schemes.  Currently 0 through 3 are defined:
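
logical_to_bytes() exists because io_opt is expressed in bytes while rw_max is counted in 512-byte sectors, so the same opt_xfer_blocks value has to be scaled two different ways; the earlier code conflated the units. A worked example with 4096-byte logical blocks (ilog2 via a GCC/Clang builtin, valid for power-of-two sizes):

#include <stdio.h>

static unsigned int ilog2u(unsigned int v) { return __builtin_ctz(v); }

static unsigned long logical_to_sectors(unsigned int blk_size,
                                        unsigned long blocks)
{
        return blocks << (ilog2u(blk_size) - 9);        /* 512-byte sectors */
}

static unsigned long logical_to_bytes(unsigned int blk_size,
                                      unsigned long blocks)
{
        return blocks * blk_size;
}

int main(void)
{
        /* 32 blocks of 4096 bytes: 131072 bytes == 256 sectors */
        printf("%lu bytes, %lu sectors\n",
               logical_to_bytes(4096, 32), logical_to_sectors(4096, 32));
        return 0;
}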
index bbfee53cfcf50296e52a7849b4f0964145db1b13..845e49a52430db2f091ed4ff7bbeeed8f52efad7 100644 (file)
@@ -2521,12 +2521,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
        return 0;
 
  failed:
-       if (ni)
+       if (ni) {
                lnet_ni_decref(ni);
+               rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
+               rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
+       }
 
        rej.ibr_version             = version;
-       rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
-       rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
        kiblnd_reject(cmid, &rej);
 
        return -ECONNREFUSED;
index c17870cddb5b10e9077623b0887ffe7c41105902..fbce1f7e68caf5c3517179aa01bfbb16265f8c42 100644 (file)
@@ -102,7 +102,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8  *pbuf)
        if (!efuseTbl)
                return;
 
-       eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord));
+       eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
        if (!eFuseWord) {
                DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
                goto eFuseWord_failed;
index 87ea3b8449518e6a1e508a8434a139588876da51..363f3a34ddce48abb4ccac7c7a09edbe45107c48 100644 (file)
@@ -2072,7 +2072,8 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
 {
        struct hal_ops  *halfunc = &adapt->HalFunc;
 
-       adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL);
+
+       adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
        if (!adapt->HalData)
                DBG_88E("cant not alloc memory for HAL DATA\n");
 
index 6ceac4f2d4b227d52045632992568d08ea158c5b..5b4b47ed948ba2735870e78e50fe860c14e7d4a7 100644 (file)
@@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np,
                goto free_power_table;
        }
 
-       snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
-                cpufreq_dev->id);
-
-       cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
-                                                     &cpufreq_cooling_ops);
-       if (IS_ERR(cool_dev))
-               goto remove_idr;
-
        /* Fill freq-table in descending order of frequencies */
        for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
                freq = find_next_max(table, freq);
@@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np,
                        pr_debug("%s: freq:%u KHz\n", __func__, freq);
        }
 
+       snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+                cpufreq_dev->id);
+
+       cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
+                                                     &cpufreq_cooling_ops);
+       if (IS_ERR(cool_dev))
+               goto remove_idr;
+
        cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
        cpufreq_dev->cool_dev = cool_dev;
 
index 6dc810bce295ab22dbb10b47755dddb421f96957..944a6dca0fcb50298ce1b0c21f9f7cd36c1707ca 100644 (file)
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* USB3503 */
+       { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Microsoft Wireless Laser Mouse 6000 Receiver */
        { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -173,6 +176,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* MAYA44USB sound device */
        { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* ASUS Base Station(T100) */
+       { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
        /* Action Semiconductor flash disk */
        { USB_DEVICE(0x10d6, 0x2200), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
@@ -188,26 +195,22 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1908, 0x1315), .driver_info =
                        USB_QUIRK_HONOR_BNUMINTERFACES },
 
-       /* INTEL VALUE SSD */
-       { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
-
-       /* USB3503 */
-       { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
-
-       /* ASUS Base Station(T100) */
-       { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
-                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
-
        /* Protocol and OTG Electrical Test Device */
        { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Acer C120 LED Projector */
+       { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Blackmagic Design Intensity Shuttle */
        { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
 
        /* Blackmagic Design UltraStudio SDI */
        { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
 
+       /* INTEL VALUE SSD */
+       { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+
        { }  /* terminating entry must be last */
 };
 
index 3c58d633ce8057a7941ba521e568e21f295bbfca..dec0b21fc62687d700ffc3b040ec16a70a530710 100644 (file)
        DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt),                \
                                dev_name(hsotg->dev), ##__VA_ARGS__)
 
+#ifdef CONFIG_MIPS
+/*
+ * There are some MIPS machines that can run in either big-endian
+ * or little-endian mode, and that access the dwc2 registers without
+ * a byteswap in either mode.
+ * Unlike other architectures, MIPS apparently does not require a
+ * barrier before the __raw_writel() to synchronize with DMA but does
+ * require the barrier after the __raw_writel() to serialize a set of
+ * writes. This set of operations was added specifically for MIPS and
+ * should only be used there.
+ */
 static inline u32 dwc2_readl(const void __iomem *addr)
 {
        u32 value = __raw_readl(addr);
@@ -90,6 +101,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr)
        pr_info("INFO:: wrote %08x to %p\n", value, addr);
 #endif
 }
+#else
+/* Normal architectures just use readl/writel */
+static inline u32 dwc2_readl(const void __iomem *addr)
+{
+       return readl(addr);
+}
+
+static inline void dwc2_writel(u32 value, void __iomem *addr)
+{
+       writel(value, addr);
+
+#ifdef DWC2_LOG_WRITES
+       pr_info("info:: wrote %08x to %p\n", value, addr);
+#endif
+}
+#endif
 
 /* Maximum number of Endpoints/HostChannels */
 #define MAX_EPS_CHANNELS       16
index 4c5e3005e1dc780c1db4632c7f3356398e1c544b..26cf09d0fe3ce330ed65ac8dfa3dc4c38d809769 100644 (file)
@@ -1018,7 +1018,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
        return 1;
 }
 
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value);
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
 
 /**
  * get_ep_head - return the first request on the endpoint
@@ -1094,7 +1094,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
                case USB_ENDPOINT_HALT:
                        halted = ep->halted;
 
-                       dwc2_hsotg_ep_sethalt(&ep->ep, set);
+                       dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
 
                        ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
                        if (ret) {
@@ -2948,8 +2948,13 @@ static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
  * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
  * @ep: The endpoint to set halt.
  * @value: Set or unset the halt.
+ * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
+ *       the endpoint is busy processing requests.
+ *
+ * We need to stall the endpoint immediately if the request comes from the
+ * SetFeature protocol command handler.
  */
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
 {
        struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
        struct dwc2_hsotg *hs = hs_ep->parent;
@@ -2969,6 +2974,17 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
                return 0;
        }
 
+       if (hs_ep->isochronous) {
+               dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
+               return -EINVAL;
+       }
+
+       if (!now && value && !list_empty(&hs_ep->queue)) {
+               dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
+                       ep->name);
+               return -EAGAIN;
+       }
+
        if (hs_ep->dir_in) {
                epreg = DIEPCTL(index);
                epctl = dwc2_readl(hs->regs + epreg);
@@ -3020,7 +3036,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
        int ret = 0;
 
        spin_lock_irqsave(&hs->lock, flags);
-       ret = dwc2_hsotg_ep_sethalt(ep, value);
+       ret = dwc2_hsotg_ep_sethalt(ep, value, false);
        spin_unlock_irqrestore(&hs->lock, flags);
 
        return ret;
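
The new bool separates the host's SetFeature(ENDPOINT_HALT), which must stall at once, from a gadget driver's usb_ep_set_halt(), which now sees -EAGAIN while requests are still queued. A sketch of the two-caller contract, with hypothetical names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ep { int queued; bool halted; }; /* hypothetical endpoint state */

static int ep_sethalt(struct ep *ep, int value, bool now)
{
        if (!now && value && ep->queued)
                return -EAGAIN; /* driver request: defer while busy */
        ep->halted = value;     /* host SetFeature: stall immediately */
        return 0;
}

int main(void)
{
        struct ep ep = { .queued = 1 };

        printf("driver: %d\n", ep_sethalt(&ep, 1, false));      /* -EAGAIN */
        printf("host:   %d\n", ep_sethalt(&ep, 1, true));       /* 0, stalled */
        return 0;
}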
index 7ddf9449a06302ed7c72bb692ed4e5cc52bd40a5..654050684f4fadb4004587edcb653ca03c0d4f3f 100644 (file)
 #define DWC3_DEPCMD_GET_RSC_IDX(x)     (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
 #define DWC3_DEPCMD_STATUS(x)          (((x) >> 12) & 0x0F)
 #define DWC3_DEPCMD_HIPRI_FORCERM      (1 << 11)
+#define DWC3_DEPCMD_CLEARPENDIN                (1 << 11)
 #define DWC3_DEPCMD_CMDACT             (1 << 10)
 #define DWC3_DEPCMD_CMDIOC             (1 << 8)
 
index dd5cb5577dcaf62d411fb9776e47a23d4a4dacf7..2f1fb7e7aa548f831fb9cfa63d24513c292d15b4 100644 (file)
@@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, exynos);
 
-       ret = dwc3_exynos_register_phys(exynos);
-       if (ret) {
-               dev_err(dev, "couldn't register PHYs\n");
-               return ret;
-       }
-
        exynos->dev     = dev;
 
        exynos->clk = devm_clk_get(dev, "usbdrd30");
@@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
                goto err3;
        }
 
+       ret = dwc3_exynos_register_phys(exynos);
+       if (ret) {
+               dev_err(dev, "couldn't register PHYs\n");
+               goto err4;
+       }
+
        if (node) {
                ret = of_platform_populate(node, NULL, NULL, dev);
                if (ret) {
                        dev_err(dev, "failed to add dwc3 core\n");
-                       goto err4;
+                       goto err5;
                }
        } else {
                dev_err(dev, "no device node, failed to add dwc3 core\n");
                ret = -ENODEV;
-               goto err4;
+               goto err5;
        }
 
        return 0;
 
+err5:
+       platform_device_unregister(exynos->usb2_phy);
+       platform_device_unregister(exynos->usb3_phy);
 err4:
        regulator_disable(exynos->vdd10);
 err3:
index 5c0adb9c6fb27baa778615f17705ac6ab7896079..50d6ae6f88bc336a8631ce76f9c45c8de077b481 100644 (file)
@@ -129,12 +129,18 @@ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data)
        switch (dwc3_data->dr_mode) {
        case USB_DR_MODE_PERIPHERAL:
 
-               val &= ~(USB3_FORCE_VBUSVALID | USB3_DELAY_VBUSVALID
+               val &= ~(USB3_DELAY_VBUSVALID
                        | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
                        | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
                        | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
 
-               val |= USB3_DEVICE_NOT_HOST;
+               /*
+                * When USB3_PORT2_FORCE_VBUSVALID is '1' and
+                * USB3_PORT2_DEVICE_NOT_HOST is '1', the VBUSVLDEXT2 input
+                * of the pico PHY is forced to 1.
+                */
+
+               val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID;
                break;
 
        case USB_DR_MODE_HOST:
index 9a7d0bd15dc311c2a697678f05a7cc67ca3ef2ad..07248ff1be5c150b04c2d0f6297290489c94c205 100644 (file)
@@ -347,6 +347,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
        return ret;
 }
 
+static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
+{
+       struct dwc3 *dwc = dep->dwc;
+       struct dwc3_gadget_ep_cmd_params params;
+       u32 cmd = DWC3_DEPCMD_CLEARSTALL;
+
+       /*
+        * As of core revision 2.60a the recommended programming model
+        * is to set the ClearPendIN bit when issuing a Clear Stall EP
+        * command for IN endpoints. This is to prevent an issue where
+        * some (non-compliant) hosts may not send ACK TPs for pending
+        * IN transfers due to a mishandled error condition. Synopsys
+        * STAR 9000614252.
+        */
+       if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
+               cmd |= DWC3_DEPCMD_CLEARPENDIN;
+
+       memset(&params, 0, sizeof(params));
+
+       return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+}
+
 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
                struct dwc3_trb *trb)
 {
@@ -1314,8 +1336,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
                else
                        dep->flags |= DWC3_EP_STALL;
        } else {
-               ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
-                       DWC3_DEPCMD_CLEARSTALL, &params);
+               ret = dwc3_send_clear_stall_ep_cmd(dep);
                if (ret)
                        dev_err(dwc->dev, "failed to clear STALL on %s\n",
                                        dep->name);
@@ -2247,7 +2268,6 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
 
        for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
                struct dwc3_ep *dep;
-               struct dwc3_gadget_ep_cmd_params params;
                int ret;
 
                dep = dwc->eps[epnum];
@@ -2259,9 +2279,7 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
 
                dep->flags &= ~DWC3_EP_STALL;
 
-               memset(&params, 0, sizeof(params));
-               ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
-                               DWC3_DEPCMD_CLEARSTALL, &params);
+               ret = dwc3_send_clear_stall_ep_cmd(dep);
                WARN_ON_ONCE(ret);
        }
 }
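
dwc3_send_clear_stall_ep_cmd() centralizes the Clear Stall command so the ClearPendIN workaround is applied in one place, and only for IN endpoints on cores at or past revision 2.60a. A sketch of the gating (the revision encoding here is assumed for illustration, not the driver's actual constant):

#include <stdio.h>

#define CMD_CLEARSTALL  0x05
#define CMD_CLEARPENDIN (1 << 11)
#define REVISION_260A   0x260a          /* assumed encoding for the sketch */

static unsigned int clear_stall_cmd(int is_in, unsigned int revision)
{
        unsigned int cmd = CMD_CLEARSTALL;

        /* erratum workaround applies to IN endpoints on >= 2.60a cores */
        if (is_in && revision >= REVISION_260A)
                cmd |= CMD_CLEARPENDIN;
        return cmd;
}

int main(void)
{
        printf("%#x %#x %#x\n",
               clear_stall_cmd(1, 0x260a),      /* flag set */
               clear_stall_cmd(0, 0x260a),      /* OUT endpoint: no flag */
               clear_stall_cmd(1, 0x250a));     /* older core: no flag */
        return 0;
}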
index d67de0d22a2be52b853f734e16bed29f27041bad..eb648485a58c571001c4e31b7c79aadaaee04445 100644 (file)
@@ -1868,14 +1868,19 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                                }
                                break;
                        }
-                       req->length = value;
-                       req->context = cdev;
-                       req->zero = value < w_length;
-                       value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
-                       if (value < 0) {
-                               DBG(cdev, "ep_queue --> %d\n", value);
-                               req->status = 0;
-                               composite_setup_complete(gadget->ep0, req);
+
+                       if (value >= 0) {
+                               req->length = value;
+                               req->context = cdev;
+                               req->zero = value < w_length;
+                               value = composite_ep0_queue(cdev, req,
+                                                           GFP_ATOMIC);
+                               if (value < 0) {
+                                       DBG(cdev, "ep_queue --> %d\n", value);
+                                       req->status = 0;
+                                       composite_setup_complete(gadget->ep0,
+                                                                req);
+                               }
                        }
                        return value;
                }
index b6f60ca8a0357be74845d78dcb04b7ca1d7dda98..70cf3477f951a11931032c2c73d59ace5b7a1163 100644 (file)
@@ -1401,6 +1401,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
                .owner          = THIS_MODULE,
                .name           = "configfs-gadget",
        },
+       .match_existing_only = 1,
 };
 
 static struct config_group *gadgets_make(
index 73515d54e1cc1138cdb2ab5c74d480cc780dfd29..cc33d26674080a1359ed7be97410bb640453b469 100644 (file)
@@ -2051,7 +2051,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
 
                if (len < sizeof(*d) ||
                    d->bFirstInterfaceNumber >= ffs->interfaces_count ||
-                   d->Reserved1)
+                   !d->Reserved1)
                        return -EINVAL;
                for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
                        if (d->Reserved2[i])
@@ -2729,6 +2729,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
                func->ffs->ss_descs_count;
 
        int fs_len, hs_len, ss_len, ret, i;
+       struct ffs_ep *eps_ptr;
 
        /* Make it a single chunk, less management later on */
        vla_group(d);
@@ -2777,12 +2778,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
               ffs->raw_descs_length);
 
        memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
-       for (ret = ffs->eps_count; ret; --ret) {
-               struct ffs_ep *ptr;
-
-               ptr = vla_ptr(vlabuf, d, eps);
-               ptr[ret].num = -1;
-       }
+       eps_ptr = vla_ptr(vlabuf, d, eps);
+       for (i = 0; i < ffs->eps_count; i++)
+               eps_ptr[i].num = -1;
 
        /* Save pointers
         * d_eps == vlabuf, func->eps used to kfree vlabuf later
@@ -2851,7 +2849,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
                goto error;
 
        func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
-       if (c->cdev->use_os_string)
+       if (c->cdev->use_os_string) {
                for (i = 0; i < ffs->interfaces_count; ++i) {
                        struct usb_os_desc *desc;
 
@@ -2862,13 +2860,15 @@ static int _ffs_func_bind(struct usb_configuration *c,
                                vla_ptr(vlabuf, d, ext_compat) + i * 16;
                        INIT_LIST_HEAD(&desc->ext_prop);
                }
-       ret = ffs_do_os_descs(ffs->ms_os_descs_count,
-                             vla_ptr(vlabuf, d, raw_descs) +
-                             fs_len + hs_len + ss_len,
-                             d_raw_descs__sz - fs_len - hs_len - ss_len,
-                             __ffs_func_bind_do_os_desc, func);
-       if (unlikely(ret < 0))
-               goto error;
+               ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+                                     vla_ptr(vlabuf, d, raw_descs) +
+                                     fs_len + hs_len + ss_len,
+                                     d_raw_descs__sz - fs_len - hs_len -
+                                     ss_len,
+                                     __ffs_func_bind_do_os_desc, func);
+               if (unlikely(ret < 0))
+                       goto error;
+       }
        func->function.os_desc_n =
                c->cdev->use_os_string ? ffs->interfaces_count : 0;
 
index c45104e3a64bfe147e93b4055ef7ad7d21b1cd2a..64706a789580dd3bb1641b151d15f0b0831c43ed 100644 (file)
@@ -161,14 +161,6 @@ static struct usb_endpoint_descriptor hs_ep_out_desc = {
        .wMaxPacketSize =       cpu_to_le16(512)
 };
 
-static struct usb_qualifier_descriptor dev_qualifier = {
-       .bLength =              sizeof(dev_qualifier),
-       .bDescriptorType =      USB_DT_DEVICE_QUALIFIER,
-       .bcdUSB =               cpu_to_le16(0x0200),
-       .bDeviceClass =         USB_CLASS_PRINTER,
-       .bNumConfigurations =   1
-};
-
 static struct usb_descriptor_header *hs_printer_function[] = {
        (struct usb_descriptor_header *) &intf_desc,
        (struct usb_descriptor_header *) &hs_ep_in_desc,
index 35fe3c80cfc09a432cd4793a9ea125781bf4525f..197f73386fac9ab45473de09db462ab5e4640d90 100644 (file)
@@ -1445,16 +1445,18 @@ static void usbg_drop_tpg(struct se_portal_group *se_tpg)
        for (i = 0; i < TPG_INSTANCES; ++i)
                if (tpg_instances[i].tpg == tpg)
                        break;
-       if (i < TPG_INSTANCES)
+       if (i < TPG_INSTANCES) {
                tpg_instances[i].tpg = NULL;
-       opts = container_of(tpg_instances[i].func_inst,
-               struct f_tcm_opts, func_inst);
-       mutex_lock(&opts->dep_lock);
-       if (opts->has_dep)
-               module_put(opts->dependent);
-       else
-               configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
-       mutex_unlock(&opts->dep_lock);
+               opts = container_of(tpg_instances[i].func_inst,
+                       struct f_tcm_opts, func_inst);
+               mutex_lock(&opts->dep_lock);
+               if (opts->has_dep)
+                       module_put(opts->dependent);
+               else
+                       configfs_undepend_item_unlocked(
+                               &opts->func_inst.group.cg_item);
+               mutex_unlock(&opts->dep_lock);
+       }
        mutex_unlock(&tpg_instances_lock);
 
        kfree(tpg);
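
The reworked usbg_drop_tpg() above dereferences tpg_instances[i] only inside the i < TPG_INSTANCES guard; previously the container_of() ran even when the search loop fell off the end of the array. A standalone sketch of the same find-then-guard idiom (all names illustrative):

    #include <stdio.h>

    #define N 4

    static int slots[N] = { 10, 20, 30, 40 };

    int main(void)
    {
            int i, key = 99;

            for (i = 0; i < N; i++)
                    if (slots[i] == key)
                            break;

            if (i < N)                      /* i == N means "not found" */
                    printf("found at %d\n", i);
            else
                    printf("%d not present\n", key);
            return 0;
    }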
index 186d4b1625248462ec51e099ef9677d2dacc788d..cd214ec8a601b4913b67f7d67e52fb8c57e003c4 100644 (file)
@@ -598,18 +598,6 @@ static struct usb_gadget_strings *fn_strings[] = {
        NULL,
 };
 
-static struct usb_qualifier_descriptor devqual_desc = {
-       .bLength = sizeof devqual_desc,
-       .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
-
-       .bcdUSB = cpu_to_le16(0x200),
-       .bDeviceClass = USB_CLASS_MISC,
-       .bDeviceSubClass = 0x02,
-       .bDeviceProtocol = 0x01,
-       .bNumConfigurations = 1,
-       .bRESERVED = 0,
-};
-
 static struct usb_interface_assoc_descriptor iad_desc = {
        .bLength = sizeof iad_desc,
        .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
@@ -1292,6 +1280,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 
        if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
                struct cntrl_cur_lay3 c;
+               memset(&c, 0, sizeof(struct cntrl_cur_lay3));
 
                if (entity_id == USB_IN_CLK_ID)
                        c.dCUR = p_srate;
index d62683017cf3c1c052f78eb243799e1ca46ca8d5..990df221c6299a2cd9d2ecb9aab54fda5447560e 100644 (file)
@@ -83,9 +83,7 @@ EXPORT_SYMBOL_GPL(fsg_fs_function);
  * USB 2.0 devices need to expose both high speed and full speed
  * descriptors, unless they only run at full speed.
  *
- * That means alternate endpoint descriptors (bigger packets)
- * and a "device qualifier" ... plus more construction options
- * for the configuration descriptor.
+ * That means alternate endpoint descriptors (bigger packets).
  */
 struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
index e64479f882a52e787822b865eb447e8c1e484f9a..aa3707bdebb4ad7fc2d56f50bbe80ab7093cf878 100644 (file)
@@ -938,8 +938,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
                        struct usb_ep           *ep = dev->gadget->ep0;
                        struct usb_request      *req = dev->req;
 
-                       if ((retval = setup_req (ep, req, 0)) == 0)
-                               retval = usb_ep_queue (ep, req, GFP_ATOMIC);
+                       if ((retval = setup_req (ep, req, 0)) == 0) {
+                               spin_unlock_irq (&dev->lock);
+                               retval = usb_ep_queue (ep, req, GFP_KERNEL);
+                               spin_lock_irq (&dev->lock);
+                       }
                        dev->state = STATE_DEV_CONNECTED;
 
                        /* assume that was SET_CONFIGURATION */
@@ -1457,8 +1460,11 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                                                        w_length);
                                if (value < 0)
                                        break;
+
+                               spin_unlock (&dev->lock);
                                value = usb_ep_queue (gadget->ep0, dev->req,
-                                                       GFP_ATOMIC);
+                                                       GFP_KERNEL);
+                               spin_lock (&dev->lock);
                                if (value < 0) {
                                        clean_req (gadget->ep0, dev->req);
                                        break;
@@ -1481,11 +1487,14 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
        if (value >= 0 && dev->state != STATE_DEV_SETUP) {
                req->length = value;
                req->zero = value < w_length;
-               value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
+
+               spin_unlock (&dev->lock);
+               value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
                if (value < 0) {
                        DBG (dev, "ep_queue --> %d\n", value);
                        req->status = 0;
                }
+               return value;
        }
 
        /* device stalls when value < 0 */
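
The gadgetfs hunks above switch ep0 queueing from GFP_ATOMIC to GFP_KERNEL, which may sleep, so dev->lock has to be dropped around each usb_ep_queue() call. The shape of that pattern, as a sketch (any state read before the unlock must be treated as stale afterwards):

    spin_unlock_irq(&dev->lock);
    retval = usb_ep_queue(ep, req, GFP_KERNEL);     /* may sleep here */
    spin_lock_irq(&dev->lock);
    /* dev->state etc. may have changed while the lock was dropped */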
index 6e8300d6a7372043ded9916c540fbd2d4c6b05fd..e1b2dcebdc2ecb741e850355a546325f9b3121ff 100644 (file)
@@ -603,11 +603,15 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
                }
        }
 
-       list_add_tail(&driver->pending, &gadget_driver_pending_list);
-       pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
-               driver->function);
+       if (!driver->match_existing_only) {
+               list_add_tail(&driver->pending, &gadget_driver_pending_list);
+               pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
+                       driver->function);
+               ret = 0;
+       }
+
        mutex_unlock(&udc_lock);
-       return 0;
+       return ret;
 found:
        ret = udc_bind_to_driver(udc, driver);
        mutex_unlock(&udc_lock);
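
After the udc-core change above, a gadget driver that sets match_existing_only is never parked on gadget_driver_pending_list: if no UDC is free at probe time, usb_gadget_probe_driver() returns the error left in ret by the failed lookup rather than 0. A hedged sketch of a driver opting in, mirroring the configfs template earlier in this diff (my_bind/my_unbind/my_setup are placeholders):

    static struct usb_gadget_driver my_driver = {
            .function       = "my-function",
            .max_speed      = USB_SPEED_HIGH,
            .bind           = my_bind,
            .unbind         = my_unbind,
            .setup          = my_setup,
            .driver = {
                    .owner  = THIS_MODULE,
                    .name   = "my-driver",
            },
            /* fail usb_gadget_probe_driver() instead of waiting for a UDC */
            .match_existing_only = 1,
    };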
index ae1b6e69eb965b88dd13a3934d026af25d27c808..a962b89b65a665df737e8ebd4f48646064805334 100644 (file)
@@ -368,6 +368,15 @@ static void ehci_shutdown(struct usb_hcd *hcd)
 {
        struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 
+       /*
+        * Protect the system from crashing at system shutdown in cases where
+        * the USB host is not added yet from the OTG controller driver.
+        * As ehci_setup() is not done yet, stop accessing registers or
+        * variables initialized in ehci_setup().
+        */
+       if (!ehci->sbrn)
+               return;
+
        spin_lock_irq(&ehci->lock);
        ehci->shutdown = true;
        ehci->rh_state = EHCI_RH_STOPPING;
index ffc90295a95f284e7e234f121429b5f957dedc07..74f62d68f01362d144e8de21bd188c06e31a7945 100644 (file)
@@ -872,14 +872,22 @@ int ehci_hub_control(
 ) {
        struct ehci_hcd *ehci = hcd_to_ehci (hcd);
        int             ports = HCS_N_PORTS (ehci->hcs_params);
-       u32 __iomem     *status_reg = &ehci->regs->port_status[
-                               (wIndex & 0xff) - 1];
-       u32 __iomem     *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
+       u32 __iomem     *status_reg, *hostpc_reg;
        u32             temp, temp1, status;
        unsigned long   flags;
        int             retval = 0;
        unsigned        selector;
 
+       /*
+        * Avoid underflow while calculating (wIndex & 0xff) - 1.
+        * The compiler might deduce that wIndex can never be 0 and then
+        * optimize away the tests for !wIndex below.
+        */
+       temp = wIndex & 0xff;
+       temp -= (temp > 0);
+       status_reg = &ehci->regs->port_status[temp];
+       hostpc_reg = &ehci->regs->hostpc[temp];
+
        /*
         * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
         * HCS_INDICATOR may say we can change LEDs to off/amber/green.
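
The temp -= (temp > 0) line above is a branchless clamp: the comparison evaluates to 1 or 0, so the subtraction happens only for non-zero values. wIndex == 0 then maps to index 0 instead of wrapping to a huge offset, and the compiler can no longer prove wIndex != 0 and delete the later !wIndex tests. A standalone demonstration:

    #include <stdio.h>

    static unsigned int port_index(unsigned int wIndex)
    {
            unsigned int temp = wIndex & 0xff;

            temp -= (temp > 0);     /* never underflows */
            return temp;
    }

    int main(void)
    {
            /* prints 0 0 4: index 0 is reused for the invalid wIndex 0 */
            printf("%u %u %u\n", port_index(0), port_index(1),
                   port_index(0x105));
            return 0;
    }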
index d3afc89d00f533089a207662bff530e9672d6c2e..2f8d3af811ce0689b15eb1f19b98edd092d61198 100644 (file)
@@ -179,22 +179,32 @@ static int ehci_msm_remove(struct platform_device *pdev)
 static int ehci_msm_pm_suspend(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
        bool do_wakeup = device_may_wakeup(dev);
 
        dev_dbg(dev, "ehci-msm PM suspend\n");
 
-       return ehci_suspend(hcd, do_wakeup);
+       /* Only call ehci_suspend if ehci_setup has been done */
+       if (ehci->sbrn)
+               return ehci_suspend(hcd, do_wakeup);
+
+       return 0;
 }
 
 static int ehci_msm_pm_resume(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 
        dev_dbg(dev, "ehci-msm PM resume\n");
-       ehci_resume(hcd, false);
+
+       /* Only call ehci_resume if ehci_setup has been done */
+       if (ehci->sbrn)
+               ehci_resume(hcd, false);
 
        return 0;
 }
+
 #else
 #define ehci_msm_pm_suspend    NULL
 #define ehci_msm_pm_resume     NULL
index 4031b372008ef06c4f6f29c34b3f3399f736d00b..9a3d7db5be57cb001ac982516e0e5d847c14409a 100644 (file)
@@ -81,15 +81,23 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
        struct usb_hcd *hcd = platform_get_drvdata(pdev);
        struct tegra_ehci_hcd *tegra =
                (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+       bool has_utmi_pad_registers = false;
 
        phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
        if (!phy_np)
                return -ENOENT;
 
+       if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers"))
+               has_utmi_pad_registers = true;
+
        if (!usb1_reset_attempted) {
                struct reset_control *usb1_reset;
 
-               usb1_reset = of_reset_control_get(phy_np, "usb");
+               if (!has_utmi_pad_registers)
+                       usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
+               else
+                       usb1_reset = tegra->rst;
+
                if (IS_ERR(usb1_reset)) {
                        dev_warn(&pdev->dev,
                                 "can't get utmi-pads reset from the PHY\n");
@@ -99,13 +107,15 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
                        reset_control_assert(usb1_reset);
                        udelay(1);
                        reset_control_deassert(usb1_reset);
+
+                       if (!has_utmi_pad_registers)
+                               reset_control_put(usb1_reset);
                }
 
-               reset_control_put(usb1_reset);
                usb1_reset_attempted = true;
        }
 
-       if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) {
+       if (!has_utmi_pad_registers) {
                reset_control_assert(tegra->rst);
                udelay(1);
                reset_control_deassert(tegra->rst);
index d029bbe9eb36a884fed45fd6f648bfca51199d65..641fed609911578ca8481ebb7a830f7ae418a924 100644 (file)
@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
 {
        int     branch;
 
-       ed->state = ED_OPER;
        ed->ed_prev = NULL;
        ed->ed_next = NULL;
        ed->hwNextED = 0;
@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
        /* the HC may not see the schedule updates yet, but if it does
         * then they'll be properly ordered.
         */
+
+       ed->state = ED_OPER;
        return 0;
 }
 
index 48672fac7ff3eb22389acfb4889a513e8ac79948..c10972fcc8e489fe07598c9cc036574c926a4a4b 100644 (file)
@@ -37,6 +37,7 @@
 /* Device for a quirk */
 #define PCI_VENDOR_ID_FRESCO_LOGIC     0x1b73
 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009      0x1009
 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400      0x1400
 
 #define PCI_VENDOR_ID_ETRON            0x1b6f
@@ -114,6 +115,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        }
 
+       if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+                       pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
+               xhci->quirks |= XHCI_BROKEN_STREAMS;
+
        if (pdev->vendor == PCI_VENDOR_ID_NEC)
                xhci->quirks |= XHCI_NEC_HOST;
 
index 676ea458148b8e87631c317b0356a381ccc04e31..1f3f981fe7f8182b4b4b1707aa4d5f439e556fe6 100644 (file)
@@ -196,6 +196,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
                ret = clk_prepare_enable(clk);
                if (ret)
                        goto put_hcd;
+       } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto put_hcd;
        }
 
        xhci = hcd_to_xhci(hcd);
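
The xhci-plat hunk treats the clock as optional but still propagates probe deferral: every clk_get() failure is ignored except -EPROBE_DEFER, which must bubble up so the probe is retried once the clock provider binds. The decision restated as a sketch (devm_clk_get() here is an assumption; the hunk shows only the error handling):

    clk = devm_clk_get(&pdev->dev, NULL);
    if (!IS_ERR(clk)) {
            ret = clk_prepare_enable(clk);          /* clock present: use it */
            if (ret)
                    goto put_hcd;
    } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
            ret = -EPROBE_DEFER;                    /* provider not bound yet */
            goto put_hcd;
    }
    /* any other error: carry on without the optional clock */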
index 52deae4b7eac2567201f0277d27cfd89c1b01499..d7d502578d799a59c735d8a8ce777f1f6157498d 100644 (file)
@@ -290,6 +290,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+
+       /*
+        * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
+        * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
+        * but the completion event is never sent. Use the cmd timeout timer to
+        * handle those cases. Use twice the time to cover the bit polling retry.
+        */
+       mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                        &xhci->op_regs->cmd_ring);
 
@@ -314,6 +322,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
                xhci_err(xhci, "Stopped the command ring failed, "
                                "maybe the host is dead\n");
+               del_timer(&xhci->cmd_timer);
                xhci->xhc_state |= XHCI_STATE_DYING;
                xhci_quiesce(xhci);
                xhci_halt(xhci);
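
Because some hosts clear CMD_RING_RUNNING but never post the abort completion event, the hunk above arms the command watchdog before writing CMD_RING_ABORT, and cancels it again on the path that declares the host dead. Reduced to its shape:

    /* arm the watchdog first: a silent abort must still time out */
    mod_timer(&xhci->cmd_timer, jiffies + 2 * XHCI_CMD_DEFAULT_TIMEOUT);
    xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

    /* ... if polling never sees the ring stop ... */
    del_timer(&xhci->cmd_timer);    /* host is dead, nothing left to watch */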
@@ -1246,22 +1255,21 @@ void xhci_handle_command_timeout(unsigned long data)
        int ret;
        unsigned long flags;
        u64 hw_ring_state;
-       struct xhci_command *cur_cmd = NULL;
+       bool second_timeout = false;
        xhci = (struct xhci_hcd *) data;
 
        /* mark this command to be cancelled */
        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->current_cmd) {
-               cur_cmd = xhci->current_cmd;
-               cur_cmd->status = COMP_CMD_ABORT;
+               if (xhci->current_cmd->status == COMP_CMD_ABORT)
+                       second_timeout = true;
+               xhci->current_cmd->status = COMP_CMD_ABORT;
        }
 
-
        /* Make sure command ring is running before aborting it */
        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
            (hw_ring_state & CMD_RING_RUNNING))  {
-
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "Command timeout\n");
                ret = xhci_abort_cmd_ring(xhci);
@@ -1273,6 +1281,15 @@ void xhci_handle_command_timeout(unsigned long data)
                }
                return;
        }
+
+       /* command ring failed to restart, or host removed. Bail out */
+       if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
+               xhci_cleanup_command_queue(xhci);
+               return;
+       }
+
        /* command timeout on stopped ring, ring can't be aborted */
        xhci_dbg(xhci, "Command timeout on stopped ring\n");
        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
@@ -2721,7 +2738,8 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
                writel(irq_pending, &xhci->ir_set->irq_pending);
        }
 
-       if (xhci->xhc_state & XHCI_STATE_DYING) {
+       if (xhci->xhc_state & XHCI_STATE_DYING ||
+           xhci->xhc_state & XHCI_STATE_HALTED) {
                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
                                "Shouldn't IRQs be disabled?\n");
                /* Clear the event handler busy flag (RW1C);
index fa7e1ef36cd943648b606fa18da18943b3ad7cdc..f2f9518c53ab437548133349101983389c8de78b 100644 (file)
@@ -685,20 +685,23 @@ void xhci_stop(struct usb_hcd *hcd)
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-       if (xhci->xhc_state & XHCI_STATE_HALTED)
-               return;
-
        mutex_lock(&xhci->mutex);
-       spin_lock_irq(&xhci->lock);
-       xhci->xhc_state |= XHCI_STATE_HALTED;
-       xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 
-       /* Make sure the xHC is halted for a USB3 roothub
-        * (xhci_stop() could be called as part of failed init).
-        */
-       xhci_halt(xhci);
-       xhci_reset(xhci);
-       spin_unlock_irq(&xhci->lock);
+       if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
+               spin_lock_irq(&xhci->lock);
+
+               xhci->xhc_state |= XHCI_STATE_HALTED;
+               xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+               xhci_halt(xhci);
+               xhci_reset(xhci);
+
+               spin_unlock_irq(&xhci->lock);
+       }
+
+       if (!usb_hcd_is_primary_hcd(hcd)) {
+               mutex_unlock(&xhci->mutex);
+               return;
+       }
 
        xhci_cleanup_msix(xhci);
 
@@ -4886,7 +4889,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
        xhci_print_registers(xhci);
 
-       xhci->quirks = quirks;
+       xhci->quirks |= quirks;
 
        get_quirks(dev, xhci);
 
index 39fd95833eb8317b63f4f1e1a781f611bfcda5cd..f824336def5c558de1362f9dead14880ba2fb88f 100644 (file)
@@ -1090,29 +1090,6 @@ void musb_stop(struct musb *musb)
        musb_platform_try_idle(musb, 0);
 }
 
-static void musb_shutdown(struct platform_device *pdev)
-{
-       struct musb     *musb = dev_to_musb(&pdev->dev);
-       unsigned long   flags;
-
-       pm_runtime_get_sync(musb->controller);
-
-       musb_host_cleanup(musb);
-       musb_gadget_cleanup(musb);
-
-       spin_lock_irqsave(&musb->lock, flags);
-       musb_platform_disable(musb);
-       musb_generic_disable(musb);
-       spin_unlock_irqrestore(&musb->lock, flags);
-
-       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
-       musb_platform_exit(musb);
-
-       pm_runtime_put(musb->controller);
-       /* FIXME power down */
-}
-
-
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -1702,7 +1679,7 @@ EXPORT_SYMBOL_GPL(musb_dma_completion);
 #define use_dma                        0
 #endif
 
-static void (*musb_phy_callback)(enum musb_vbus_id_status status);
+static int (*musb_phy_callback)(enum musb_vbus_id_status status);
 
 /*
  * musb_mailbox - optional phy notifier function
@@ -1711,11 +1688,12 @@ static void (*musb_phy_callback)(enum musb_vbus_id_status status);
  * Optionally gets called from the USB PHY. Note that the USB PHY must be
  * disabled at the point the phy_callback is registered or unregistered.
  */
-void musb_mailbox(enum musb_vbus_id_status status)
+int musb_mailbox(enum musb_vbus_id_status status)
 {
        if (musb_phy_callback)
-               musb_phy_callback(status);
+               return musb_phy_callback(status);
 
+       return -ENODEV;
 };
 EXPORT_SYMBOL_GPL(musb_mailbox);
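
With musb_mailbox() now returning a result, PHY-side callers can tell whether the glue actually consumed the event. A sketch of the caller-side handling, matching what the twl6030-usb hunks at the end of this diff do:

    ret = musb_mailbox(status);
    if (ret)
            /* glue not ready (-EPROBE_DEFER/-ENODEV): forget the link state */
            twl->linkstat = MUSB_UNKNOWN;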
 
@@ -2028,11 +2006,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        musb_readl = musb_default_readl;
        musb_writel = musb_default_writel;
 
-       /* We need musb_read/write functions initialized for PM */
-       pm_runtime_use_autosuspend(musb->controller);
-       pm_runtime_set_autosuspend_delay(musb->controller, 200);
-       pm_runtime_enable(musb->controller);
-
        /* The musb_platform_init() call:
         *   - adjusts musb->mregs
         *   - sets the musb->isr
@@ -2134,6 +2107,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (musb->ops->phy_callback)
                musb_phy_callback = musb->ops->phy_callback;
 
+       /*
+        * We need musb_read/write functions initialized for PM.
+        * Note that at least 2430 glue needs autosuspend delay
+        * somewhere above 300 ms for the hardware to idle properly
+        * after disconnecting the cable in host mode. Let's use
+        * 500 ms for some margin.
+        */
+       pm_runtime_use_autosuspend(musb->controller);
+       pm_runtime_set_autosuspend_delay(musb->controller, 500);
+       pm_runtime_enable(musb->controller);
        pm_runtime_get_sync(musb->controller);
 
        status = usb_phy_init(musb->xceiv);
@@ -2237,13 +2220,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (status)
                goto fail5;
 
-       pm_runtime_put(musb->controller);
-
-       /*
-        * For why this is currently needed, see commit 3e43a0725637
-        * ("usb: musb: core: add pm_runtime_irq_safe()")
-        */
-       pm_runtime_irq_safe(musb->controller);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 
        return 0;
 
@@ -2265,7 +2243,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        usb_phy_shutdown(musb->xceiv);
 
 err_usb_phy_init:
+       pm_runtime_dont_use_autosuspend(musb->controller);
        pm_runtime_put_sync(musb->controller);
+       pm_runtime_disable(musb->controller);
 
 fail2:
        if (musb->irq_wake)
@@ -2273,7 +2253,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        musb_platform_exit(musb);
 
 fail1:
-       pm_runtime_disable(musb->controller);
        dev_err(musb->controller,
                "musb_init_controller failed with status %d\n", status);
 
@@ -2312,6 +2291,7 @@ static int musb_remove(struct platform_device *pdev)
 {
        struct device   *dev = &pdev->dev;
        struct musb     *musb = dev_to_musb(dev);
+       unsigned long   flags;
 
        /* this gets called on rmmod.
         *  - Host mode: host may still be active
@@ -2319,17 +2299,26 @@ static int musb_remove(struct platform_device *pdev)
         *  - OTG mode: both roles are deactivated (or never-activated)
         */
        musb_exit_debugfs(musb);
-       musb_shutdown(pdev);
-       musb_phy_callback = NULL;
-
-       if (musb->dma_controller)
-               musb_dma_controller_destroy(musb->dma_controller);
-
-       usb_phy_shutdown(musb->xceiv);
 
        cancel_work_sync(&musb->irq_work);
        cancel_delayed_work_sync(&musb->finish_resume_work);
        cancel_delayed_work_sync(&musb->deassert_reset_work);
+       pm_runtime_get_sync(musb->controller);
+       musb_host_cleanup(musb);
+       musb_gadget_cleanup(musb);
+       spin_lock_irqsave(&musb->lock, flags);
+       musb_platform_disable(musb);
+       musb_generic_disable(musb);
+       spin_unlock_irqrestore(&musb->lock, flags);
+       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+       pm_runtime_dont_use_autosuspend(musb->controller);
+       pm_runtime_put_sync(musb->controller);
+       pm_runtime_disable(musb->controller);
+       musb_platform_exit(musb);
+       musb_phy_callback = NULL;
+       if (musb->dma_controller)
+               musb_dma_controller_destroy(musb->dma_controller);
+       usb_phy_shutdown(musb->xceiv);
        musb_free(musb);
        device_init_wakeup(dev, 0);
        return 0;
@@ -2429,7 +2418,8 @@ static void musb_restore_context(struct musb *musb)
        musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
        musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
        musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
-       musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
+       if (musb->context.devctl & MUSB_DEVCTL_SESSION)
+               musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
 
        for (i = 0; i < musb->config->num_eps; ++i) {
                struct musb_hw_ep       *hw_ep;
@@ -2612,7 +2602,6 @@ static struct platform_driver musb_driver = {
        },
        .probe          = musb_probe,
        .remove         = musb_remove,
-       .shutdown       = musb_shutdown,
 };
 
 module_platform_driver(musb_driver);
index b6afe9e4330545d6b3bd1d432374d61f7cabbf3f..b55a776b03eb5d37f4bbb3ec337656f3d0fc3c55 100644 (file)
@@ -215,7 +215,7 @@ struct musb_platform_ops {
                                dma_addr_t *dma_addr, u32 *len);
        void    (*pre_root_reset_end)(struct musb *musb);
        void    (*post_root_reset_end)(struct musb *musb);
-       void    (*phy_callback)(enum musb_vbus_id_status status);
+       int     (*phy_callback)(enum musb_vbus_id_status status);
 };
 
 /*
@@ -312,6 +312,7 @@ struct musb {
        struct work_struct      irq_work;
        struct delayed_work     deassert_reset_work;
        struct delayed_work     finish_resume_work;
+       struct delayed_work     gadget_work;
        u16                     hwvers;
 
        u16                     intrrxe;
index 152865b3652295caf52340ca86397d5494f34c84..af2a3a7addf904a736e13d81d95f5caac5630f50 100644 (file)
@@ -1656,6 +1656,20 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
        return usb_phy_set_power(musb->xceiv, mA);
 }
 
+static void musb_gadget_work(struct work_struct *work)
+{
+       struct musb *musb;
+       unsigned long flags;
+
+       musb = container_of(work, struct musb, gadget_work.work);
+       pm_runtime_get_sync(musb->controller);
+       spin_lock_irqsave(&musb->lock, flags);
+       musb_pullup(musb, musb->softconnect);
+       spin_unlock_irqrestore(&musb->lock, flags);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
+}
+
 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
 {
        struct musb     *musb = gadget_to_musb(gadget);
@@ -1663,20 +1677,16 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
 
        is_on = !!is_on;
 
-       pm_runtime_get_sync(musb->controller);
-
        /* NOTE: this assumes we are sensing vbus; we'd rather
         * not pullup unless the B-session is active.
         */
        spin_lock_irqsave(&musb->lock, flags);
        if (is_on != musb->softconnect) {
                musb->softconnect = is_on;
-               musb_pullup(musb, is_on);
+               schedule_delayed_work(&musb->gadget_work, 0);
        }
        spin_unlock_irqrestore(&musb->lock, flags);
 
-       pm_runtime_put(musb->controller);
-
        return 0;
 }
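
musb_gadget_pullup() can be called in atomic context, so with the hunks above it only records the new softconnect state under the lock and defers the actual pullup to musb_gadget_work(), where taking a runtime PM reference is allowed to sleep. The hand-off condensed from the two functions above:

    /* atomic path: note the request and kick the worker */
    musb->softconnect = is_on;
    schedule_delayed_work(&musb->gadget_work, 0);   /* 0 = run ASAP */

    /* worker, process context */
    pm_runtime_get_sync(musb->controller);          /* may sleep: fine here */
    spin_lock_irqsave(&musb->lock, flags);
    musb_pullup(musb, musb->softconnect);           /* re-read under the lock */
    spin_unlock_irqrestore(&musb->lock, flags);
    pm_runtime_mark_last_busy(musb->controller);
    pm_runtime_put_autosuspend(musb->controller);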
 
@@ -1845,7 +1855,7 @@ int musb_gadget_setup(struct musb *musb)
 #elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
        musb->g.is_otg = 0;
 #endif
-
+       INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
        musb_g_init_endpoints(musb);
 
        musb->is_active = 0;
@@ -1866,6 +1876,8 @@ void musb_gadget_cleanup(struct musb *musb)
 {
        if (musb->port_mode == MUSB_PORT_MODE_HOST)
                return;
+
+       cancel_delayed_work_sync(&musb->gadget_work);
        usb_del_gadget_udc(&musb->g);
 }
 
@@ -1914,8 +1926,8 @@ static int musb_gadget_start(struct usb_gadget *g,
        if (musb->xceiv->last_event == USB_EVENT_ID)
                musb_platform_set_vbus(musb, 1);
 
-       if (musb->xceiv->last_event == USB_EVENT_NONE)
-               pm_runtime_put(musb->controller);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 
        return 0;
 
@@ -1934,8 +1946,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
        struct musb     *musb = gadget_to_musb(g);
        unsigned long   flags;
 
-       if (musb->xceiv->last_event == USB_EVENT_NONE)
-               pm_runtime_get_sync(musb->controller);
+       pm_runtime_get_sync(musb->controller);
 
        /*
         * REVISIT always use otg_set_peripheral() here too;
@@ -1963,7 +1974,8 @@ static int musb_gadget_stop(struct usb_gadget *g)
         * that currently misbehaves.
         */
 
-       pm_runtime_put(musb->controller);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 
        return 0;
 }
index 2f8ad7f1f482cd55116b8cb3a7fd575fc79c360a..d227a71d85e19cffef3a11662bb9a23affa18fb3 100644 (file)
@@ -434,7 +434,13 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
                }
        }
 
-       if (qh != NULL && qh->is_ready) {
+       /*
+        * The pipe must be broken if the current urb->status is set, so don't
+        * start the next urb.
+        * TODO: to minimize the risk of regression, only check urb->status
+        * for RX, until we have a test case to understand the behavior of TX.
+        */
+       if ((!status || !is_in) && qh && qh->is_ready) {
                dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
                    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
                musb_start_urb(musb, is_in, qh);
@@ -594,14 +600,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
                musb_writew(ep->regs, MUSB_TXCSR, 0);
 
        /* scrub all previous state, clearing toggle */
-       } else {
-               csr = musb_readw(ep->regs, MUSB_RXCSR);
-               if (csr & MUSB_RXCSR_RXPKTRDY)
-                       WARNING("rx%d, packet/%d ready?\n", ep->epnum,
-                               musb_readw(ep->regs, MUSB_RXCOUNT));
-
-               musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
        }
+       csr = musb_readw(ep->regs, MUSB_RXCSR);
+       if (csr & MUSB_RXCSR_RXPKTRDY)
+               WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+                       musb_readw(ep->regs, MUSB_RXCOUNT));
+
+       musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
 
        /* target addr and (for multipoint) hub addr/port */
        if (musb->is_multipoint) {
@@ -627,7 +632,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
        ep->rx_reinit = 0;
 }
 
-static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
+static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
                struct musb_hw_ep *hw_ep, struct musb_qh *qh,
                struct urb *urb, u32 offset,
                u32 *length, u8 *mode)
@@ -664,23 +669,18 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
        }
        channel->desired_mode = *mode;
        musb_writew(epio, MUSB_TXCSR, csr);
-
-       return 0;
 }
 
-static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
-                                         struct musb_hw_ep *hw_ep,
-                                         struct musb_qh *qh,
-                                         struct urb *urb,
-                                         u32 offset,
-                                         u32 *length,
-                                         u8 *mode)
+static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
+                                          struct musb_hw_ep *hw_ep,
+                                          struct musb_qh *qh,
+                                          struct urb *urb,
+                                          u32 offset,
+                                          u32 *length,
+                                          u8 *mode)
 {
        struct dma_channel *channel = hw_ep->tx_channel;
 
-       if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb))
-               return -ENODEV;
-
        channel->actual_len = 0;
 
        /*
@@ -688,8 +688,6 @@ static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
         * to identify the zero-length-final-packet case.
         */
        *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
-
-       return 0;
 }
 
 static bool musb_tx_dma_program(struct dma_controller *dma,
@@ -699,15 +697,14 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
        struct dma_channel      *channel = hw_ep->tx_channel;
        u16                     pkt_size = qh->maxpacket;
        u8                      mode;
-       int                     res;
 
        if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
-               res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb,
-                                                offset, &length, &mode);
+               musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
+                                           &length, &mode);
+       else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
+               musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
+                                              &length, &mode);
        else
-               res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
-                                                    offset, &length, &mode);
-       if (res)
                return false;
 
        qh->segsize = length;
@@ -995,9 +992,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
        if (is_in) {
                dma = is_dma_capable() ? ep->rx_channel : NULL;
 
-               /* clear nak timeout bit */
+               /*
+                * Need to stop the transaction by clearing REQPKT first,
+                * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0
+                * HIGH-SPEED DUAL-ROLE CONTROLLER Programmer's Guide,
+                * section 9.2.2.
+                */
                rx_csr = musb_readw(epio, MUSB_RXCSR);
                rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+               rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+               musb_writew(epio, MUSB_RXCSR, rx_csr);
                rx_csr &= ~MUSB_RXCSR_DATAERROR;
                musb_writew(epio, MUSB_RXCSR, rx_csr);
 
@@ -1551,7 +1554,7 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
                                  struct urb *urb,
                                  size_t len)
 {
-       struct dma_channel *channel = hw_ep->tx_channel;
+       struct dma_channel *channel = hw_ep->rx_channel;
        void __iomem *epio = hw_ep->regs;
        dma_addr_t *buf;
        u32 length, res;
@@ -1870,6 +1873,9 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                status = -EPROTO;
                musb_writeb(epio, MUSB_RXINTERVAL, 0);
 
+               rx_csr &= ~MUSB_RXCSR_H_ERROR;
+               musb_writew(epio, MUSB_RXCSR, rx_csr);
+
        } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
 
                if (USB_ENDPOINT_XFER_ISOC != qh->type) {
index c84e0322c108710607abca65245ca4f368ba2da9..0b4cec940386e12da3b5263be910c3c218663204 100644 (file)
@@ -49,97 +49,14 @@ struct omap2430_glue {
        enum musb_vbus_id_status status;
        struct work_struct      omap_musb_mailbox_work;
        struct device           *control_otghs;
+       bool                    cable_connected;
+       bool                    enabled;
+       bool                    powered;
 };
 #define glue_to_musb(g)                platform_get_drvdata(g->musb)
 
 static struct omap2430_glue    *_glue;
 
-static struct timer_list musb_idle_timer;
-
-static void musb_do_idle(unsigned long _musb)
-{
-       struct musb     *musb = (void *)_musb;
-       unsigned long   flags;
-       u8      power;
-       u8      devctl;
-
-       spin_lock_irqsave(&musb->lock, flags);
-
-       switch (musb->xceiv->otg->state) {
-       case OTG_STATE_A_WAIT_BCON:
-
-               devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-               if (devctl & MUSB_DEVCTL_BDEVICE) {
-                       musb->xceiv->otg->state = OTG_STATE_B_IDLE;
-                       MUSB_DEV_MODE(musb);
-               } else {
-                       musb->xceiv->otg->state = OTG_STATE_A_IDLE;
-                       MUSB_HST_MODE(musb);
-               }
-               break;
-       case OTG_STATE_A_SUSPEND:
-               /* finish RESUME signaling? */
-               if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
-                       power = musb_readb(musb->mregs, MUSB_POWER);
-                       power &= ~MUSB_POWER_RESUME;
-                       dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power);
-                       musb_writeb(musb->mregs, MUSB_POWER, power);
-                       musb->is_active = 1;
-                       musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
-                                               | MUSB_PORT_STAT_RESUME);
-                       musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
-                       usb_hcd_poll_rh_status(musb->hcd);
-                       /* NOTE: it might really be A_WAIT_BCON ... */
-                       musb->xceiv->otg->state = OTG_STATE_A_HOST;
-               }
-               break;
-       case OTG_STATE_A_HOST:
-               devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-               if (devctl &  MUSB_DEVCTL_BDEVICE)
-                       musb->xceiv->otg->state = OTG_STATE_B_IDLE;
-               else
-                       musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
-       default:
-               break;
-       }
-       spin_unlock_irqrestore(&musb->lock, flags);
-}
-
-
-static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
-{
-       unsigned long           default_timeout = jiffies + msecs_to_jiffies(3);
-       static unsigned long    last_timer;
-
-       if (timeout == 0)
-               timeout = default_timeout;
-
-       /* Never idle if active, or when VBUS timeout is not set as host */
-       if (musb->is_active || ((musb->a_wait_bcon == 0)
-                       && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
-               dev_dbg(musb->controller, "%s active, deleting timer\n",
-                       usb_otg_state_string(musb->xceiv->otg->state));
-               del_timer(&musb_idle_timer);
-               last_timer = jiffies;
-               return;
-       }
-
-       if (time_after(last_timer, timeout)) {
-               if (!timer_pending(&musb_idle_timer))
-                       last_timer = timeout;
-               else {
-                       dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
-                       return;
-               }
-       }
-       last_timer = timeout;
-
-       dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
-               usb_otg_state_string(musb->xceiv->otg->state),
-               (unsigned long)jiffies_to_msecs(timeout - jiffies));
-       mod_timer(&musb_idle_timer, timeout);
-}
-
 static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
 {
        struct usb_otg  *otg = musb->xceiv->otg;
@@ -205,16 +122,6 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
                musb_readb(musb->mregs, MUSB_DEVCTL));
 }
 
-static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
-{
-       u8      devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
-       devctl |= MUSB_DEVCTL_SESSION;
-       musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-
-       return 0;
-}
-
 static inline void omap2430_low_level_exit(struct musb *musb)
 {
        u32 l;
@@ -234,22 +141,63 @@ static inline void omap2430_low_level_init(struct musb *musb)
        musb_writel(musb->mregs, OTG_FORCESTDBY, l);
 }
 
-static void omap2430_musb_mailbox(enum musb_vbus_id_status status)
+/*
+ * We can get multiple cable events so we need to keep track
+ * of the power state. Only keep power enabled if the USB cable is
+ * connected and a gadget is started.
+ */
+static void omap2430_set_power(struct musb *musb, bool enabled, bool cable)
+{
+       struct device *dev = musb->controller;
+       struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+       bool power_up;
+       int res;
+
+       if (glue->enabled != enabled)
+               glue->enabled = enabled;
+
+       if (glue->cable_connected != cable)
+               glue->cable_connected = cable;
+
+       power_up = glue->enabled && glue->cable_connected;
+       if (power_up == glue->powered) {
+               dev_warn(musb->controller, "power state already %i\n",
+                        power_up);
+               return;
+       }
+
+       glue->powered = power_up;
+
+       if (power_up) {
+               res = pm_runtime_get_sync(musb->controller);
+               if (res < 0) {
+                       dev_err(musb->controller, "could not enable: %i", res);
+                       glue->powered = false;
+               }
+       } else {
+               pm_runtime_mark_last_busy(musb->controller);
+               pm_runtime_put_autosuspend(musb->controller);
+       }
+}
+
+static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
 {
        struct omap2430_glue    *glue = _glue;
 
        if (!glue) {
                pr_err("%s: musb core is not yet initialized\n", __func__);
-               return;
+               return -EPROBE_DEFER;
        }
        glue->status = status;
 
        if (!glue_to_musb(glue)) {
                pr_err("%s: musb core is not yet ready\n", __func__);
-               return;
+               return -EPROBE_DEFER;
        }
 
        schedule_work(&glue->omap_musb_mailbox_work);
+
+       return 0;
 }
 
 static void omap_musb_set_mailbox(struct omap2430_glue *glue)
@@ -259,6 +207,13 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
        struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
        struct omap_musb_board_data *data = pdata->board_data;
        struct usb_otg *otg = musb->xceiv->otg;
+       bool cable_connected;
+
+       cable_connected = ((glue->status == MUSB_ID_GROUND) ||
+                          (glue->status == MUSB_VBUS_VALID));
+
+       if (cable_connected)
+               omap2430_set_power(musb, glue->enabled, cable_connected);
 
        switch (glue->status) {
        case MUSB_ID_GROUND:
@@ -268,7 +223,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                musb->xceiv->otg->state = OTG_STATE_A_IDLE;
                musb->xceiv->last_event = USB_EVENT_ID;
                if (musb->gadget_driver) {
-                       pm_runtime_get_sync(dev);
                        omap_control_usb_set_mode(glue->control_otghs,
                                USB_MODE_HOST);
                        omap2430_musb_set_vbus(musb, 1);
@@ -281,8 +235,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                otg->default_a = false;
                musb->xceiv->otg->state = OTG_STATE_B_IDLE;
                musb->xceiv->last_event = USB_EVENT_VBUS;
-               if (musb->gadget_driver)
-                       pm_runtime_get_sync(dev);
                omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
                break;
 
@@ -291,11 +243,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                dev_dbg(dev, "VBUS Disconnect\n");
 
                musb->xceiv->last_event = USB_EVENT_NONE;
-               if (musb->gadget_driver) {
+               if (musb->gadget_driver)
                        omap2430_musb_set_vbus(musb, 0);
-                       pm_runtime_mark_last_busy(dev);
-                       pm_runtime_put_autosuspend(dev);
-               }
 
                if (data->interface_type == MUSB_INTERFACE_UTMI)
                        otg_set_vbus(musb->xceiv->otg, 0);
@@ -307,6 +256,9 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                dev_dbg(dev, "ID float\n");
        }
 
+       if (!cable_connected)
+               omap2430_set_power(musb, glue->enabled, cable_connected);
+
        atomic_notifier_call_chain(&musb->xceiv->notifier,
                        musb->xceiv->last_event, NULL);
 }
@@ -316,13 +268,8 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
 {
        struct omap2430_glue *glue = container_of(mailbox_work,
                                struct omap2430_glue, omap_musb_mailbox_work);
-       struct musb *musb = glue_to_musb(glue);
-       struct device *dev = musb->controller;
 
-       pm_runtime_get_sync(dev);
        omap_musb_set_mailbox(glue);
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
 }
 
 static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
@@ -389,23 +336,7 @@ static int omap2430_musb_init(struct musb *musb)
                return PTR_ERR(musb->phy);
        }
        musb->isr = omap2430_musb_interrupt;
-
-       /*
-        * Enable runtime PM for musb parent (this driver). We can't
-        * do it earlier as struct musb is not yet allocated and we
-        * need to touch the musb registers for runtime PM.
-        */
-       pm_runtime_enable(glue->dev);
-       status = pm_runtime_get_sync(glue->dev);
-       if (status < 0)
-               goto err1;
-
-       status = pm_runtime_get_sync(dev);
-       if (status < 0) {
-               dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
-               pm_runtime_put_sync(glue->dev);
-               goto err1;
-       }
+       phy_init(musb->phy);
 
        l = musb_readl(musb->mregs, OTG_INTERFSEL);
 
@@ -427,20 +358,10 @@ static int omap2430_musb_init(struct musb *musb)
                        musb_readl(musb->mregs, OTG_INTERFSEL),
                        musb_readl(musb->mregs, OTG_SIMENABLE));
 
-       setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
-
        if (glue->status != MUSB_UNKNOWN)
                omap_musb_set_mailbox(glue);
 
-       phy_init(musb->phy);
-       phy_power_on(musb->phy);
-
-       pm_runtime_put_noidle(musb->controller);
-       pm_runtime_put_noidle(glue->dev);
        return 0;
-
-err1:
-       return status;
 }
 
 static void omap2430_musb_enable(struct musb *musb)
@@ -452,6 +373,11 @@ static void omap2430_musb_enable(struct musb *musb)
        struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
        struct omap_musb_board_data *data = pdata->board_data;
 
+       if (!WARN_ON(!musb->phy))
+               phy_power_on(musb->phy);
+
+       omap2430_set_power(musb, true, glue->cable_connected);
+
        switch (glue->status) {
 
        case MUSB_ID_GROUND:
@@ -487,18 +413,25 @@ static void omap2430_musb_disable(struct musb *musb)
        struct device *dev = musb->controller;
        struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
 
+       if (!WARN_ON(!musb->phy))
+               phy_power_off(musb->phy);
+
        if (glue->status != MUSB_UNKNOWN)
                omap_control_usb_set_mode(glue->control_otghs,
                        USB_MODE_DISCONNECT);
+
+       omap2430_set_power(musb, false, glue->cable_connected);
 }
 
 static int omap2430_musb_exit(struct musb *musb)
 {
-       del_timer_sync(&musb_idle_timer);
+       struct device *dev = musb->controller;
+       struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
 
        omap2430_low_level_exit(musb);
-       phy_power_off(musb->phy);
        phy_exit(musb->phy);
+       musb->phy = NULL;
+       cancel_work_sync(&glue->omap_musb_mailbox_work);
 
        return 0;
 }
@@ -512,9 +445,6 @@ static const struct musb_platform_ops omap2430_ops = {
        .init           = omap2430_musb_init,
        .exit           = omap2430_musb_exit,
 
-       .set_mode       = omap2430_musb_set_mode,
-       .try_idle       = omap2430_musb_try_idle,
-
        .set_vbus       = omap2430_musb_set_vbus,
 
        .enable         = omap2430_musb_enable,
@@ -639,11 +569,9 @@ static int omap2430_probe(struct platform_device *pdev)
                goto err2;
        }
 
-       /*
-        * Note that we cannot enable PM runtime yet for this
-        * driver as we need struct musb initialized first.
-        * See omap2430_musb_init above.
-        */
+       pm_runtime_enable(glue->dev);
+       pm_runtime_use_autosuspend(glue->dev);
+       pm_runtime_set_autosuspend_delay(glue->dev, 500);
 
        ret = platform_device_add(musb);
        if (ret) {
@@ -662,12 +590,14 @@ static int omap2430_probe(struct platform_device *pdev)
 
 static int omap2430_remove(struct platform_device *pdev)
 {
-       struct omap2430_glue            *glue = platform_get_drvdata(pdev);
+       struct omap2430_glue *glue = platform_get_drvdata(pdev);
+       struct musb *musb = glue_to_musb(glue);
 
        pm_runtime_get_sync(glue->dev);
-       cancel_work_sync(&glue->omap_musb_mailbox_work);
        platform_device_unregister(glue->musb);
+       omap2430_set_power(musb, false, false);
        pm_runtime_put_sync(glue->dev);
+       pm_runtime_dont_use_autosuspend(glue->dev);
        pm_runtime_disable(glue->dev);
 
        return 0;
@@ -680,12 +610,13 @@ static int omap2430_runtime_suspend(struct device *dev)
        struct omap2430_glue            *glue = dev_get_drvdata(dev);
        struct musb                     *musb = glue_to_musb(glue);
 
-       if (musb) {
-               musb->context.otg_interfsel = musb_readl(musb->mregs,
-                               OTG_INTERFSEL);
+       if (!musb)
+               return 0;
 
-               omap2430_low_level_exit(musb);
-       }
+       musb->context.otg_interfsel = musb_readl(musb->mregs,
+                                                OTG_INTERFSEL);
+
+       omap2430_low_level_exit(musb);
 
        return 0;
 }
@@ -696,7 +627,7 @@ static int omap2430_runtime_resume(struct device *dev)
        struct musb                     *musb = glue_to_musb(glue);
 
        if (!musb)
-               return -EPROBE_DEFER;
+               return 0;
 
        omap2430_low_level_init(musb);
        musb_writel(musb->mregs, OTG_INTERFSEL,
@@ -738,18 +669,8 @@ static struct platform_driver omap2430_driver = {
        },
 };
 
+module_platform_driver(omap2430_driver);
+
 MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
 MODULE_LICENSE("GPL v2");
-
-static int __init omap2430_init(void)
-{
-       return platform_driver_register(&omap2430_driver);
-}
-subsys_initcall(omap2430_init);
-
-static void __exit omap2430_exit(void)
-{
-       platform_driver_unregister(&omap2430_driver);
-}
-module_exit(omap2430_exit);
index fdab4232cfbf606e268e301b4f06701765d0eaa2..76500515dd8ba0884eae778253527f52601f2219 100644 (file)
@@ -80,7 +80,8 @@ static struct musb *sunxi_musb;
 
 struct sunxi_glue {
        struct device           *dev;
-       struct platform_device  *musb;
+       struct musb             *musb;
+       struct platform_device  *musb_pdev;
        struct clk              *clk;
        struct reset_control    *rst;
        struct phy              *phy;
@@ -102,7 +103,7 @@ static void sunxi_musb_work(struct work_struct *work)
                return;
 
        if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
-               struct musb *musb = platform_get_drvdata(glue->musb);
+               struct musb *musb = glue->musb;
                unsigned long flags;
                u8 devctl;
 
@@ -112,7 +113,7 @@ static void sunxi_musb_work(struct work_struct *work)
                if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
                        set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
                        musb->xceiv->otg->default_a = 1;
-                       musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+                       musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
                        MUSB_HST_MODE(musb);
                        devctl |= MUSB_DEVCTL_SESSION;
                } else {
@@ -145,10 +146,12 @@ static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
 {
        struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
 
-       if (is_on)
+       if (is_on) {
                set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
-       else
+               musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+       } else {
                clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+       }
 
        schedule_work(&glue->work);
 }
@@ -264,15 +267,6 @@ static int sunxi_musb_init(struct musb *musb)
        if (ret)
                goto error_unregister_notifier;
 
-       if (musb->port_mode == MUSB_PORT_MODE_HOST) {
-               ret = phy_power_on(glue->phy);
-               if (ret)
-                       goto error_phy_exit;
-               set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
-               /* Stop musb work from turning vbus off again */
-               set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
-       }
-
        musb->isr = sunxi_musb_interrupt;
 
        /* Stop the musb-core from doing runtime pm (not supported on sunxi) */
@@ -280,8 +274,6 @@ static int sunxi_musb_init(struct musb *musb)
 
        return 0;
 
-error_phy_exit:
-       phy_exit(glue->phy);
 error_unregister_notifier:
        if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
                extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
@@ -323,10 +315,31 @@ static int sunxi_musb_exit(struct musb *musb)
        return 0;
 }
 
+static int sunxi_set_mode(struct musb *musb, u8 mode)
+{
+       struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+       int ret;
+
+       if (mode == MUSB_HOST) {
+               ret = phy_power_on(glue->phy);
+               if (ret)
+                       return ret;
+
+               set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+               /* Stop musb work from turning vbus off again */
+               set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+               musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+       }
+
+       return 0;
+}
+
 static void sunxi_musb_enable(struct musb *musb)
 {
        struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
 
+       glue->musb = musb;
+
        /* musb_core does not call us in a balanced manner */
        if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
                return;
@@ -569,6 +582,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
        .exit           = sunxi_musb_exit,
        .enable         = sunxi_musb_enable,
        .disable        = sunxi_musb_disable,
+       .set_mode       = sunxi_set_mode,
        .fifo_offset    = sunxi_musb_fifo_offset,
        .ep_offset      = sunxi_musb_ep_offset,
        .busctl_offset  = sunxi_musb_busctl_offset,
@@ -721,9 +735,9 @@ static int sunxi_musb_probe(struct platform_device *pdev)
        pinfo.data      = &pdata;
        pinfo.size_data = sizeof(pdata);
 
-       glue->musb = platform_device_register_full(&pinfo);
-       if (IS_ERR(glue->musb)) {
-               ret = PTR_ERR(glue->musb);
+       glue->musb_pdev = platform_device_register_full(&pinfo);
+       if (IS_ERR(glue->musb_pdev)) {
+               ret = PTR_ERR(glue->musb_pdev);
                dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
                goto err_unregister_usb_phy;
        }
@@ -740,7 +754,7 @@ static int sunxi_musb_remove(struct platform_device *pdev)
        struct sunxi_glue *glue = platform_get_drvdata(pdev);
        struct platform_device *usb_phy = glue->usb_phy;
 
-       platform_device_unregister(glue->musb); /* Frees glue ! */
+       platform_device_unregister(glue->musb_pdev);
        usb_phy_generic_unregister(usb_phy);
 
        return 0;
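
The sunxi hunks above split one field into two roles: glue->musb_pdev is the child platform device that probe registers and remove unregisters, while glue->musb caches the controller state once the core calls .enable, so the work handler no longer has to go through platform_get_drvdata(). A minimal sketch of the resulting ownership; the _sketch name and comments are illustrative, the two field types come from the diff:

struct sunxi_glue_sketch {
	struct musb		*musb;		/* cached in .enable, read by the work item */
	struct platform_device	*musb_pdev;	/* registered in probe, unregistered in remove */
};
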
index 24e2b3cf186759f81240232323d06d8dd03b4975..a72e8d670adc846b225d5cfd6fc27b43f5bc9bea 100644 (file)
@@ -97,6 +97,9 @@ struct twl6030_usb {
 
        struct regulator                *usb3v3;
 
+       /* used to check initial cable status after probe */
+       struct delayed_work     get_status_work;
+
        /* used to set vbus, in atomic path */
        struct work_struct      set_vbus_work;
 
@@ -227,12 +230,16 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
                        twl->asleep = 1;
                        status = MUSB_VBUS_VALID;
                        twl->linkstat = status;
-                       musb_mailbox(status);
+                       ret = musb_mailbox(status);
+                       if (ret)
+                               twl->linkstat = MUSB_UNKNOWN;
                } else {
                        if (twl->linkstat != MUSB_UNKNOWN) {
                                status = MUSB_VBUS_OFF;
                                twl->linkstat = status;
-                               musb_mailbox(status);
+                               ret = musb_mailbox(status);
+                               if (ret)
+                                       twl->linkstat = MUSB_UNKNOWN;
                                if (twl->asleep) {
                                        regulator_disable(twl->usb3v3);
                                        twl->asleep = 0;
@@ -264,7 +271,9 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
                twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
                status = MUSB_ID_GROUND;
                twl->linkstat = status;
-               musb_mailbox(status);
+               ret = musb_mailbox(status);
+               if (ret)
+                       twl->linkstat = MUSB_UNKNOWN;
        } else  {
                twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
                twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -274,6 +283,15 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
        return IRQ_HANDLED;
 }
 
+static void twl6030_status_work(struct work_struct *work)
+{
+       struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
+                                              get_status_work.work);
+
+       twl6030_usb_irq(twl->irq2, twl);
+       twl6030_usbotg_irq(twl->irq1, twl);
+}
+
 static int twl6030_enable_irq(struct twl6030_usb *twl)
 {
        twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -284,8 +302,6 @@ static int twl6030_enable_irq(struct twl6030_usb *twl)
                                REG_INT_MSK_LINE_C);
        twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
                                REG_INT_MSK_STS_C);
-       twl6030_usb_irq(twl->irq2, twl);
-       twl6030_usbotg_irq(twl->irq1, twl);
 
        return 0;
 }
@@ -371,6 +387,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
                dev_warn(&pdev->dev, "could not create sysfs file\n");
 
        INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
+       INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
 
        status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
                        IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -395,6 +412,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
 
        twl->asleep = 0;
        twl6030_enable_irq(twl);
+       schedule_delayed_work(&twl->get_status_work, HZ);
        dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
 
        return 0;
@@ -404,6 +422,7 @@ static int twl6030_usb_remove(struct platform_device *pdev)
 {
        struct twl6030_usb *twl = platform_get_drvdata(pdev);
 
+       cancel_delayed_work(&twl->get_status_work);
        twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
                REG_INT_MSK_LINE_C);
        twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
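
The twl6030 hunks stop running the IRQ handlers synchronously from twl6030_enable_irq() and instead schedule a delayed work that re-checks the initial cable status one second (HZ) after probe, with a matching cancel in remove. A sketch of the same deferred status-check pattern; struct my_dev and the my_* names are illustrative, the workqueue calls are the standard kernel API:

#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work	get_status_work;
	/* ... irqs, regulators, link state ... */
};

static void my_status_work(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev,
					get_status_work.work);

	/* re-run the detection the IRQ handlers normally perform */
	(void)d;
}

static int my_probe(struct my_dev *d)
{
	INIT_DELAYED_WORK(&d->get_status_work, my_status_work);
	/* ... request and enable interrupts ... */
	schedule_delayed_work(&d->get_status_work, HZ);	/* check in ~1s */
	return 0;
}

static void my_remove(struct my_dev *d)
{
	cancel_delayed_work(&d->get_status_work);
}
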
index 2eddbe538cda14d073a15bdaa5f5ad4a13eeb560..5608af4a369d47f1f63402a22173e2971a51fdaf 100644 (file)
@@ -2007,6 +2007,7 @@ static void mos7720_release(struct usb_serial *serial)
                                    urblist_entry)
                        usb_unlink_urb(urbtrack->urb);
                spin_unlock_irqrestore(&mos_parport->listlock, flags);
+               parport_del_port(mos_parport->pp);
 
                kref_put(&mos_parport->ref_count, destroy_mos_parport);
        }
index 4d49fce406e19aef87ec12004dcf3d2e31b5551c..5ef014ba6ae86917f34e6ebbd855845e3bf71732 100644 (file)
@@ -836,6 +836,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
        if (devinfo->flags & US_FL_BROKEN_FUA)
                sdev->broken_fua = 1;
 
+       scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
        return 0;
 }
 
@@ -848,7 +849,6 @@ static struct scsi_host_template uas_host_template = {
        .slave_configure = uas_slave_configure,
        .eh_abort_handler = uas_eh_abort_handler,
        .eh_bus_reset_handler = uas_eh_bus_reset_handler,
-       .can_queue = MAX_CMNDS,
        .this_id = -1,
        .sg_tablesize = SG_NONE,
        .skip_settle_delay = 1,
index fca51105974eabdd09094b7cd1e80bb945275129..2e0450bec1b13014cd257066a138a66e1ddd306d 100644 (file)
@@ -941,7 +941,7 @@ static void vhci_stop(struct usb_hcd *hcd)
 
 static int vhci_get_frame_number(struct usb_hcd *hcd)
 {
-       pr_err("Not yet implemented\n");
+       dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n");
        return 0;
 }
 
index b54f26c55dfd1df46fcd5114660406edfd7e2820..b4b3e256491bfbe8c05fa0f7830c003541db8720 100644 (file)
@@ -746,7 +746,7 @@ config ALIM7101_WDT
 
 config EBC_C384_WDT
        tristate "WinSystems EBC-C384 Watchdog Timer"
-       depends on X86 && ISA
+       depends on X86 && ISA_BUS_API
        select WATCHDOG_CORE
        help
          Enables watchdog timer support for the watchdog timer on the
index d46839f51e730ff50e2c756a9f4b1326af82e2ec..e4db19e88ab15992b44c1c1f82d98938bca24d4b 100644 (file)
@@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 
-static void release_memory_resource(struct resource *resource);
-
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state)
 }
 
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+static void release_memory_resource(struct resource *resource)
+{
+       if (!resource)
+               return;
+
+       /*
+        * No need to reset region to identity mapped since we now
+        * know that no I/O can be in this region
+        */
+       release_resource(resource);
+       kfree(resource);
+}
+
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
        struct resource *res;
@@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
        return res;
 }
 
-static void release_memory_resource(struct resource *resource)
-{
-       if (!resource)
-               return;
-
-       /*
-        * No need to reset region to identity mapped since we now
-        * know that no I/O can be in this region
-        */
-       release_resource(resource);
-       kfree(resource);
-}
-
 static enum bp_state reserve_additional_memory(void)
 {
        long credit;
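
The balloon hunks move release_memory_resource() from after the CONFIG_XEN_BALLOON_MEMORY_HOTPLUG block to inside it, next to its only callers; that removes the forward declaration and avoids a defined-but-unused warning when the option is off. The shape of the fix in miniature; CONFIG_FEATURE_X and the function names are illustrative:

#ifdef CONFIG_FEATURE_X
static void helper(void)		/* defined before its only caller, */
{					/* so no forward declaration needed */
}

static void feature_entry(void)
{
	helper();
}
#endif	/* FEATURE_X off: helper() is compiled out too, no unused warning */
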
index 8e67336f8ddd238d7763a118bb460327ba3278f7..6a25533da237db612f2bb4a99096a96fd49c0d8c 100644 (file)
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
                field_start = OFFSET(cfg_entry);
                field_end = OFFSET(cfg_entry) + field->size;
 
-               if ((req_start >= field_start && req_start < field_end)
-                   || (req_end > field_start && req_end <= field_end)) {
+               if (req_end > field_start && field_end > req_start) {
                        err = conf_space_read(dev, cfg_entry, field_start,
                                              &tmp_val);
                        if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
                field_start = OFFSET(cfg_entry);
                field_end = OFFSET(cfg_entry) + field->size;
 
-               if ((req_start >= field_start && req_start < field_end)
-                   || (req_end > field_start && req_end <= field_end)) {
+               if (req_end > field_start && field_end > req_start) {
                        tmp_val = 0;
 
                        err = xen_pcibk_config_read(dev, field_start,
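
Both conf_space hunks replace a two-clause range check with the canonical overlap test for half-open intervals: [req_start, req_end) and [field_start, field_end) intersect exactly when req_end > field_start && field_end > req_start. The old form tested only whether an endpoint of the request fell inside the field, so a request that strictly contained a field was missed. A self-contained check, with illustrative values:

#include <assert.h>

/* Canonical overlap test for half-open ranges [a0, a1) and [b0, b1). */
static int overlaps(unsigned int a0, unsigned int a1,
		    unsigned int b0, unsigned int b1)
{
	return a1 > b0 && b1 > a0;
}

int main(void)
{
	/* Request [0,8) fully contains field [2,4); the old
	 * endpoint-in-field test returned false for this case.
	 */
	assert(overlaps(0, 8, 2, 4));
	assert(!overlaps(0, 2, 2, 4));	/* merely touching ranges do not overlap */
	return 0;
}
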
index ad3d17d29c81171838d01a2f1a081c769407f02d..9ead1c2ff1ddd2e1340e57c70db6b53a7b34665e 100644 (file)
@@ -145,7 +145,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
        /* A write to obtain the length must happen as a 32-bit write.
         * This does not (yet) support writing individual bytes
         */
-       if (value == ~PCI_ROM_ADDRESS_ENABLE)
+       if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
                bar->which = 1;
        else {
                u32 tmpval;
@@ -225,38 +225,42 @@ static inline void read_dev_bar(struct pci_dev *dev,
                           (PCI_BASE_ADDRESS_SPACE_MEMORY |
                                PCI_BASE_ADDRESS_MEM_TYPE_64))) {
                        bar_info->val = res[pos - 1].start >> 32;
-                       bar_info->len_val = res[pos - 1].end >> 32;
+                       bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
                        return;
                }
        }
 
+       if (!res[pos].flags ||
+           (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
+                              IORESOURCE_BUSY)))
+               return;
+
        bar_info->val = res[pos].start |
                        (res[pos].flags & PCI_REGION_FLAG_MASK);
-       bar_info->len_val = resource_size(&res[pos]);
+       bar_info->len_val = -resource_size(&res[pos]) |
+                           (res[pos].flags & PCI_REGION_FLAG_MASK);
 }
 
 static void *bar_init(struct pci_dev *dev, int offset)
 {
-       struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+       struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 
        if (!bar)
                return ERR_PTR(-ENOMEM);
 
        read_dev_bar(dev, bar, offset, ~0);
-       bar->which = 0;
 
        return bar;
 }
 
 static void *rom_init(struct pci_dev *dev, int offset)
 {
-       struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+       struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 
        if (!bar)
                return ERR_PTR(-ENOMEM);
 
        read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
-       bar->which = 0;
 
        return bar;
 }
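
read_dev_bar() now seeds len_val with the negated resource size. BAR sizing works by writing all-ones and reading back ~(size - 1) in the size-aligned bits, and -size is exactly that two's-complement value; the region flag bits are or'ed back in so the emulated read looks like real hardware. A self-contained demonstration of the arithmetic; the flags value is illustrative:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t size  = 0x1000;	/* a 4 KiB BAR */
	uint32_t flags = 0x8;		/* illustrative low flag bits */

	/* what the hunk stores: -size with the flag bits or'ed in */
	uint32_t len_val = (uint32_t)-size | flags;

	/* what a sizing read decodes: mask flags, negate back to size */
	uint32_t decoded = (uint32_t)-(len_val & ~0xFu);

	assert(decoded == size);
	return 0;
}
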
index f0d268b97d19657929ac2f50cd3bc0e1a80436c7..a439548de785dde9497ed13b2063b696ed773d7f 100644 (file)
@@ -70,9 +70,13 @@ struct autofs_info {
 };
 
 #define AUTOFS_INF_EXPIRING    (1<<0) /* dentry in the process of expiring */
-#define AUTOFS_INF_NO_RCU      (1<<1) /* the dentry is being considered
+#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
                                        * for expiry, so RCU_walk is
-                                       * not permitted
+                                       * not permitted.  If it progresses to
+                                       * an actual expiry attempt, the flag
+                                       * stays set while EXPIRING is set and
+                                       * is cleared only together with
+                                       * EXPIRING.
                                        */
 #define AUTOFS_INF_PENDING     (1<<2) /* dentry pending mount */
 
index 9510d8d2e9cd2fa4ac7919c7bd8bee48c30d6165..b493909e74920be68574b849b426805fd37541d3 100644 (file)
@@ -316,19 +316,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
        if (ino->flags & AUTOFS_INF_PENDING)
                goto out;
        if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
-               ino->flags |= AUTOFS_INF_NO_RCU;
+               ino->flags |= AUTOFS_INF_WANT_EXPIRE;
                spin_unlock(&sbi->fs_lock);
                synchronize_rcu();
                spin_lock(&sbi->fs_lock);
                if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
                        ino->flags |= AUTOFS_INF_EXPIRING;
-                       smp_mb();
-                       ino->flags &= ~AUTOFS_INF_NO_RCU;
                        init_completion(&ino->expire_complete);
                        spin_unlock(&sbi->fs_lock);
                        return root;
                }
-               ino->flags &= ~AUTOFS_INF_NO_RCU;
+               ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
        }
 out:
        spin_unlock(&sbi->fs_lock);
@@ -446,7 +444,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
        while ((dentry = get_next_positive_subdir(dentry, root))) {
                spin_lock(&sbi->fs_lock);
                ino = autofs4_dentry_ino(dentry);
-               if (ino->flags & AUTOFS_INF_NO_RCU)
+               if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
                        expired = NULL;
                else
                        expired = should_expire(dentry, mnt, timeout, how);
@@ -455,7 +453,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
                        continue;
                }
                ino = autofs4_dentry_ino(expired);
-               ino->flags |= AUTOFS_INF_NO_RCU;
+               ino->flags |= AUTOFS_INF_WANT_EXPIRE;
                spin_unlock(&sbi->fs_lock);
                synchronize_rcu();
                spin_lock(&sbi->fs_lock);
@@ -465,7 +463,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
                        goto found;
                }
 
-               ino->flags &= ~AUTOFS_INF_NO_RCU;
+               ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
                if (expired != dentry)
                        dput(expired);
                spin_unlock(&sbi->fs_lock);
@@ -475,17 +473,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
 found:
        pr_debug("returning %p %pd\n", expired, expired);
        ino->flags |= AUTOFS_INF_EXPIRING;
-       smp_mb();
-       ino->flags &= ~AUTOFS_INF_NO_RCU;
        init_completion(&ino->expire_complete);
        spin_unlock(&sbi->fs_lock);
-       spin_lock(&sbi->lookup_lock);
-       spin_lock(&expired->d_parent->d_lock);
-       spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
-       list_move(&expired->d_parent->d_subdirs, &expired->d_child);
-       spin_unlock(&expired->d_lock);
-       spin_unlock(&expired->d_parent->d_lock);
-       spin_unlock(&sbi->lookup_lock);
        return expired;
 }
 
@@ -496,7 +485,7 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
        int status;
 
        /* Block on any pending expire */
-       if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
+       if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
                return 0;
        if (rcu_walk)
                return -ECHILD;
@@ -554,7 +543,7 @@ int autofs4_expire_run(struct super_block *sb,
        ino = autofs4_dentry_ino(dentry);
        /* avoid rapid-fire expire attempts if expiry fails */
        ino->last_used = now;
-       ino->flags &= ~AUTOFS_INF_EXPIRING;
+       ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
        complete_all(&ino->expire_complete);
        spin_unlock(&sbi->fs_lock);
 
@@ -583,7 +572,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
                spin_lock(&sbi->fs_lock);
                /* avoid rapid-fire expire attempts if expiry fails */
                ino->last_used = now;
-               ino->flags &= ~AUTOFS_INF_EXPIRING;
+               ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
                complete_all(&ino->expire_complete);
                spin_unlock(&sbi->fs_lock);
                dput(dentry);
index 78bd8029852856a486533699d1d25c1a692104a1..3767f6641af1cf885d5254c7a59f249eb14db17e 100644 (file)
@@ -458,7 +458,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
                 */
                struct inode *inode;
 
-               if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
+               if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
                        return 0;
                if (d_mountpoint(dentry))
                        return 0;
index 0146d911f468ca46d5fe7c946851c500b624ccb9..631f1554c87b7d73b9afc7834392ce6e279c7d6d 100644 (file)
@@ -66,11 +66,12 @@ static int autofs4_write(struct autofs_sb_info *sbi,
        set_fs(KERNEL_DS);
 
        mutex_lock(&sbi->pipe_mutex);
-       wr = __vfs_write(file, data, bytes, &file->f_pos);
-       while (bytes && wr) {
+       while (bytes) {
+               wr = __vfs_write(file, data, bytes, &file->f_pos);
+               if (wr <= 0)
+                       break;
                data += wr;
                bytes -= wr;
-               wr = __vfs_write(file, data, bytes, &file->f_pos);
        }
        mutex_unlock(&sbi->pipe_mutex);
 
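The rewritten autofs4_write() loop is the standard short-write pattern: __vfs_write() may write fewer bytes than asked, so the remainder is retried, and the loop now also stops when the return value indicates an error. The old shape checked wr only for non-zero, so a negative return advanced data backwards and grew bytes. A self-contained userspace equivalent; write_all() is an illustrative name:

#include <errno.h>
#include <unistd.h>

/* Write all of data, retrying on short writes; 0 on success, -errno
 * on failure.  A production version would usually also retry on EINTR.
 */
static int write_all(int fd, const char *data, size_t bytes)
{
	while (bytes) {
		ssize_t wr = write(fd, data, bytes);

		if (wr <= 0)			/* error or no progress */
			return wr < 0 ? -errno : -EIO;
		data += wr;			/* short write: advance */
		bytes -= wr;			/* and retry the rest   */
	}
	return 0;
}
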
index e158b22ef32f16d2fcf61c29fbf6d15a65581c1e..a7a28110dc80aa9df1e700d9c24aa297161ab996 100644 (file)
@@ -2275,7 +2275,7 @@ static int elf_core_dump(struct coredump_params *cprm)
                goto end_coredump;
 
        /* Align to page */
-       if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+       if (!dump_skip(cprm, dataoff - cprm->pos))
                goto end_coredump;
 
        for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
index 71ade0e556b7e7cab5b83705228a54099b067749..203589311bf88733bdb53d329a3b05a490ce9164 100644 (file)
@@ -1787,7 +1787,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
                                goto end_coredump;
        }
 
-       if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+       if (!dump_skip(cprm, dataoff - cprm->pos))
                goto end_coredump;
 
        if (!elf_fdpic_dump_segments(cprm))
index b677a6ea6001a872fc27dd6f90db72459b69f712..7706c8dc5fa637632dce3c9e31f50aa2c9d5f400 100644 (file)
@@ -2645,7 +2645,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
         * This algorithm is recursive because the amount of used stack space
         * is very small and the max recursion depth is limited.
         */
-       indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
+       indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)",
                             btrfsic_get_block_type(state, block),
                             block->logical_bytenr, block->dev_state->name,
                             block->dev_bytenr, block->mirror_num);
index 427c36b430a6e52dfadbcd9d9b620b5368d0b2ff..a85cf7d2330981d606352cc8218167570db5507a 100644 (file)
@@ -1373,7 +1373,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 
        if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
                BUG_ON(tm->slot != 0);
-               eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
+               eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
+                                               eb->len);
                if (!eb_rewin) {
                        btrfs_tree_read_unlock_blocking(eb);
                        free_extent_buffer(eb);
@@ -1454,7 +1455,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        } else if (old_root) {
                btrfs_tree_read_unlock(eb_root);
                free_extent_buffer(eb_root);
-               eb = alloc_dummy_extent_buffer(root->fs_info, logical);
+               eb = alloc_dummy_extent_buffer(root->fs_info, logical,
+                                       root->nodesize);
        } else {
                btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
                eb = btrfs_clone_extent_buffer(eb_root);
@@ -1552,6 +1554,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
                       trans->transid, root->fs_info->generation);
 
        if (!should_cow_block(trans, root, buf)) {
+               trans->dirty = true;
                *cow_ret = buf;
                return 0;
        }
@@ -1783,10 +1786,12 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
                        if (!err) {
                                tmp = (struct btrfs_disk_key *)(kaddr + offset -
                                                        map_start);
-                       } else {
+                       } else if (err == 1) {
                                read_extent_buffer(eb, &unaligned,
                                                   offset, sizeof(unaligned));
                                tmp = &unaligned;
+                       } else {
+                               return err;
                        }
 
                } else {
@@ -2510,6 +2515,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
                if (!btrfs_buffer_uptodate(tmp, 0, 0))
                        ret = -EIO;
                free_extent_buffer(tmp);
+       } else {
+               ret = PTR_ERR(tmp);
        }
        return ret;
 }
@@ -2773,8 +2780,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
                         * then we don't want to set the path blocking,
                         * so we test it here
                         */
-                       if (!should_cow_block(trans, root, b))
+                       if (!should_cow_block(trans, root, b)) {
+                               trans->dirty = true;
                                goto cow_done;
+                       }
 
                        /*
                         * must have write locks on this node and the
@@ -2823,6 +2832,8 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
                }
 
                ret = key_search(b, key, level, &prev_cmp, &slot);
+               if (ret < 0)
+                       goto done;
 
                if (level != 0) {
                        int dec = 0;
index 101c3cfd3f7cddd43e07462fd74ebfb06cb4039e..4274a7bfdaed8db3ef94d07d38e755db9125b4d0 100644 (file)
@@ -2518,7 +2518,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, unsigned long count);
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
-                                unsigned long count, int wait);
+                                unsigned long count, u64 transid, int wait);
 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
index 61561c2a3f96da0917a0819fb9db48972385496d..d3aaabbfada0adcbb09936a5d47063a4adec65dd 100644 (file)
@@ -1606,15 +1606,23 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
        return 0;
 }
 
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
-                            struct list_head *del_list)
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list)
 {
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *item;
 
        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
-               return;
+               return false;
+
+       /*
+        * We can only do one readdir with delayed items at a time because of
+        * item->readdir_list.
+        */
+       inode_unlock_shared(inode);
+       inode_lock(inode);
 
        mutex_lock(&delayed_node->mutex);
        item = __btrfs_first_delayed_insertion_item(delayed_node);
@@ -1641,10 +1649,13 @@ void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
         * requeue or dequeue this delayed node.
         */
        atomic_dec(&delayed_node->refs);
+
+       return true;
 }
 
-void btrfs_put_delayed_items(struct list_head *ins_list,
-                            struct list_head *del_list)
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list)
 {
        struct btrfs_delayed_item *curr, *next;
 
@@ -1659,6 +1670,12 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }
+
+       /*
+        * The VFS is going to do up_read(), so we need to downgrade back to a
+        * read lock.
+        */
+       downgrade_write(&inode->i_rwsem);
 }
 
 int btrfs_should_delete_dir_index(struct list_head *del_list,
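
The unlock_shared()/lock() pair and the later downgrade_write() are a lock upgrade: btrfs_real_readdir() now runs under the VFS's shared i_rwsem (see the .iterate_shared switch in the inode.c hunk below), but item->readdir_list allows only one readdir at a time, so the get helper trades the shared hold for an exclusive one and the put helper downgrades back so the VFS's up_read() still pairs correctly. i_rwsem has no atomic upgrade, hence the drop-then-retake. A condensed kernel-style sketch of the two halves:

#include <linux/fs.h>

static void readdir_lock_sketch(struct inode *inode)
{
	/* not atomic: another writer may slip in between these two */
	inode_unlock_shared(inode);	/* drop the VFS's shared hold */
	inode_lock(inode);		/* retake the lock exclusively */
}

static void readdir_unlock_sketch(struct inode *inode)
{
	/* back to read mode so the VFS's up_read() pairs correctly */
	downgrade_write(&inode->i_rwsem);
}
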
index 0167853c84aea2d93a8fdd0314342e0a1ef42d1b..2495b3d4075f81538fde3ad0ca020648c164db91 100644 (file)
@@ -137,10 +137,12 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
 void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
 
 /* Used for readdir() */
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
-                            struct list_head *del_list);
-void btrfs_put_delayed_items(struct list_head *ins_list,
-                            struct list_head *del_list);
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list);
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list);
 int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index);
 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
index 6628fca9f4ed87c047e54007f1f94f99ebbaba65..60ce1190307bb976a8fc9229e05ad21b8bedba55 100644 (file)
@@ -1098,7 +1098,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
        struct inode *btree_inode = root->fs_info->btree_inode;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
+       if (IS_ERR(buf))
                return;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, WAIT_NONE, btree_get_extent, 0);
@@ -1114,7 +1114,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
        int ret;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
+       if (IS_ERR(buf))
                return 0;
 
        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
@@ -1147,7 +1147,8 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr)
 {
        if (btrfs_test_is_dummy_root(root))
-               return alloc_test_extent_buffer(root->fs_info, bytenr);
+               return alloc_test_extent_buffer(root->fs_info, bytenr,
+                               root->nodesize);
        return alloc_extent_buffer(root->fs_info, bytenr);
 }
 
@@ -1171,8 +1172,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
        int ret;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(buf))
+               return buf;
 
        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret) {
@@ -1314,14 +1315,16 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 /* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(void)
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_root *root;
 
        root = btrfs_alloc_root(NULL, GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);
-       __setup_root(4096, 4096, 4096, root, NULL, 1);
+       /* We don't use the stripesize in selftests, so set it to sectorsize */
+       __setup_root(nodesize, sectorsize, sectorsize, root, NULL,
+                       BTRFS_ROOT_TREE_OBJECTID);
        set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
        root->alloc_bytenr = 0;
 
@@ -1803,6 +1806,13 @@ static int cleaner_kthread(void *arg)
                if (btrfs_need_cleaner_sleep(root))
                        goto sleep;
 
+               /*
+                * Do not do anything if we might cause open_ctree() to block
+                * before we have finished mounting the filesystem.
+                */
+               if (!root->fs_info->open)
+                       goto sleep;
+
                if (!mutex_trylock(&root->fs_info->cleaner_mutex))
                        goto sleep;
 
@@ -2517,7 +2527,6 @@ int open_ctree(struct super_block *sb,
        int num_backups_tried = 0;
        int backup_index = 0;
        int max_active;
-       bool cleaner_mutex_locked = false;
 
        tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
        chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
@@ -2797,7 +2806,7 @@ int open_ctree(struct super_block *sb,
 
        nodesize = btrfs_super_nodesize(disk_super);
        sectorsize = btrfs_super_sectorsize(disk_super);
-       stripesize = btrfs_super_stripesize(disk_super);
+       stripesize = sectorsize;
        fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
        fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
 
@@ -2996,13 +3005,6 @@ int open_ctree(struct super_block *sb,
                goto fail_sysfs;
        }
 
-       /*
-        * Hold the cleaner_mutex thread here so that we don't block
-        * for a long time on btrfs_recover_relocation.  cleaner_kthread
-        * will wait for us to finish mounting the filesystem.
-        */
-       mutex_lock(&fs_info->cleaner_mutex);
-       cleaner_mutex_locked = true;
        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
                                               "btrfs-cleaner");
        if (IS_ERR(fs_info->cleaner_kthread))
@@ -3062,8 +3064,10 @@ int open_ctree(struct super_block *sb,
                ret = btrfs_cleanup_fs_roots(fs_info);
                if (ret)
                        goto fail_qgroup;
-               /* We locked cleaner_mutex before creating cleaner_kthread. */
+
+               mutex_lock(&fs_info->cleaner_mutex);
                ret = btrfs_recover_relocation(tree_root);
+               mutex_unlock(&fs_info->cleaner_mutex);
                if (ret < 0) {
                        btrfs_warn(fs_info, "failed to recover relocation: %d",
                                        ret);
@@ -3071,8 +3075,6 @@ int open_ctree(struct super_block *sb,
                        goto fail_qgroup;
                }
        }
-       mutex_unlock(&fs_info->cleaner_mutex);
-       cleaner_mutex_locked = false;
 
        location.objectid = BTRFS_FS_TREE_OBJECTID;
        location.type = BTRFS_ROOT_ITEM_KEY;
@@ -3186,10 +3188,6 @@ int open_ctree(struct super_block *sb,
        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
 
 fail_sysfs:
-       if (cleaner_mutex_locked) {
-               mutex_unlock(&fs_info->cleaner_mutex);
-               cleaner_mutex_locked = false;
-       }
        btrfs_sysfs_remove_mounted(fs_info);
 
 fail_fsdev_sysfs:
@@ -4130,6 +4128,16 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
         * Hint to catch really bogus numbers, bitflips or so, more exact checks are
         * done later
         */
+       if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
+               btrfs_err(fs_info, "bytes_used is too small %llu",
+                      btrfs_super_bytes_used(sb));
+               ret = -EINVAL;
+       }
+       if (!is_power_of_2(btrfs_super_stripesize(sb))) {
+               btrfs_err(fs_info, "invalid stripesize %u",
+                      btrfs_super_stripesize(sb));
+               ret = -EINVAL;
+       }
        if (btrfs_super_num_devices(sb) > (1UL << 31))
                printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
                                btrfs_super_num_devices(sb));
index 8e79d0070bcf57d26f6792e7775fde0117834810..acba821499a909e3a8d42e62e34c8f8e3f27bfa2 100644 (file)
@@ -90,7 +90,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
 void btrfs_free_fs_root(struct btrfs_root *root);
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-struct btrfs_root *btrfs_alloc_dummy_root(void);
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize);
 #endif
 
 /*
index 689d25ac6a6818f84aad0fcdbe7810d3dda417df..82b912a293ab3f2f06cd54aacf14fcda0827e85d 100644 (file)
@@ -2835,6 +2835,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
 
 struct async_delayed_refs {
        struct btrfs_root *root;
+       u64 transid;
        int count;
        int error;
        int sync;
@@ -2850,6 +2851,10 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 
        async = container_of(work, struct async_delayed_refs, work);
 
+       /* if the commit is already started, we don't need to wait here */
+       if (btrfs_transaction_blocked(async->root->fs_info))
+               goto done;
+
        trans = btrfs_join_transaction(async->root);
        if (IS_ERR(trans)) {
                async->error = PTR_ERR(trans);
@@ -2861,10 +2866,15 @@ static void delayed_ref_async_start(struct btrfs_work *work)
         * wait on delayed refs
         */
        trans->sync = true;
+
+       /* Don't bother flushing if we got into a different transaction */
+       if (trans->transid > async->transid)
+               goto end;
+
        ret = btrfs_run_delayed_refs(trans, async->root, async->count);
        if (ret)
                async->error = ret;
-
+end:
        ret = btrfs_end_transaction(trans, async->root);
        if (ret && !async->error)
                async->error = ret;
@@ -2876,7 +2886,7 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 }
 
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
-                                unsigned long count, int wait)
+                                unsigned long count, u64 transid, int wait)
 {
        struct async_delayed_refs *async;
        int ret;
@@ -2888,6 +2898,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
        async->root = root->fs_info->tree_root;
        async->count = count;
        async->error = 0;
+       async->transid = transid;
        if (wait)
                async->sync = 1;
        else
@@ -8016,8 +8027,9 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
        struct extent_buffer *buf;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(buf))
+               return buf;
+
        btrfs_set_header_generation(buf, trans->transid);
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
        btrfs_tree_lock(buf);
@@ -8044,7 +8056,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
                         buf->start + buf->len - 1, GFP_NOFS);
        }
-       trans->blocks_used++;
+       trans->dirty = true;
        /* this returns a buffer locked for blocking */
        return buf;
 }
@@ -8659,8 +8671,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
        next = btrfs_find_tree_block(root->fs_info, bytenr);
        if (!next) {
                next = btrfs_find_create_tree_block(root, bytenr);
-               if (!next)
-                       return -ENOMEM;
+               if (IS_ERR(next))
+                       return PTR_ERR(next);
+
                btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
                                               level - 1);
                reada = 1;
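
delayed_ref_async_start() gains two early-outs: skip entirely when a commit is already underway (per the comment in the hunk), and skip the flush when joining placed the handle in a newer transaction than the one that queued the work. The second check is a single compare because transaction ids only ever increase. A toy model of that compare; the variable names are illustrative:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t queued_transid = 7;	/* async->transid, set at queue time */
	uint64_t joined_transid = 8;	/* trans->transid after joining      */

	/* monotonic ids make "did the world move on?" one comparison */
	assert(joined_transid > queued_transid);	/* so: skip the flush */
	return 0;
}
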
index 6e953de83f08e35d363eaf08466e3ef89449ff64..75533adef9988ae18b5a3ccab9a1b6128d7d02d8 100644 (file)
@@ -4728,16 +4728,16 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 }
 
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-                                               u64 start)
+                                               u64 start, u32 nodesize)
 {
        unsigned long len;
 
        if (!fs_info) {
                /*
                 * Called only from tests that don't always have a fs_info
-                * available, but we know that nodesize is 4096
+                * available
                 */
-               len = 4096;
+               len = nodesize;
        } else {
                len = fs_info->tree_root->nodesize;
        }
@@ -4833,7 +4833,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-                                              u64 start)
+                                       u64 start, u32 nodesize)
 {
        struct extent_buffer *eb, *exists = NULL;
        int ret;
@@ -4841,7 +4841,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
        eb = find_extent_buffer(fs_info, start);
        if (eb)
                return eb;
-       eb = alloc_dummy_extent_buffer(fs_info, start);
+       eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
        if (!eb)
                return NULL;
        eb->fs_info = fs_info;
@@ -4892,18 +4892,25 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        int uptodate = 1;
        int ret;
 
+       if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) {
+               btrfs_err(fs_info, "bad tree block start %llu", start);
+               return ERR_PTR(-EINVAL);
+       }
+
        eb = find_extent_buffer(fs_info, start);
        if (eb)
                return eb;
 
        eb = __alloc_extent_buffer(fs_info, start, len);
        if (!eb)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        for (i = 0; i < num_pages; i++, index++) {
                p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
-               if (!p)
+               if (!p) {
+                       exists = ERR_PTR(-ENOMEM);
                        goto free_eb;
+               }
 
                spin_lock(&mapping->private_lock);
                if (PagePrivate(p)) {
@@ -4948,8 +4955,10 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 again:
        ret = radix_tree_preload(GFP_NOFS);
-       if (ret)
+       if (ret) {
+               exists = ERR_PTR(ret);
                goto free_eb;
+       }
 
        spin_lock(&fs_info->buffer_lock);
        ret = radix_tree_insert(&fs_info->buffer_radix,
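
The alloc_extent_buffer() and btrfs_find_create_tree_block() hunks migrate the error convention from bare NULL to ERR_PTR, so callers can tell -ENOMEM apart from the new -EINVAL alignment check; every caller shown above switches from !buf to IS_ERR(buf). A userspace re-creation of the convention so it can run standalone (in the kernel these helpers come from <linux/err.h>):

#include <assert.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_thing(int fail)
{
	static int obj;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&obj;
}

int main(void)
{
	void *p = alloc_thing(1);

	assert(IS_ERR(p));
	assert(PTR_ERR(p) == -ENOMEM);	/* caller sees the exact errno */
	return 0;
}
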
@@ -5333,6 +5342,11 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
        return ret;
 }
 
+/*
+ * return 0 if the item is found within a page.
+ * return 1 if the item spans two pages.
+ * return -EINVAL otherwise.
+ */
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                               unsigned long min_len, char **map,
                               unsigned long *map_start,
@@ -5347,7 +5361,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                PAGE_SHIFT;
 
        if (i != end_i)
-               return -EINVAL;
+               return 1;
 
        if (i == 0) {
                offset = start_offset;
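
With map_private_extent_buffer() now documented to return 1 for the straddles-two-pages case instead of lumping it in with -EINVAL, callers can tell "fall back to a copy" apart from a real error; the generic_bin_search() hunk in ctree.c above is the pattern. A condensed kernel-style sketch of that caller shape, following the ctree.c hunk:

err = map_private_extent_buffer(eb, offset, sizeof(unaligned),
				&kaddr, &map_start, &map_len);
if (!err) {				/* mapped within one page */
	tmp = (struct btrfs_disk_key *)(kaddr + offset - map_start);
} else if (err == 1) {			/* item spans two pages */
	read_extent_buffer(eb, &unaligned, offset, sizeof(unaligned));
	tmp = &unaligned;
} else {				/* real error */
	return err;
}
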
index 1baf19c9b79d2f48d407549b7dcdf03ce0faed3c..c0c1c4fef6cea0a6ab542a38596093b186b5d7d1 100644 (file)
@@ -348,7 +348,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
                                                  u64 start, unsigned long len);
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-                                               u64 start);
+                                               u64 start, u32 nodesize);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
                                         u64 start);
@@ -468,5 +468,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
                                      u64 *end, u64 max_bytes);
 #endif
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-                                              u64 start);
+                                              u64 start, u32 nodesize);
 #endif
index e0c9bd3fb02dffdb46ac221953c941604043bcf5..2234e88cf674b6298aae8a1077737444a12c4d9d 100644 (file)
@@ -1534,30 +1534,30 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                reserve_bytes = round_up(write_bytes + sector_offset,
                                root->sectorsize);
 
-               if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
-                                             BTRFS_INODE_PREALLOC)) &&
-                   check_can_nocow(inode, pos, &write_bytes) > 0) {
-                       /*
-                        * For nodata cow case, no need to reserve
-                        * data space.
-                        */
-                       only_release_metadata = true;
-                       /*
-                        * our prealloc extent may be smaller than
-                        * write_bytes, so scale down.
-                        */
-                       num_pages = DIV_ROUND_UP(write_bytes + offset,
-                                                PAGE_SIZE);
-                       reserve_bytes = round_up(write_bytes + sector_offset,
-                                       root->sectorsize);
-                       goto reserve_metadata;
-               }
-
                ret = btrfs_check_data_free_space(inode, pos, write_bytes);
-               if (ret < 0)
-                       break;
+               if (ret < 0) {
+                       if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+                                                     BTRFS_INODE_PREALLOC)) &&
+                           check_can_nocow(inode, pos, &write_bytes) > 0) {
+                               /*
+                                * For nodata cow case, no need to reserve
+                                * data space.
+                                */
+                               only_release_metadata = true;
+                               /*
+                                * our prealloc extent may be smaller than
+                                * write_bytes, so scale down.
+                                */
+                               num_pages = DIV_ROUND_UP(write_bytes + offset,
+                                                        PAGE_SIZE);
+                               reserve_bytes = round_up(write_bytes +
+                                                        sector_offset,
+                                                        root->sectorsize);
+                       } else {
+                               break;
+                       }
+               }
 
-reserve_metadata:
                ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
                if (ret) {
                        if (!only_release_metadata)
index c6dc1183f54249c3ee5681fcec818fc9194bc52b..69d270f6602c2de22a734af3f848ad03bf767906 100644 (file)
@@ -29,7 +29,7 @@
 #include "inode-map.h"
 #include "volumes.h"
 
-#define BITS_PER_BITMAP                (PAGE_SIZE * 8)
+#define BITS_PER_BITMAP                (PAGE_SIZE * 8UL)
 #define MAX_CACHE_BYTES_PER_GIG        SZ_32K
 
 struct btrfs_trim_range {
@@ -1415,11 +1415,11 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
                                   u64 offset)
 {
        u64 bitmap_start;
-       u32 bytes_per_bitmap;
+       u64 bytes_per_bitmap;
 
        bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
        bitmap_start = offset - ctl->start;
-       bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
+       bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
        bitmap_start *= bytes_per_bitmap;
        bitmap_start += ctl->start;
 
@@ -1638,10 +1638,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
        u64 bitmap_bytes;
        u64 extent_bytes;
        u64 size = block_group->key.offset;
-       u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
-       u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);
+       u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+       u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
-       max_bitmaps = max_t(u32, max_bitmaps, 1);
+       max_bitmaps = max_t(u64, max_bitmaps, 1);
 
        ASSERT(ctl->total_bitmaps <= max_bitmaps);
 
@@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
         * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
         * we add more bitmaps.
         */
-       bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
+       bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;
 
        if (bitmap_bytes >= max_bytes) {
                ctl->extents_thresh = 0;
@@ -3662,7 +3662,7 @@ int test_check_exists(struct btrfs_block_group_cache *cache,
                        if (tmp->offset + tmp->bytes < offset)
                                break;
                        if (offset + bytes < tmp->offset) {
-                               n = rb_prev(&info->offset_index);
+                               n = rb_prev(&tmp->offset_index);
                                continue;
                        }
                        info = tmp;
@@ -3676,7 +3676,7 @@ int test_check_exists(struct btrfs_block_group_cache *cache,
                        if (offset + bytes < tmp->offset)
                                break;
                        if (tmp->offset + tmp->bytes < offset) {
-                               n = rb_next(&info->offset_index);
+                               n = rb_next(&tmp->offset_index);
                                continue;
                        }
                        info = tmp;
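
Promoting bytes_per_bitmap and max_bitmaps to u64 (with div64_u64) and spelling the macro 8UL guards a 32-bit multiply: with 64 KiB pages BITS_PER_BITMAP is 2^19, and multiplied by a 64 KiB ctl->unit the product is 2^35, which a u32 silently truncates to zero. A self-contained illustration, with values chosen to match that worst case:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bits_per_bitmap = 65536 * 8UL;	/* 64 KiB pages: 2^19 bits */
	uint64_t unit = 65536;			/* ctl->unit: 64 KiB */

	uint32_t truncated = bits_per_bitmap * unit;	/* old u32 local */
	uint64_t correct   = bits_per_bitmap * unit;	/* new u64 local */

	printf("u32: %u  u64: %llu\n", truncated,
	       (unsigned long long)correct);
	assert((uint64_t)truncated != correct);	/* 2^35 needs 36 bits */
	return 0;
}
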
index aae520b2aee535f069946ba332b83a3ab0d44644..a97fdc156a03512bf36df40dd1b4278df845115f 100644 (file)
@@ -24,6 +24,11 @@ int __init btrfs_hash_init(void)
        return PTR_ERR_OR_ZERO(tfm);
 }
 
+const char *btrfs_crc32c_impl(void)
+{
+       return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
+}
+
 void btrfs_hash_exit(void)
 {
        crypto_free_shash(tfm);
index 118a2316e5d39f51cc6293e912ed9d2598d6afd8..c3a2ec554361f5d33b4b5a1a6eda29dca5b82aad 100644 (file)
@@ -22,6 +22,7 @@
 int __init btrfs_hash_init(void);
 
 void btrfs_hash_exit(void);
+const char *btrfs_crc32c_impl(void);
 
 u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length);
 
index 8b1212e8f7a8b65ddd96a38e7d57d534fdf75aff..4421954720b8b719a5c91dcc747c3a494884143c 100644 (file)
@@ -3271,7 +3271,16 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
        /* grab metadata reservation from transaction handle */
        if (reserve) {
                ret = btrfs_orphan_reserve_metadata(trans, inode);
-               BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
+               ASSERT(!ret);
+               if (ret) {
+                       atomic_dec(&root->orphan_inodes);
+                       clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+                                 &BTRFS_I(inode)->runtime_flags);
+                       if (insert)
+                               clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                                         &BTRFS_I(inode)->runtime_flags);
+                       return ret;
+               }
        }
 
        /* insert an orphan item to track this unlinked/truncated file */
@@ -4549,6 +4558,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                        BUG_ON(ret);
                        if (btrfs_should_throttle_delayed_refs(trans, root))
-                               btrfs_async_run_delayed_refs(root,
-                                       trans->delayed_ref_updates * 2, 0);
+                               btrfs_async_run_delayed_refs(root,
+                                       trans->delayed_ref_updates * 2,
+                                       trans->transid, 0);
                        if (be_nice) {
                                if (truncate_space_check(trans, root,
@@ -5748,6 +5758,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
        int name_len;
        int is_curr = 0;        /* ctx->pos points to the current index? */
        bool emitted;
+       bool put = false;
 
        /* FIXME, use a real flag for deciding about the key type */
        if (root->fs_info->tree_root == root)
@@ -5765,7 +5776,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
        if (key_type == BTRFS_DIR_INDEX_KEY) {
                INIT_LIST_HEAD(&ins_list);
                INIT_LIST_HEAD(&del_list);
-               btrfs_get_delayed_items(inode, &ins_list, &del_list);
+               put = btrfs_readdir_get_delayed_items(inode, &ins_list,
+                                                     &del_list);
        }
 
        key.type = key_type;
@@ -5912,8 +5924,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 nopos:
        ret = 0;
 err:
-       if (key_type == BTRFS_DIR_INDEX_KEY)
-               btrfs_put_delayed_items(&ins_list, &del_list);
+       if (put)
+               btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
        btrfs_free_path(path);
        return ret;
 }
@@ -10525,7 +10537,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
 static const struct file_operations btrfs_dir_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .iterate        = btrfs_real_readdir,
+       .iterate_shared = btrfs_real_readdir,
        .unlocked_ioctl = btrfs_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_compat_ioctl,
index e96634a725c3e441a311e36b40b43532031bf5c5..aca8264f4a49d029c52ca9363cf4b64dcef1aef2 100644 (file)
@@ -968,6 +968,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
        struct rb_node *prev = NULL;
        struct btrfs_ordered_extent *test;
        int ret = 1;
+       u64 orig_offset = offset;
 
        spin_lock_irq(&tree->lock);
        if (ordered) {
@@ -983,7 +984,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 
        /* truncate file */
        if (disk_i_size > i_size) {
-               BTRFS_I(inode)->disk_i_size = i_size;
+               BTRFS_I(inode)->disk_i_size = orig_offset;
                ret = 0;
                goto out;
        }
index 4e59a91a11e0447ab05f5c39b9f5baa58121fc23..60e7179ed4b77ff8dcd9de1d8eb1c0cebce9f95d 100644 (file)
@@ -235,7 +235,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
        trans->aborted = errno;
        /* Nothing used. The other threads that have joined this
         * transaction may be able to continue. */
-       if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
+       if (!trans->dirty && list_empty(&trans->new_bgs)) {
                const char *errstr;
 
                errstr = btrfs_decode_error(errno);
@@ -1807,6 +1807,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                        }
                }
                sb->s_flags &= ~MS_RDONLY;
+
+               fs_info->open = 1;
        }
 out:
        wake_up_process(fs_info->transaction_kthread);
@@ -2303,7 +2305,7 @@ static void btrfs_interface_exit(void)
 
 static void btrfs_print_mod_info(void)
 {
-       printk(KERN_INFO "Btrfs loaded"
+       printk(KERN_INFO "Btrfs loaded, crc32c=%s"
 #ifdef CONFIG_BTRFS_DEBUG
                        ", debug=on"
 #endif
@@ -2313,33 +2315,48 @@ static void btrfs_print_mod_info(void)
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
                        ", integrity-checker=on"
 #endif
-                       "\n");
+                       "\n",
+                       btrfs_crc32c_impl());
 }
 
 static int btrfs_run_sanity_tests(void)
 {
-       int ret;
-
+       int ret, i;
+       u32 sectorsize, nodesize;
+       u32 test_sectorsize[] = {
+               PAGE_SIZE,
+       };
        ret = btrfs_init_test_fs();
        if (ret)
                return ret;
-
-       ret = btrfs_test_free_space_cache();
-       if (ret)
-               goto out;
-       ret = btrfs_test_extent_buffer_operations();
-       if (ret)
-               goto out;
-       ret = btrfs_test_extent_io();
-       if (ret)
-               goto out;
-       ret = btrfs_test_inodes();
-       if (ret)
-               goto out;
-       ret = btrfs_test_qgroups();
-       if (ret)
-               goto out;
-       ret = btrfs_test_free_space_tree();
+       for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
+               sectorsize = test_sectorsize[i];
+               for (nodesize = sectorsize;
+                    nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
+                    nodesize <<= 1) {
+                       pr_info("BTRFS: selftest: sectorsize: %u  nodesize: %u\n",
+                               sectorsize, nodesize);
+                       ret = btrfs_test_free_space_cache(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_extent_buffer_operations(sectorsize,
+                               nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_extent_io(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_inodes(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_qgroups(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_free_space_tree(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+               }
+       }
 out:
        btrfs_destroy_test_fs();
        return ret;
index f54bf450bad3e8e5fa9de2d4a0a439cff295b1f4..02223f3f78f4b54bc1fca294d71ac75b0e61b2ce 100644 (file)
@@ -68,7 +68,7 @@ int btrfs_init_test_fs(void)
        if (IS_ERR(test_mnt)) {
                printk(KERN_ERR "btrfs: cannot mount test file system\n");
                unregister_filesystem(&test_type);
-               return ret;
+               return PTR_ERR(test_mnt);
        }
        return 0;
 }
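
The fix above is the standard error-pointer idiom: kern_mount() returns either a valid pointer or an errno encoded into the pointer value, so the failure path must return PTR_ERR(test_mnt) rather than a stale ret. A minimal userspace sketch of the encode/decode pair, mirroring the usual kernel semantics:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno into an invalid pointer value. */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *mnt = ERR_PTR(-12);       /* e.g. -ENOMEM from a failed mount */

        if (IS_ERR(mnt))
                printf("mount failed: %ld\n", PTR_ERR(mnt));
        return 0;
}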
@@ -175,7 +175,7 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
 }
 
 struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length)
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
 {
        struct btrfs_block_group_cache *cache;
 
@@ -192,8 +192,8 @@ btrfs_alloc_dummy_block_group(unsigned long length)
        cache->key.objectid = 0;
        cache->key.offset = length;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
-       cache->sectorsize = 4096;
-       cache->full_stripe_len = 4096;
+       cache->sectorsize = sectorsize;
+       cache->full_stripe_len = sectorsize;
 
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
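
Callers now choose the sector size instead of inheriting a hard-coded 4096. An illustrative fragment (not taken from this diff; SZ_1G and SZ_64K come from <linux/sizes.h>):

        /* Illustrative only: a 1G dummy group with 64K sectors. */
        struct btrfs_block_group_cache *cache;

        cache = btrfs_alloc_dummy_block_group(SZ_1G, SZ_64K);
        if (!cache)
                return -ENOMEM;
        /* ... run tests against cache ... */
        btrfs_free_dummy_block_group(cache);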
index 054b8c73c951e4e50be759ea4b8cb322d80e9041..66fb6b701eb72975fa203fd4018ae847cffd46ab 100644 (file)
 struct btrfs_root;
 struct btrfs_trans_handle;
 
-int btrfs_test_free_space_cache(void);
-int btrfs_test_extent_buffer_operations(void);
-int btrfs_test_extent_io(void);
-int btrfs_test_inodes(void);
-int btrfs_test_qgroups(void);
-int btrfs_test_free_space_tree(void);
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
 int btrfs_init_test_fs(void);
 void btrfs_destroy_test_fs(void);
 struct inode *btrfs_new_test_inode(void);
 struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
 void btrfs_free_dummy_root(struct btrfs_root *root);
 struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length);
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
 void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
 #else
-static inline int btrfs_test_free_space_cache(void)
+static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_extent_buffer_operations(void)
+static inline int btrfs_test_extent_buffer_operations(u32 sectorsize,
+       u32 nodesize)
 {
        return 0;
 }
@@ -57,19 +58,19 @@ static inline int btrfs_init_test_fs(void)
 static inline void btrfs_destroy_test_fs(void)
 {
 }
-static inline int btrfs_test_extent_io(void)
+static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_inodes(void)
+static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_qgroups(void)
+static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_free_space_tree(void)
+static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
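
The header keeps the usual config-gating shape: real prototypes under CONFIG_BTRFS_FS_RUN_SANITY_TESTS, empty static inline stubs otherwise, so callers compile unchanged either way. The same pattern in miniature, with hypothetical names:

#ifdef CONFIG_FOO_SELFTESTS
int foo_run_tests(unsigned int sectorsize, unsigned int nodesize);
#else
/* Stub: compiles away entirely when the tests are configured out. */
static inline int foo_run_tests(unsigned int sectorsize,
                                unsigned int nodesize)
{
        return 0;
}
#endif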
index f51963a8f929e97d8030ca1a1e228263094cf244..4f8cbd1ec5ee340d862a8f8c415ce828ebd1a9c8 100644 (file)
@@ -22,7 +22,7 @@
 #include "../extent_io.h"
 #include "../disk-io.h"
 
-static int test_btrfs_split_item(void)
+static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_path *path;
        struct btrfs_root *root;
@@ -40,7 +40,7 @@ static int test_btrfs_split_item(void)
 
        test_msg("Running btrfs_split_item tests\n");
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Could not allocate root\n");
                return PTR_ERR(root);
@@ -53,7 +53,8 @@ static int test_btrfs_split_item(void)
                return -ENOMEM;
        }
 
-       path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096);
+       path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
+                                                       nodesize);
        if (!eb) {
                test_msg("Could not allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -222,8 +223,8 @@ static int test_btrfs_split_item(void)
        return ret;
 }
 
-int btrfs_test_extent_buffer_operations(void)
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize)
 {
-       test_msg("Running extent buffer operation tests");
-       return test_btrfs_split_item();
+       test_msg("Running extent buffer operation tests\n");
+       return test_btrfs_split_item(sectorsize, nodesize);
 }
index 55724607f79b8eea00090352d7038885001f7843..d19ab0317283ca728963700c93cff334819eaf16 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/sizes.h>
 #include "btrfs-tests.h"
+#include "../ctree.h"
 #include "../extent_io.h"
 
 #define PROCESS_UNLOCK         (1 << 0)
@@ -65,7 +66,7 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
        return count;
 }
 
-static int test_find_delalloc(void)
+static int test_find_delalloc(u32 sectorsize)
 {
        struct inode *inode;
        struct extent_io_tree tmp;
@@ -113,7 +114,7 @@ static int test_find_delalloc(void)
         * |--- delalloc ---|
         * |---  search  ---|
         */
-       set_extent_delalloc(&tmp, 0, 4095, NULL);
+       set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
        start = 0;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -122,9 +123,9 @@ static int test_find_delalloc(void)
                test_msg("Should have found at least one delalloc\n");
                goto out_bits;
        }
-       if (start != 0 || end != 4095) {
-               test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n",
-                        start, end);
+       if (start != 0 || end != (sectorsize - 1)) {
+               test_msg("Expected start 0 end %u, got start %llu end %llu\n",
+                       sectorsize - 1, start, end);
                goto out_bits;
        }
        unlock_extent(&tmp, start, end);
@@ -144,7 +145,7 @@ static int test_find_delalloc(void)
                test_msg("Couldn't find the locked page\n");
                goto out_bits;
        }
-       set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
+       set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -172,7 +173,7 @@ static int test_find_delalloc(void)
         * |--- delalloc ---|
         *                    |--- search ---|
         */
-       test_start = max_bytes + 4096;
+       test_start = max_bytes + sectorsize;
        locked_page = find_lock_page(inode->i_mapping, test_start >>
                                     PAGE_SHIFT);
        if (!locked_page) {
@@ -272,6 +273,16 @@ static int test_find_delalloc(void)
        return ret;
 }
 
+/**
+ * test_bit_in_byte - Determine whether a bit is set in a byte
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit_in_byte(int nr, const u8 *addr)
+{
+       return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
+}
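
test_bit_in_byte() addresses bits byte by byte, so it gives the same answer on little- and big-endian machines, whereas test_bit() operates on native unsigned longs whose byte order differs; extent buffer bitmaps are defined byte-wise, so the helper keeps the comparison endian-neutral. A worked userspace example: bit 11 lives in byte 1, position 3.

#include <stdio.h>

#define BITS_PER_BYTE 8

static inline int test_bit_in_byte(int nr, const unsigned char *addr)
{
        return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
}

int main(void)
{
        const unsigned char map[2] = { 0x00, 0x08 };    /* only bit 11 set */

        printf("bit 11 = %d, bit 12 = %d\n",
               test_bit_in_byte(11, map),       /* 0x08 >> 3 & 1 == 1 */
               test_bit_in_byte(12, map));      /* 0x08 >> 4 & 1 == 0 */
        return 0;
}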
+
 static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
                             unsigned long len)
 {
@@ -298,25 +309,29 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
                return -EINVAL;
        }
 
-       bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
-                  sizeof(long) * BITS_PER_BYTE);
-       extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
-                                sizeof(long) * BITS_PER_BYTE);
-       if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
-               test_msg("Setting straddling pages failed\n");
-               return -EINVAL;
-       }
+       /* Straddling pages test */
+       if (len > PAGE_SIZE) {
+               bitmap_set(bitmap,
+                       (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+                       sizeof(long) * BITS_PER_BYTE);
+               extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+                                       sizeof(long) * BITS_PER_BYTE);
+               if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+                       test_msg("Setting straddling pages failed\n");
+                       return -EINVAL;
+               }
 
-       bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
-       bitmap_clear(bitmap,
-                    (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
-                    sizeof(long) * BITS_PER_BYTE);
-       extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
-       extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
-                                  sizeof(long) * BITS_PER_BYTE);
-       if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
-               test_msg("Clearing straddling pages failed\n");
-               return -EINVAL;
+               bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
+               bitmap_clear(bitmap,
+                       (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+                       sizeof(long) * BITS_PER_BYTE);
+               extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
+               extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+                                       sizeof(long) * BITS_PER_BYTE);
+               if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+                       test_msg("Clearing straddling pages failed\n");
+                       return -EINVAL;
+               }
        }
 
        /*
@@ -333,7 +348,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
        for (i = 0; i < len * BITS_PER_BYTE; i++) {
                int bit, bit1;
 
-               bit = !!test_bit(i, bitmap);
+               bit = !!test_bit_in_byte(i, (u8 *)bitmap);
                bit1 = !!extent_buffer_test_bit(eb, 0, i);
                if (bit1 != bit) {
                        test_msg("Testing bit pattern failed\n");
@@ -351,15 +366,22 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
        return 0;
 }
 
-static int test_eb_bitmaps(void)
+static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
 {
-       unsigned long len = PAGE_SIZE * 4;
+       unsigned long len;
        unsigned long *bitmap;
        struct extent_buffer *eb;
        int ret;
 
        test_msg("Running extent buffer bitmap tests\n");
 
+       /*
+        * On ppc64, sectorsize can be 64K, thus 4 * 64K will be larger than
+        * BTRFS_MAX_METADATA_BLOCKSIZE.
+        */
+       len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
+               ? sectorsize * 4 : sectorsize;
+
        bitmap = kmalloc(len, GFP_KERNEL);
        if (!bitmap) {
                test_msg("Couldn't allocate test bitmap\n");
@@ -379,7 +401,7 @@ static int test_eb_bitmaps(void)
 
        /* Do it over again with an extent buffer which isn't page-aligned. */
        free_extent_buffer(eb);
-       eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
+       eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
        if (!eb) {
                test_msg("Couldn't allocate test extent buffer\n");
                kfree(bitmap);
@@ -393,17 +415,17 @@ static int test_eb_bitmaps(void)
        return ret;
 }
 
-int btrfs_test_extent_io(void)
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
 {
        int ret;
 
        test_msg("Running extent I/O tests\n");
 
-       ret = test_find_delalloc();
+       ret = test_find_delalloc(sectorsize);
        if (ret)
                goto out;
 
-       ret = test_eb_bitmaps();
+       ret = test_eb_bitmaps(sectorsize, nodesize);
 out:
        test_msg("Extent I/O tests finished\n");
        return ret;
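
The len clamp near the top of test_eb_bitmaps() keeps the buffer legal on 64K-sector systems: 4 * 64K would exceed BTRFS_MAX_METADATA_BLOCKSIZE (64K), so len falls back to a single sectorsize there. The arithmetic, as a userspace sketch assuming the 64K ceiling:

#include <stdio.h>

#define MAX_METADATA_BLOCKSIZE 65536UL  /* BTRFS_MAX_METADATA_BLOCKSIZE */

static unsigned long eb_test_len(unsigned long sectorsize)
{
        return (sectorsize < MAX_METADATA_BLOCKSIZE) ? sectorsize * 4
                                                     : sectorsize;
}

int main(void)
{
        printf("4K sectors  -> len %lu\n", eb_test_len(4096));   /* 16384 */
        printf("64K sectors -> len %lu\n", eb_test_len(65536));  /* 65536 */
        return 0;
}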
index 0eeb8f3d6b674ba8f67fe54877afde605999fcf3..3956bb2ff84cab59bf25fc9869cebc83d8606621 100644 (file)
@@ -22,7 +22,7 @@
 #include "../disk-io.h"
 #include "../free-space-cache.h"
 
-#define BITS_PER_BITMAP                (PAGE_SIZE * 8)
+#define BITS_PER_BITMAP                (PAGE_SIZE * 8UL)
 
 /*
  * This test just does basic sanity checking, making sure we can add an extent
@@ -99,7 +99,8 @@ static int test_extents(struct btrfs_block_group_cache *cache)
        return 0;
 }
 
-static int test_bitmaps(struct btrfs_block_group_cache *cache)
+static int test_bitmaps(struct btrfs_block_group_cache *cache,
+                       u32 sectorsize)
 {
        u64 next_bitmap_offset;
        int ret;
@@ -139,7 +140,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
         * The first bitmap we have starts at offset 0 so the next one is just
         * at the end of the first bitmap.
         */
-       next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+       next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
 
        /* Test a bit straddling two bitmaps */
        ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M,
@@ -167,9 +168,10 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
 }
 
 /* This is the high grade jackassery */
-static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
+static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache,
+                                   u32 sectorsize)
 {
-       u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+       u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
        int ret;
 
        test_msg("Running bitmap and extent tests\n");
@@ -401,7 +403,8 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
  * requests.
  */
 static int
-test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
+test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
+                                      u32 sectorsize)
 {
        int ret;
        u64 offset;
@@ -539,7 +542,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
         * The goal is to test that the bitmap entry space stealing doesn't
         * steal this space region.
         */
-       ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096);
+       ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize);
        if (ret) {
                test_msg("Error adding free space: %d\n", ret);
                return ret;
@@ -597,8 +600,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -ENOENT;
        }
 
-       if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) {
-               test_msg("Cache free space is not 1Mb + 4Kb\n");
+       if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) {
+               test_msg("Cache free space is not 1Mb + %u\n", sectorsize);
                return -EINVAL;
        }
 
@@ -611,22 +614,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -EINVAL;
        }
 
-       /* All that remains is a 4Kb free space region in a bitmap. Confirm. */
+       /*
+        * All that remains is a sectorsize free space region in a bitmap.
+        * Confirm.
+        */
        ret = check_num_extents_and_bitmaps(cache, 1, 1);
        if (ret)
                return ret;
 
-       if (cache->free_space_ctl->free_space != 4096) {
-               test_msg("Cache free space is not 4Kb\n");
+       if (cache->free_space_ctl->free_space != sectorsize) {
+               test_msg("Cache free space is not %u\n", sectorsize);
                return -EINVAL;
        }
 
        offset = btrfs_find_space_for_alloc(cache,
-                                           0, 4096, 0,
+                                           0, sectorsize, 0,
                                            &max_extent_size);
        if (offset != (SZ_128M + SZ_16M)) {
-               test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n",
-                        offset);
+               test_msg("Failed to allocate %u, returned offset : %llu\n",
+                        sectorsize, offset);
                return -EINVAL;
        }
 
@@ -733,7 +739,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
         * The goal is to test that the bitmap entry space stealing doesn't
         * steal this space region.
         */
-       ret = btrfs_add_free_space(cache, SZ_32M, 8192);
+       ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize);
        if (ret) {
                test_msg("Error adding free space: %d\n", ret);
                return ret;
@@ -757,7 +763,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 
        /*
         * Confirm that our extent entry didn't steal all free space from the
-        * bitmap, because of the small 8Kb free space region.
+        * bitmap, because of the small 2 * sectorsize free space region.
         */
        ret = check_num_extents_and_bitmaps(cache, 2, 1);
        if (ret)
@@ -783,8 +789,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -ENOENT;
        }
 
-       if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) {
-               test_msg("Cache free space is not 1Mb + 8Kb\n");
+       if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) {
+               test_msg("Cache free space is not 1Mb + %u\n", 2 * sectorsize);
                return -EINVAL;
        }
 
@@ -796,21 +802,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -EINVAL;
        }
 
-       /* All that remains is a 8Kb free space region in a bitmap. Confirm. */
+       /*
+        * All that remains is 2 * sectorsize free space region
+        * in a bitmap. Confirm.
+        */
        ret = check_num_extents_and_bitmaps(cache, 1, 1);
        if (ret)
                return ret;
 
-       if (cache->free_space_ctl->free_space != 8192) {
-               test_msg("Cache free space is not 8Kb\n");
+       if (cache->free_space_ctl->free_space != 2 * sectorsize) {
+               test_msg("Cache free space is not %u\n", 2 * sectorsize);
                return -EINVAL;
        }
 
        offset = btrfs_find_space_for_alloc(cache,
-                                           0, 8192, 0,
+                                           0, 2 * sectorsize, 0,
                                            &max_extent_size);
        if (offset != SZ_32M) {
-               test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n",
+               test_msg("Failed to allocate %u, offset: %llu\n",
+                        2 * sectorsize,
                         offset);
                return -EINVAL;
        }
@@ -825,7 +835,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
        return 0;
 }
 
-int btrfs_test_free_space_cache(void)
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_block_group_cache *cache;
        struct btrfs_root *root = NULL;
@@ -833,13 +843,19 @@ int btrfs_test_free_space_cache(void)
 
        test_msg("Running btrfs free space cache tests\n");
 
-       cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024);
+       /*
+        * On ppc64 (with a 64k page size), the bytes covered by one bitmap
+        * can exceed 1G.  To keep the bitmap tests meaningful there, allocate
+        * a dummy block group whose size crosses a bitmap boundary.
+        */
+       cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize
+                                       + PAGE_SIZE, sectorsize);
        if (!cache) {
                test_msg("Couldn't run the tests\n");
                return 0;
        }
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                ret = PTR_ERR(root);
                goto out;
@@ -855,14 +871,14 @@ int btrfs_test_free_space_cache(void)
        ret = test_extents(cache);
        if (ret)
                goto out;
-       ret = test_bitmaps(cache);
+       ret = test_bitmaps(cache, sectorsize);
        if (ret)
                goto out;
-       ret = test_bitmaps_and_extents(cache);
+       ret = test_bitmaps_and_extents(cache, sectorsize);
        if (ret)
                goto out;
 
-       ret = test_steal_space_from_bitmap_to_extent(cache);
+       ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);
 out:
        btrfs_free_dummy_block_group(cache);
        btrfs_free_dummy_root(root);
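
Two size facts drive this file's changes. One bitmap covers BITS_PER_BITMAP * sectorsize bytes: 4096 * 8 = 32768 bits times 4K sectors is 128M, so the old fixed 1G dummy group comfortably crossed a bitmap; with 64K pages and sectors the product is 32G, so the group must instead be sized to just cross one bitmap. The UL suffix on the 8 matters for the same case: a 32-bit multiply of 524288 * 65536 wraps to 0. The arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 65536, sectorsize = 65536;    /* ppc64 case */
        unsigned long bits_per_bitmap = page_size * 8UL;        /* 524288 */
        unsigned long long bytes =
                (unsigned long long)bits_per_bitmap * sectorsize;

        /* 524288 bits * 64K per bit = 32G; a 1G group never reaches a
         * second bitmap, and 32-bit multiplication would wrap to 0. */
        printf("one bitmap covers %llu bytes\n", bytes);        /* 34359738368 */
        return 0;
}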
index 7cea4462acd5f8d2ecf671be0c398dd2d6d62a98..aac507085ab0958f89a1ce81f0dc69d3824c2a6c 100644 (file)
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/types.h>
 #include "btrfs-tests.h"
 #include "../ctree.h"
 #include "../disk-io.h"
@@ -30,7 +31,7 @@ struct free_space_extent {
  * The test cases align their operations to this in order to hit some of the
  * edge cases in the bitmap code.
  */
-#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096)
+#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE)
 
 static int __check_free_space_extents(struct btrfs_trans_handle *trans,
                                      struct btrfs_fs_info *fs_info,
@@ -439,7 +440,8 @@ typedef int (*test_func_t)(struct btrfs_trans_handle *,
                           struct btrfs_block_group_cache *,
                           struct btrfs_path *);
 
-static int run_test(test_func_t test_func, int bitmaps)
+static int run_test(test_func_t test_func, int bitmaps,
+               u32 sectorsize, u32 nodesize)
 {
        struct btrfs_root *root = NULL;
        struct btrfs_block_group_cache *cache = NULL;
@@ -447,7 +449,7 @@ static int run_test(test_func_t test_func, int bitmaps)
        struct btrfs_path *path = NULL;
        int ret;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate dummy root\n");
                ret = PTR_ERR(root);
@@ -466,7 +468,8 @@ static int run_test(test_func_t test_func, int bitmaps)
        root->fs_info->free_space_root = root;
        root->fs_info->tree_root = root;
 
-       root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+       root->node = alloc_test_extent_buffer(root->fs_info,
+               nodesize, nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -474,9 +477,9 @@ static int run_test(test_func_t test_func, int bitmaps)
        }
        btrfs_set_header_level(root->node, 0);
        btrfs_set_header_nritems(root->node, 0);
-       root->alloc_bytenr += 8192;
+       root->alloc_bytenr += 2 * nodesize;
 
-       cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE);
+       cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize);
        if (!cache) {
                test_msg("Couldn't allocate dummy block group cache\n");
                ret = -ENOMEM;
@@ -534,17 +537,18 @@ static int run_test(test_func_t test_func, int bitmaps)
        return ret;
 }
 
-static int run_test_both_formats(test_func_t test_func)
+static int run_test_both_formats(test_func_t test_func,
+       u32 sectorsize, u32 nodesize)
 {
        int ret;
 
-       ret = run_test(test_func, 0);
+       ret = run_test(test_func, 0, sectorsize, nodesize);
        if (ret)
                return ret;
-       return run_test(test_func, 1);
+       return run_test(test_func, 1, sectorsize, nodesize);
 }
 
-int btrfs_test_free_space_tree(void)
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
 {
        test_func_t tests[] = {
                test_empty_block_group,
@@ -561,9 +565,11 @@ int btrfs_test_free_space_tree(void)
 
        test_msg("Running free space tree tests\n");
        for (i = 0; i < ARRAY_SIZE(tests); i++) {
-               int ret = run_test_both_formats(tests[i]);
+               int ret = run_test_both_formats(tests[i], sectorsize,
+                       nodesize);
                if (ret) {
-                       test_msg("%pf failed\n", tests[i]);
+                       test_msg("%pf : sectorsize %u failed\n",
+                               tests[i], sectorsize);
                        return ret;
                }
        }
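
run_test_both_formats() exercises every case twice, once with the free-space tree stored as extents and once converted to bitmaps, so a format-specific regression cannot hide. The driver shape, reduced to a runnable sketch with hypothetical names:

#include <stdio.h>

typedef int (*test_func_t)(int bitmaps);

static int sample_test(int bitmaps)
{
        printf("running with bitmaps=%d\n", bitmaps);
        return 0;
}

static int run_test_both_formats(test_func_t fn)
{
        int ret = fn(0);        /* extent-based representation */

        if (ret)
                return ret;
        return fn(1);           /* bitmap-based representation */
}

int main(void)
{
        return run_test_both_formats(sample_test);
}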
index 8a25fe8b7c452e78d823da897068748ae280de32..29648c0a39f1bc380eaffb08f0d574098dc9d343 100644 (file)
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/types.h>
 #include "btrfs-tests.h"
 #include "../ctree.h"
 #include "../btrfs_inode.h"
@@ -86,19 +87,19 @@ static void insert_inode_item_key(struct btrfs_root *root)
  * diagram of how the extents will look, though this may not be possible. We
  * still want to make sure everything acts normally (the last number is not
  * inclusive).
  *
- * [0 - 5][5 -  6][6 - 10][10 - 4096][  4096 - 8192 ][8192 - 12288]
- * [hole ][inline][ hole ][ regular ][regular1 split][    hole    ]
+ * [0 - 5][5 -  6][     6 - 4096     ][4096 - 4100][4100 - 8195][ 8195 - 12291 ]
+ * [hole ][inline][hole but no extent][   hole    ][  regular  ][regular1 split]
  *
- * [ 12288 - 20480][20480 - 24576][  24576 - 28672  ][28672 - 36864][36864 - 45056]
- * [regular1 split][   prealloc1 ][prealloc1 written][   prealloc1 ][ compressed  ]
+ * [12291 - 16387][16387 - 24579 ][24579 - 28675][28675 - 32771][ 32771 - 36867   ]
+ * [     hole    ][regular1 split][  prealloc   ][  prealloc1  ][prealloc1 written]
  *
- * [45056 - 49152][49152-53248][53248-61440][61440-65536][     65536+81920   ]
- * [ compressed1 ][  regular  ][compressed1][  regular  ][ hole but no extent]
+ * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443 - 69635]
+ * [  prealloc1  ][ compressed  ][ compressed1 ][   regular   ][ compressed1 ]
  *
- * [81920-86016]
- * [  regular  ]
+ * [69635 - 73731][   73731 - 86019  ][86019 - 90115]
+ * [   regular   ][hole but no extent][   regular   ]
  */
-static void setup_file_extents(struct btrfs_root *root)
+static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
 {
        int slot = 0;
        u64 disk_bytenr = SZ_1M;
@@ -119,7 +120,7 @@ static void setup_file_extents(struct btrfs_root *root)
        insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
                      slot);
        slot++;
-       offset = 4096;
+       offset = sectorsize;
 
        /* Now another hole */
        insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
@@ -128,99 +129,106 @@ static void setup_file_extents(struct btrfs_root *root)
        offset += 4;
 
        /* Now for a regular extent */
-       insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0,
+                     disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       disk_bytenr += 4096;
-       offset += 4095;
+       disk_bytenr += sectorsize;
+       offset += sectorsize - 1;
 
        /*
         * Now for 3 extents that were split from a hole punch so we test
         * offsets properly.
         */
-       insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+                     4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG,
-                     0, slot);
+       offset += sectorsize;
+       insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0,
+                     BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+       offset += sectorsize;
+       insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+                     2 * sectorsize, disk_bytenr, 4 * sectorsize,
                      BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 8192;
-       disk_bytenr += 16384;
+       offset += 2 * sectorsize;
+       disk_bytenr += 4 * sectorsize;
 
        /* Now for an unwritten prealloc extent */
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+       insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+               sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
        slot++;
-       offset += 4096;
+       offset += sectorsize;
 
        /*
         * We want to jack up disk_bytenr a little more so the em stuff doesn't
         * merge our records.
         */
-       disk_bytenr += 8192;
+       disk_bytenr += 2 * sectorsize;
 
        /*
         * Now for a partially written prealloc extent, basically the same as
         * the hole punch example above.  Ram_bytes never changes when you mark
         * extents written btw.
         */
-       insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
-                     BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+                     4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       offset += sectorsize;
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize,
+                     disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0,
+                     slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+       offset += sectorsize;
+       insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+                     2 * sectorsize, disk_bytenr, 4 * sectorsize,
                      BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
        slot++;
-       offset += 8192;
-       disk_bytenr += 16384;
+       offset += 2 * sectorsize;
+       disk_bytenr += 4 * sectorsize;
 
        /* Now a normal compressed extent */
-       insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+       insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0,
+                     disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG,
+                     BTRFS_COMPRESS_ZLIB, slot);
        slot++;
-       offset += 8192;
+       offset += 2 * sectorsize;
        /* No merges */
-       disk_bytenr += 8192;
+       disk_bytenr += 2 * sectorsize;
 
        /* Now a split compressed extent */
-       insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+                     sectorsize, BTRFS_FILE_EXTENT_REG,
+                     BTRFS_COMPRESS_ZLIB, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096,
+       offset += sectorsize;
+       insert_extent(root, offset, sectorsize, sectorsize, 0,
+                     disk_bytenr + sectorsize, sectorsize,
                      BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096,
+       offset += sectorsize;
+       insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+                     2 * sectorsize, disk_bytenr, sectorsize,
                      BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
        slot++;
-       offset += 8192;
-       disk_bytenr += 8192;
+       offset += 2 * sectorsize;
+       disk_bytenr += 2 * sectorsize;
 
        /* Now extents that have a hole but no hole extent */
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+                     sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 16384;
-       disk_bytenr += 4096;
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       offset += 4 * sectorsize;
+       disk_bytenr += sectorsize;
+       insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+                     sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
 }
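
Plugging sectorsize = 4096 into the rewritten setup_file_extents() reproduces the old hard-coded layout, and the first few boundaries fall out exactly as in the updated diagram. A userspace sketch of the offset bookkeeping (layout only, no btrfs calls):

#include <stdio.h>

int main(void)
{
        unsigned long long off = 0, sectorsize = 4096;

        printf("hole           [%llu - %llu]\n", off, off + 5);  off += 5;
        printf("inline         [%llu - %llu]\n", off, off + 1);  off += 1;
        printf("no extent item [%llu - %llu]\n", off, sectorsize);
        off = sectorsize;
        printf("hole           [%llu - %llu]\n", off, off + 4);  off += 4;
        printf("regular        [%llu - %llu]\n", off, off + sectorsize - 1);
        /* prints 4100 - 8195, matching the diagram above */
        return 0;
}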
 
 static unsigned long prealloc_only = 0;
 static unsigned long compressed_only = 0;
 static unsigned long vacancy_only = 0;
 
-static noinline int test_btrfs_get_extent(void)
+static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
 {
        struct inode *inode = NULL;
        struct btrfs_root *root = NULL;
@@ -240,7 +248,7 @@ static noinline int test_btrfs_get_extent(void)
        BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
        BTRFS_I(inode)->location.offset = 0;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                goto out;
@@ -256,7 +264,7 @@ static noinline int test_btrfs_get_extent(void)
                goto out;
        }
 
-       root->node = alloc_dummy_extent_buffer(NULL, 4096);
+       root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                goto out;
@@ -273,7 +281,7 @@ static noinline int test_btrfs_get_extent(void)
 
        /* First with no extents */
        BTRFS_I(inode)->root = root;
-       em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0);
        if (IS_ERR(em)) {
                em = NULL;
                test_msg("Got an error when we shouldn't have\n");
@@ -295,7 +303,7 @@ static noinline int test_btrfs_get_extent(void)
         * setup_file_extents, so if you change anything there you need to
         * update the comment and update the expected values below.
         */
-       setup_file_extents(root);
+       setup_file_extents(root, sectorsize);
 
        em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0);
        if (IS_ERR(em)) {
@@ -318,7 +326,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -327,7 +335,8 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected an inline, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4091) {
+
+       if (em->start != offset || em->len != (sectorsize - 5)) {
                test_msg("Unexpected extent wanted start %llu len 1, got start "
                         "%llu len %llu\n", offset, em->start, em->len);
                goto out;
@@ -344,7 +353,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -366,7 +375,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Regular extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -375,7 +384,7 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4095) {
+       if (em->start != offset || em->len != sectorsize - 1) {
                test_msg("Unexpected extent wanted start %llu len 4095, got "
                         "start %llu len %llu\n", offset, em->start, em->len);
                goto out;
@@ -393,7 +402,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* The next 3 are split extents */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -402,9 +411,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -421,7 +431,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -430,9 +440,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a hole, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -442,7 +453,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -451,9 +462,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -475,7 +487,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Prealloc extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -484,9 +496,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != prealloc_only) {
@@ -503,7 +516,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* The next 3 are a half written prealloc extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -512,9 +525,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != prealloc_only) {
@@ -532,7 +546,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -541,9 +555,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -564,7 +579,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -573,9 +588,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != prealloc_only) {
@@ -598,7 +614,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Now for the compressed extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -607,9 +623,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u,"
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != compressed_only) {
@@ -631,7 +648,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Split compressed extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -640,9 +657,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u,"
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != compressed_only) {
@@ -665,7 +683,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -674,9 +692,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -691,7 +710,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -701,9 +720,10 @@ static noinline int test_btrfs_get_extent(void)
                         disk_bytenr, em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != compressed_only) {
@@ -725,7 +745,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* A hole between regular extents but no hole extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -734,9 +754,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -765,9 +786,10 @@ static noinline int test_btrfs_get_extent(void)
         * length of the actual hole; if this changes we'll have to change this
         * test.
         */
-       if (em->start != offset || em->len != 12288) {
-               test_msg("Unexpected extent wanted start %llu len 12288, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 3 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 3 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != vacancy_only) {
@@ -783,7 +805,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -792,9 +814,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u,"
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -815,7 +838,7 @@ static noinline int test_btrfs_get_extent(void)
        return ret;
 }
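
Every probe in test_btrfs_get_extent() follows the same walk: ask btrfs_get_extent() for sectorsize bytes at the current offset, check block_start/start/len/flags, then advance to em->start + em->len so the next lookup lands on the following mapping. The traversal skeleton, condensed from the unrolled checks above ("total" and the per-mapping assertions are left abstract):

        u64 offset = 0;
        struct extent_map *em;

        while (offset < total) {
                em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
                if (IS_ERR(em))
                        goto out;
                /* verify em->block_start, em->start, em->len, em->flags */
                offset = em->start + em->len;   /* next mapping begins here */
                free_extent_map(em);
        }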
 
-static int test_hole_first(void)
+static int test_hole_first(u32 sectorsize, u32 nodesize)
 {
        struct inode *inode = NULL;
        struct btrfs_root *root = NULL;
@@ -832,7 +855,7 @@ static int test_hole_first(void)
        BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
        BTRFS_I(inode)->location.offset = 0;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                goto out;
@@ -844,7 +867,7 @@ static int test_hole_first(void)
                goto out;
        }
 
-       root->node = alloc_dummy_extent_buffer(NULL, 4096);
+       root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                goto out;
@@ -861,9 +884,9 @@ static int test_hole_first(void)
         * btrfs_get_extent.
         */
        insert_inode_item_key(root);
-       insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, 1);
-       em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0);
+       insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize,
+                     sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1);
+       em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -872,9 +895,10 @@ static int test_hole_first(void)
                test_msg("Expected a hole, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != 0 || em->len != 4096) {
-               test_msg("Unexpected extent wanted start 0 len 4096, got start "
-                        "%llu len %llu\n", em->start, em->len);
+       if (em->start != 0 || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start 0 len %u, "
+                       "got start %llu len %llu\n",
+                       sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != vacancy_only) {
@@ -884,18 +908,19 @@ static int test_hole_first(void)
        }
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0);
+       em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
        }
-       if (em->block_start != 4096) {
+       if (em->block_start != sectorsize) {
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != 4096 || em->len != 4096) {
-               test_msg("Unexpected extent wanted start 4096 len 4096, got "
-                        "start %llu len %llu\n", em->start, em->len);
+       if (em->start != sectorsize || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %u len %u, "
+                       "got start %llu len %llu\n",
+                       sectorsize, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -912,7 +937,7 @@ static int test_hole_first(void)
        return ret;
 }
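
The accounting checks below lean on one constant: outstanding extents are counted in BTRFS_MAX_EXTENT_SIZE chunks (128M in kernels of this vintage), so a contiguous delalloc range of BTRFS_MAX_EXTENT_SIZE + sectorsize must be tracked as two extents. A userspace sketch of that division, with the 128M value taken as an assumption:

#include <stdio.h>

#define MAX_EXTENT_SIZE (128ULL * 1024 * 1024)  /* assumed BTRFS_MAX_EXTENT_SIZE */

/* How many outstanding extents a contiguous delalloc range needs. */
static unsigned long long nr_extents(unsigned long long len)
{
        return (len + MAX_EXTENT_SIZE - 1) / MAX_EXTENT_SIZE;
}

int main(void)
{
        unsigned long long sectorsize = 4096;

        printf("%llu\n", nr_extents(MAX_EXTENT_SIZE));                  /* 1 */
        printf("%llu\n", nr_extents(MAX_EXTENT_SIZE + sectorsize));     /* 2 */
        return 0;
}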
 
-static int test_extent_accounting(void)
+static int test_extent_accounting(u32 sectorsize, u32 nodesize)
 {
        struct inode *inode = NULL;
        struct btrfs_root *root = NULL;
@@ -924,7 +949,7 @@ static int test_extent_accounting(void)
                return ret;
        }
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                goto out;
@@ -954,10 +979,11 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE][4k] */
+       /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
        BTRFS_I(inode)->outstanding_extents++;
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
-                                       BTRFS_MAX_EXTENT_SIZE + 4095, NULL);
+                                       BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
+                                       NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -969,10 +995,10 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */
+       /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */
        ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
                               BTRFS_MAX_EXTENT_SIZE >> 1,
-                              (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
+                              (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
                               EXTENT_DELALLOC | EXTENT_DIRTY |
                               EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
                               NULL, GFP_KERNEL);
@@ -987,10 +1013,11 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE][4K] */
+       /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
        BTRFS_I(inode)->outstanding_extents++;
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
-                                       (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
+                                       (BTRFS_MAX_EXTENT_SIZE >> 1)
+                                       + sectorsize - 1,
                                        NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
@@ -1004,16 +1031,17 @@ static int test_extent_accounting(void)
        }
 
        /*
-        * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K]
+        * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize]
         *
         * I'm artificially adding 2 to outstanding_extents because in the
         * buffered IO case we'd add things up as we go, but I don't feel like
         * doing that here, this isn't the interesting case we want to test.
         */
        BTRFS_I(inode)->outstanding_extents += 2;
-       ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192,
-                                       (BTRFS_MAX_EXTENT_SIZE << 1) + 12287,
-                                       NULL);
+       ret = btrfs_set_extent_delalloc(inode,
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
+                       (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
+                       NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1025,10 +1053,13 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */
+       /*
+        * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize]
+        */
        BTRFS_I(inode)->outstanding_extents++;
-       ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
-                                       BTRFS_MAX_EXTENT_SIZE+8191, NULL);
+       ret = btrfs_set_extent_delalloc(inode,
+                       BTRFS_MAX_EXTENT_SIZE + sectorsize,
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1042,8 +1073,8 @@ static int test_extent_accounting(void)
 
        /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */
        ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
-                              BTRFS_MAX_EXTENT_SIZE+4096,
-                              BTRFS_MAX_EXTENT_SIZE+8191,
+                              BTRFS_MAX_EXTENT_SIZE + sectorsize,
+                              BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
                               EXTENT_DIRTY | EXTENT_DELALLOC |
                               EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
                               NULL, GFP_KERNEL);
@@ -1063,8 +1094,9 @@ static int test_extent_accounting(void)
         * might fail and I'd rather satisfy my paranoia at this point.
         */
        BTRFS_I(inode)->outstanding_extents++;
-       ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
-                                       BTRFS_MAX_EXTENT_SIZE+8191, NULL);
+       ret = btrfs_set_extent_delalloc(inode,
+                       BTRFS_MAX_EXTENT_SIZE + sectorsize,
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1103,7 +1135,7 @@ static int test_extent_accounting(void)
        return ret;
 }
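
Throughout these hunks the inclusive range end is spelled start + len - 1 (for example BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1), because btrfs_set_extent_delalloc() and clear_extent_bit() take inclusive [start, end] offsets. A minimal standalone sketch of that arithmetic (plain userspace C, names invented):

#include <stdint.h>
#include <stdio.h>

/* Inclusive end offset of a len-byte range starting at 'start'. */
static uint64_t range_end(uint64_t start, uint64_t len)
{
        return start + len - 1;
}

int main(void)
{
        uint64_t sectorsize = 4096;

        /* Two adjacent one-sector ranges must not overlap. */
        uint64_t a_end   = range_end(0, sectorsize);   /* 4095 */
        uint64_t b_start = sectorsize;                 /* 4096 */

        printf("a = [0, %llu], b starts at %llu: %s\n",
               (unsigned long long)a_end, (unsigned long long)b_start,
               a_end < b_start ? "disjoint" : "overlapping");
        return 0;
}
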
 
-int btrfs_test_inodes(void)
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
 {
        int ret;
 
@@ -1112,13 +1144,13 @@ int btrfs_test_inodes(void)
        set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
 
        test_msg("Running btrfs_get_extent tests\n");
-       ret = test_btrfs_get_extent();
+       ret = test_btrfs_get_extent(sectorsize, nodesize);
        if (ret)
                return ret;
        test_msg("Running hole first btrfs_get_extent test\n");
-       ret = test_hole_first();
+       ret = test_hole_first(sectorsize, nodesize);
        if (ret)
                return ret;
        test_msg("Running outstanding_extents tests\n");
-       return test_extent_accounting();
+       return test_extent_accounting(sectorsize, nodesize);
 }
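
The test entry points now thread sectorsize and nodesize down from the caller, presumably so the harness can exercise the same test bodies at several block sizes. A sketch of such a runner, with all names and the size list invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for one parameterized test body. */
static int test_inode_layout(uint32_t sectorsize, uint32_t nodesize)
{
        /* Every offset derives from sectorsize instead of a literal 4096. */
        uint64_t hole_end = sectorsize;
        uint64_t data_end = 2 * (uint64_t)sectorsize;

        printf("sectorsize=%u nodesize=%u hole=[0,%llu) data=[%llu,%llu)\n",
               (unsigned)sectorsize, (unsigned)nodesize,
               (unsigned long long)hole_end,
               (unsigned long long)hole_end,
               (unsigned long long)data_end);
        return 0;
}

int main(void)
{
        /* The same body runs once per block size under test. */
        static const uint32_t sectorsizes[] = { 4096, 8192, 16384, 65536 };

        for (unsigned i = 0; i < sizeof(sectorsizes) / sizeof(*sectorsizes); i++)
                if (test_inode_layout(sectorsizes[i], 16384))
                        return 1;
        return 0;
}
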
index 8aa4ded31326c95e8b19b4ced8f9c9fab1c4c8de..57a12c0d680ba300461ff5ee9714833b541f2810 100644
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/types.h>
 #include "btrfs-tests.h"
 #include "../ctree.h"
 #include "../transaction.h"
@@ -216,7 +217,8 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
        return ret;
 }
 
-static int test_no_shared_qgroup(struct btrfs_root *root)
+static int test_no_shared_qgroup(struct btrfs_root *root,
+               u32 sectorsize, u32 nodesize)
 {
        struct btrfs_trans_handle trans;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -227,7 +229,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
        btrfs_init_dummy_trans(&trans);
 
        test_msg("Qgroup basic add\n");
-       ret = btrfs_create_qgroup(NULL, fs_info, 5);
+       ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID);
        if (ret) {
                test_msg("Couldn't create a qgroup %d\n", ret);
                return ret;
@@ -238,18 +240,19 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
         * we can only call btrfs_qgroup_account_extent() directly to test
         * quota.
         */
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
+       ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FS_TREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -257,32 +260,33 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                               nodesize, nodesize)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
        old_roots = NULL;
        new_roots = NULL;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = remove_extent_item(root, 4096, 4096);
+       ret = remove_extent_item(root, nodesize, nodesize);
        if (ret)
                return -EINVAL;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -290,14 +294,14 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return -EINVAL;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 0, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
@@ -310,7 +314,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
  * right, also remove one of the roots and make sure the exclusive count is
  * adjusted properly.
  */
-static int test_multiple_refs(struct btrfs_root *root)
+static int test_multiple_refs(struct btrfs_root *root,
+               u32 sectorsize, u32 nodesize)
 {
        struct btrfs_trans_handle trans;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -322,25 +327,29 @@ static int test_multiple_refs(struct btrfs_root *root)
 
        test_msg("Qgroup multiple refs test\n");
 
-       /* We have 5 created already from the previous test */
-       ret = btrfs_create_qgroup(NULL, fs_info, 256);
+       /*
+        * We have BTRFS_FS_TREE_OBJECTID created already from the
+        * previous test.
+        */
+       ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID);
        if (ret) {
                test_msg("Couldn't create a qgroup %d\n", ret);
                return ret;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
+       ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FS_TREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -348,30 +357,32 @@ static int test_multiple_refs(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                                      nodesize, nodesize)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = add_tree_ref(root, 4096, 4096, 0, 256);
+       ret = add_tree_ref(root, nodesize, nodesize, 0,
+                       BTRFS_FIRST_FREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -379,35 +390,38 @@ static int test_multiple_refs(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                                       nodesize, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 256, 4096, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+                                       nodesize, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = remove_extent_ref(root, 4096, 4096, 0, 256);
+       ret = remove_extent_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FIRST_FREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -415,19 +429,21 @@ static int test_multiple_refs(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 256, 0, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+                                       0, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                                       nodesize, nodesize)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
@@ -435,13 +451,13 @@ static int test_multiple_refs(struct btrfs_root *root)
        return 0;
 }
 
-int btrfs_test_qgroups(void)
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_root *root;
        struct btrfs_root *tmp_root;
        int ret = 0;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                return PTR_ERR(root);
@@ -468,7 +484,8 @@ int btrfs_test_qgroups(void)
         * Can't use bytenr 0, some things freak out
         * *cough*backref walking code*cough*
         */
-       root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+       root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
+                                       nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -476,16 +493,16 @@ int btrfs_test_qgroups(void)
        }
        btrfs_set_header_level(root->node, 0);
        btrfs_set_header_nritems(root->node, 0);
-       root->alloc_bytenr += 8192;
+       root->alloc_bytenr += 2 * nodesize;
 
-       tmp_root = btrfs_alloc_dummy_root();
+       tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(tmp_root)) {
                test_msg("Couldn't allocate a fs root\n");
                ret = PTR_ERR(tmp_root);
                goto out;
        }
 
-       tmp_root->root_key.objectid = 5;
+       tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID;
        root->fs_info->fs_root = tmp_root;
        ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
        if (ret) {
@@ -493,14 +510,14 @@ int btrfs_test_qgroups(void)
                goto out;
        }
 
-       tmp_root = btrfs_alloc_dummy_root();
+       tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(tmp_root)) {
                test_msg("Couldn't allocate a fs root\n");
                ret = PTR_ERR(tmp_root);
                goto out;
        }
 
-       tmp_root->root_key.objectid = 256;
+       tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID;
        ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
        if (ret) {
                test_msg("Couldn't insert fs root %d\n", ret);
@@ -508,10 +525,10 @@ int btrfs_test_qgroups(void)
        }
 
        test_msg("Running qgroup tests\n");
-       ret = test_no_shared_qgroup(root);
+       ret = test_no_shared_qgroup(root, sectorsize, nodesize);
        if (ret)
                goto out;
-       ret = test_multiple_refs(root);
+       ret = test_multiple_refs(root, sectorsize, nodesize);
 out:
        btrfs_free_dummy_root(root);
        return ret;
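
These qgroup hunks replace the magic ids 5 and 256 with BTRFS_FS_TREE_OBJECTID and BTRFS_FIRST_FREE_OBJECTID and size the test extents in nodesize units, since the refs being counted are tree blocks. A hedged userspace sketch of the verification step (the qgroup table and helper are invented; the two constants match the btrfs definitions):

#include <stdint.h>
#include <stdio.h>

/* These two values match the btrfs on-disk definitions. */
#define FS_TREE_OBJECTID    5ULL
#define FIRST_FREE_OBJECTID 256ULL

struct qgroup { uint64_t id, rfer, excl; };

/* Return 0 when the named qgroup holds the expected counters. */
static int verify_counts(const struct qgroup *g, unsigned n,
                         uint64_t id, uint64_t rfer, uint64_t excl)
{
        for (unsigned i = 0; i < n; i++)
                if (g[i].id == id)
                        return !(g[i].rfer == rfer && g[i].excl == excl);
        return 1; /* no such qgroup */
}

int main(void)
{
        uint32_t nodesize = 16384;
        struct qgroup groups[] = {
                { FS_TREE_OBJECTID,    nodesize, nodesize },
                { FIRST_FREE_OBJECTID, nodesize, 0        },
        };

        if (verify_counts(groups, 2, FS_TREE_OBJECTID,    nodesize, nodesize) ||
            verify_counts(groups, 2, FIRST_FREE_OBJECTID, nodesize, 0)) {
                fprintf(stderr, "qgroup counts didn't match expected values\n");
                return 1;
        }
        return 0;
}
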
index f6e24cb423aed0c78316b983d3687d07bb2ae1ec..948aa186b353caf55748fd983e975c127b654f0f 100644
@@ -818,6 +818,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 {
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
+       u64 transid = trans->transid;
        unsigned long cur = trans->delayed_ref_updates;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;
@@ -905,7 +906,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        if (must_run_delayed_refs) {
-               btrfs_async_run_delayed_refs(root, cur,
+               btrfs_async_run_delayed_refs(root, cur, transid,
                                             must_run_delayed_refs == 1);
        }
        return err;
@@ -1311,11 +1312,6 @@ int btrfs_defrag_root(struct btrfs_root *root)
        return ret;
 }
 
-/* Bisesctability fixup, remove in 4.8 */
-#ifndef btrfs_std_error
-#define btrfs_std_error btrfs_handle_fs_error
-#endif
-
 /*
  * Do all special snapshot related qgroup dirty hack.
  *
@@ -1385,7 +1381,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
        switch_commit_roots(trans->transaction, fs_info);
        ret = btrfs_write_and_wait_transaction(trans, src);
        if (ret)
-               btrfs_std_error(fs_info, ret,
+               btrfs_handle_fs_error(fs_info, ret,
                        "Error while writing out transaction for qgroup");
 
 out:
index 9fe0ec2bf0fe7fefda878bb9e885071f0d8e3f7e..c5abee4f01add54f811c78fac76170f8b5c02bb3 100644
@@ -110,7 +110,6 @@ struct btrfs_trans_handle {
        u64 chunk_bytes_reserved;
        unsigned long use_count;
        unsigned long blocks_reserved;
-       unsigned long blocks_used;
        unsigned long delayed_ref_updates;
        struct btrfs_transaction *transaction;
        struct btrfs_block_rsv *block_rsv;
@@ -121,6 +120,7 @@ struct btrfs_trans_handle {
        bool can_flush_pending_bgs;
        bool reloc_reserved;
        bool sync;
+       bool dirty;
        unsigned int type;
        /*
         * this root is only needed to validate that the root passed to
index b7665af471d88510d391c4ffc8bcbb65e506bd50..c05f69a8ec42dad19269620e0ec7e8b38f91ca25 100644
@@ -2422,8 +2422,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                root_owner = btrfs_header_owner(parent);
 
                next = btrfs_find_create_tree_block(root, bytenr);
-               if (!next)
-                       return -ENOMEM;
+               if (IS_ERR(next))
+                       return PTR_ERR(next);
 
                if (*level == 1) {
                        ret = wc->process_func(root, next, wc, ptr_gen);
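
This hunk, like the btrfs_read_sys_array one further down, switches from a NULL check to IS_ERR()/PTR_ERR() because btrfs_find_create_tree_block() now encodes the errno in the returned pointer. A userspace re-creation of the kernel's ERR_PTR convention, for illustration only:

#include <errno.h>
#include <stdio.h>

/* Userspace copies of the kernel's ERR_PTR helpers. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Allocator that reports *why* it failed instead of a bare NULL. */
static void *find_create_block(int fail)
{
        static int block;

        if (fail)
                return ERR_PTR(-ENOMEM);
        return &block;
}

int main(void)
{
        void *next = find_create_block(1);

        if (IS_ERR(next)) {
                printf("propagating errno %ld\n", PTR_ERR(next));
                return 1;
        }
        return 0;
}
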
index da9e0036a864d3d8cf45b164b0fb083675223584..589f128173b10e1648ce2714cd61c042d5c9e6da 100644
@@ -4241,6 +4241,7 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
        if (IS_ERR(uuid_root)) {
                ret = PTR_ERR(uuid_root);
                btrfs_abort_transaction(trans, tree_root, ret);
+               btrfs_end_transaction(trans, tree_root);
                return ret;
        }
 
@@ -4693,12 +4694,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
        if (type & BTRFS_BLOCK_GROUP_RAID5) {
                raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
-                                btrfs_super_stripesize(info->super_copy));
+                                               extent_root->stripesize);
                data_stripes = num_stripes - 1;
        }
        if (type & BTRFS_BLOCK_GROUP_RAID6) {
                raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
-                                btrfs_super_stripesize(info->super_copy));
+                                               extent_root->stripesize);
                data_stripes = num_stripes - 2;
        }
 
@@ -6258,27 +6259,23 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
        return dev;
 }
 
-static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
-                         struct extent_buffer *leaf,
-                         struct btrfs_chunk *chunk)
+/* Return -EIO on any error, otherwise return 0. */
+static int btrfs_check_chunk_valid(struct btrfs_root *root,
+                                  struct extent_buffer *leaf,
+                                  struct btrfs_chunk *chunk, u64 logical)
 {
-       struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
-       struct map_lookup *map;
-       struct extent_map *em;
-       u64 logical;
        u64 length;
        u64 stripe_len;
-       u64 devid;
-       u8 uuid[BTRFS_UUID_SIZE];
-       int num_stripes;
-       int ret;
-       int i;
+       u16 num_stripes;
+       u16 sub_stripes;
+       u64 type;
 
-       logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);
        stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
-       /* Validation check */
+       sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+       type = btrfs_chunk_type(leaf, chunk);
+
        if (!num_stripes) {
                btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
                          num_stripes);
@@ -6289,6 +6286,11 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                          "invalid chunk logical %llu", logical);
                return -EIO;
        }
+       if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
+               btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
+                         btrfs_chunk_sector_size(leaf, chunk));
+               return -EIO;
+       }
        if (!length || !IS_ALIGNED(length, root->sectorsize)) {
                btrfs_err(root->fs_info,
                        "invalid chunk length %llu", length);
@@ -6300,13 +6302,54 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                return -EIO;
        }
        if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
-           btrfs_chunk_type(leaf, chunk)) {
+           type) {
                btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
                          ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
                            BTRFS_BLOCK_GROUP_PROFILE_MASK) &
                          btrfs_chunk_type(leaf, chunk));
                return -EIO;
        }
+       if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
+           (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
+           (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
+           (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
+           (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
+           ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
+            num_stripes != 1)) {
+               btrfs_err(root->fs_info,
+                       "invalid num_stripes:sub_stripes %u:%u for profile %llu",
+                       num_stripes, sub_stripes,
+                       type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
+                         struct extent_buffer *leaf,
+                         struct btrfs_chunk *chunk)
+{
+       struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+       struct map_lookup *map;
+       struct extent_map *em;
+       u64 logical;
+       u64 length;
+       u64 stripe_len;
+       u64 devid;
+       u8 uuid[BTRFS_UUID_SIZE];
+       int num_stripes;
+       int ret;
+       int i;
+
+       logical = key->offset;
+       length = btrfs_chunk_length(leaf, chunk);
+       stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+       num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+
+       ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
+       if (ret)
+               return ret;
 
        read_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
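
btrfs_check_chunk_valid() gathers the sanity checks formerly inlined in read_one_chunk() so malformed chunk items are rejected before anything acts on them. A compact sketch of the same validation style (struct layout and limits simplified, -1 standing in for -EIO):

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

struct chunk {
        uint64_t length;
        uint64_t stripe_len;
        uint16_t num_stripes;
};

/* Return -1 on any inconsistency (the kernel returns -EIO), else 0. */
static int check_chunk_valid(const struct chunk *c, uint32_t sectorsize)
{
        if (!c->num_stripes) {
                fprintf(stderr, "invalid chunk num_stripes: %u\n",
                        (unsigned)c->num_stripes);
                return -1;
        }
        if (!c->length || !IS_ALIGNED(c->length, sectorsize)) {
                fprintf(stderr, "invalid chunk length %llu\n",
                        (unsigned long long)c->length);
                return -1;
        }
        if (!IS_ALIGNED(c->stripe_len, sectorsize)) {
                fprintf(stderr, "invalid chunk stripe_len %llu\n",
                        (unsigned long long)c->stripe_len);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct chunk bad = { .length = 4000, .stripe_len = 65536,
                             .num_stripes = 1 };

        /* 4000 is not sector-aligned, so this must be rejected. */
        return check_chunk_valid(&bad, 4096) ? 0 : 1;
}
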
@@ -6554,6 +6597,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        u32 array_size;
        u32 len = 0;
        u32 cur_offset;
+       u64 type;
        struct btrfs_key key;
 
        ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
@@ -6563,8 +6607,8 @@ int btrfs_read_sys_array(struct btrfs_root *root)
         * overallocate but we can keep it as-is, only the first page is used.
         */
        sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
-       if (!sb)
-               return -ENOMEM;
+       if (IS_ERR(sb))
+               return PTR_ERR(sb);
        set_extent_buffer_uptodate(sb);
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
        /*
@@ -6620,6 +6664,15 @@ int btrfs_read_sys_array(struct btrfs_root *root)
                                break;
                        }
 
+                       type = btrfs_chunk_type(sb, chunk);
+                       if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
+                               btrfs_err(root->fs_info,
+                           "invalid chunk type %llu in sys_array at offset %u",
+                                       type, cur_offset);
+                               ret = -EIO;
+                               break;
+                       }
+
                        len = btrfs_chunk_item_size(num_stripes);
                        if (cur_offset + len > array_size)
                                goto out_short_read;
@@ -6638,12 +6691,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
                sb_array_offset += len;
                cur_offset += len;
        }
+       clear_extent_buffer_uptodate(sb);
        free_extent_buffer_stale(sb);
        return ret;
 
 out_short_read:
        printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
                        len, cur_offset);
+       clear_extent_buffer_uptodate(sb);
        free_extent_buffer_stale(sb);
        return -EIO;
 }
@@ -6656,6 +6711,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
        struct btrfs_key found_key;
        int ret;
        int slot;
+       u64 total_dev = 0;
 
        root = root->fs_info->chunk_root;
 
@@ -6697,6 +6753,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
                        ret = read_one_dev(root, leaf, dev_item);
                        if (ret)
                                goto error;
+                       total_dev++;
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
@@ -6706,6 +6763,28 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
                }
                path->slots[0]++;
        }
+
+       /*
+        * After loading chunk tree, we've got all device information,
+        * do another round of validation checks.
+        */
+       if (total_dev != root->fs_info->fs_devices->total_devices) {
+               btrfs_err(root->fs_info,
+          "super_num_devices %llu mismatch with num_devices %llu found here",
+                         btrfs_super_num_devices(root->fs_info->super_copy),
+                         total_dev);
+               ret = -EINVAL;
+               goto error;
+       }
+       if (btrfs_super_total_bytes(root->fs_info->super_copy) <
+           root->fs_info->fs_devices->total_rw_bytes) {
+               btrfs_err(root->fs_info,
+       "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
+                         btrfs_super_total_bytes(root->fs_info->super_copy),
+                         root->fs_info->fs_devices->total_rw_bytes);
+               ret = -EINVAL;
+               goto error;
+       }
        ret = 0;
 error:
        unlock_chunks(root);
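
Once the chunk tree is loaded, two new cross-checks compare what was actually found on disk against the superblock's claims: the device count, and total_bytes versus the devices' combined rw bytes. A simplified stand-alone version (the scan_result struct is invented):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical summary of what a chunk-tree scan found. */
struct scan_result {
        uint64_t devices_found;
        uint64_t rw_bytes_found;
};

/* Cross-check scan results against the superblock; -1 on mismatch. */
static int validate_against_super(const struct scan_result *scan,
                                  uint64_t super_num_devices,
                                  uint64_t super_total_bytes)
{
        if (scan->devices_found != super_num_devices) {
                fprintf(stderr,
                        "super_num_devices %llu mismatch, found %llu\n",
                        (unsigned long long)super_num_devices,
                        (unsigned long long)scan->devices_found);
                return -1;
        }
        if (super_total_bytes < scan->rw_bytes_found) {
                fprintf(stderr,
                        "super_total_bytes %llu below devices' %llu\n",
                        (unsigned long long)super_total_bytes,
                        (unsigned long long)scan->rw_bytes_found);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct scan_result scan = { .devices_found = 2,
                                    .rw_bytes_found = 1ULL << 30 };

        return validate_against_super(&scan, 2, 1ULL << 30) ? 1 : 0;
}
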
index 38a7ab87e10a80d9d62d80b74dd58c1f2989a966..281b768000e664e4d4ef9092d4bb567d003623a0 100644
@@ -794,6 +794,7 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
                        return 0;
                file->f_pos = pos;
                cprm->written += n;
+               cprm->pos += n;
                nr -= n;
        }
        return 1;
@@ -808,6 +809,7 @@ int dump_skip(struct coredump_params *cprm, size_t nr)
                if (dump_interrupted() ||
                    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
                        return 0;
+               cprm->pos += nr;
                return 1;
        } else {
                while (nr > PAGE_SIZE) {
@@ -822,7 +824,7 @@ EXPORT_SYMBOL(dump_skip);
 
 int dump_align(struct coredump_params *cprm, int align)
 {
-       unsigned mod = cprm->file->f_pos & (align - 1);
+       unsigned mod = cprm->pos & (align - 1);
        if (align & (align - 1))
                return 0;
        return mod ? dump_skip(cprm, align - mod) : 1;
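
dump_align() now derives the current offset from cprm->pos, which dump_emit() and dump_skip() both keep up to date, so alignment also works when the core dump goes to a pipe with no usable f_pos. The arithmetic itself, runnable on its own:

#include <stdio.h>

/* Padding needed to bring 'pos' up to 'align'; align must be a power of two. */
static int pad_to_align(unsigned long long pos, unsigned align)
{
        unsigned mod = pos & (align - 1); /* cheap modulo, powers of two only */

        if (align & (align - 1))
                return -1;                /* invalid alignment */
        return mod ? (int)(align - mod) : 0;
}

int main(void)
{
        printf("%d\n", pad_to_align(4097, 8)); /* 7 */
        printf("%d\n", pad_to_align(4096, 8)); /* 0 */
        printf("%d\n", pad_to_align(4096, 6)); /* -1 */
        return 0;
}
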
index ad4a542e9babdceb321a77ef5c8837ad70177a6a..d6847d7b123d4ca0fe4a9100b751b673d8801cd9 100644 (file)
@@ -507,6 +507,44 @@ void d_drop(struct dentry *dentry)
 }
 EXPORT_SYMBOL(d_drop);
 
+static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
+{
+       struct dentry *next;
+       /*
+        * Inform d_walk() and shrink_dentry_list() that we are no longer
+        * attached to the dentry tree
+        */
+       dentry->d_flags |= DCACHE_DENTRY_KILLED;
+       if (unlikely(list_empty(&dentry->d_child)))
+               return;
+       __list_del_entry(&dentry->d_child);
+       /*
+        * Cursors can move around the list of children.  While we'd been
+        * a normal list member, it didn't matter - ->d_child.next would've
+        * been updated.  However, from now on it won't be and for the
+        * things like d_walk() it might end up with a nasty surprise.
+        * Normally d_walk() doesn't care about cursors moving around -
+        * ->d_lock on parent prevents that and since a cursor has no children
+        * of its own, we get through it without ever unlocking the parent.
+        * There is one exception, though - if we ascend from a child that
+        * gets killed as soon as we unlock it, the next sibling is found
+        * using the value left in its ->d_child.next.  And if _that_
+        * pointed to a cursor, and cursor got moved (e.g. by lseek())
+        * before d_walk() regains parent->d_lock, we'll end up skipping
+        * everything the cursor had been moved past.
+        *
+        * Solution: make sure that the pointer left behind in ->d_child.next
+        * points to something that won't be moving around.  I.e. skip the
+        * cursors.
+        */
+       while (dentry->d_child.next != &parent->d_subdirs) {
+               next = list_entry(dentry->d_child.next, struct dentry, d_child);
+               if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
+                       break;
+               dentry->d_child.next = next->d_child.next;
+       }
+}
+
 static void __dentry_kill(struct dentry *dentry)
 {
        struct dentry *parent = NULL;
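
dentry_unlist() makes sure the forward pointer a killed dentry leaves behind never points at a cursor, since lseek() can move cursors while the parent lock is dropped. The pointer-adjustment idea on a plain singly linked list (everything here is invented for illustration):

#include <stdio.h>

struct node {
        struct node *next;
        int is_cursor; /* cursors may be moved at any time */
        int value;
};

/*
 * When removing 'dead', advance its forward pointer past any cursors,
 * so a traversal resuming from dead->next can never land on an entry
 * that is about to move.
 */
static void unlist(struct node *dead)
{
        while (dead->next && dead->next->is_cursor)
                dead->next = dead->next->next;
}

int main(void)
{
        struct node tail   = { 0 };
        struct node cursor = { &tail, 1, 0 };
        struct node dead   = { &cursor, 0, 7 };

        unlist(&dead);
        printf("resume pointer skips the cursor: %s\n",
               dead.next == &tail ? "yes" : "no");
        return 0;
}
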
@@ -532,12 +570,7 @@ static void __dentry_kill(struct dentry *dentry)
        }
        /* if it was on the hash then remove it */
        __d_drop(dentry);
-       __list_del_entry(&dentry->d_child);
-       /*
-        * Inform d_walk() that we are no longer attached to the
-        * dentry tree
-        */
-       dentry->d_flags |= DCACHE_DENTRY_KILLED;
+       dentry_unlist(dentry, parent);
        if (parent)
                spin_unlock(&parent->d_lock);
        dentry_iput(dentry);
@@ -1203,6 +1236,9 @@ static void d_walk(struct dentry *parent, void *data,
                struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
                next = tmp->next;
 
+               if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
+                       continue;
+
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 
                ret = enter(data, dentry);
@@ -1636,7 +1672,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
        struct dentry *dentry = __d_alloc(parent->d_sb, name);
        if (!dentry)
                return NULL;
-
+       dentry->d_flags |= DCACHE_RCUACCESS;
        spin_lock(&parent->d_lock);
        /*
         * don't need child lock because it is not subject
@@ -1651,6 +1687,16 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 }
 EXPORT_SYMBOL(d_alloc);
 
+struct dentry *d_alloc_cursor(struct dentry * parent)
+{
+       struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
+       if (dentry) {
+               dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
+               dentry->d_parent = dget(parent);
+       }
+       return dentry;
+}
+
 /**
  * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
  * @sb: the superblock
@@ -2358,7 +2404,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
        BUG_ON(!d_unhashed(entry));
        hlist_bl_lock(b);
-       entry->d_flags |= DCACHE_RCUACCESS;
        hlist_bl_add_head_rcu(&entry->d_hash, b);
        hlist_bl_unlock(b);
 }
@@ -2458,7 +2503,6 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
                rcu_read_unlock();
                goto retry;
        }
-       rcu_read_unlock();
        /*
         * No changes for the parent since the beginning of d_lookup().
         * Since all removals from the chain happen with hlist_bl_lock(),
@@ -2471,8 +2515,6 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
                        continue;
                if (dentry->d_parent != parent)
                        continue;
-               if (d_unhashed(dentry))
-                       continue;
                if (parent->d_flags & DCACHE_OP_COMPARE) {
                        int tlen = dentry->d_name.len;
                        const char *tname = dentry->d_name.name;
@@ -2484,9 +2526,18 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
                        if (dentry_cmp(dentry, str, len))
                                continue;
                }
-               dget(dentry);
                hlist_bl_unlock(b);
-               /* somebody is doing lookup for it right now; wait for it */
+               /* now we can try to grab a reference */
+               if (!lockref_get_not_dead(&dentry->d_lockref)) {
+                       rcu_read_unlock();
+                       goto retry;
+               }
+
+               rcu_read_unlock();
+               /*
+                * somebody is likely to be still doing lookup for it;
+                * wait for them to finish
+                */
                spin_lock(&dentry->d_lock);
                d_wait_lookup(dentry);
                /*
@@ -2517,6 +2568,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
                dput(new);
                return dentry;
        }
+       rcu_read_unlock();
        /* we can't take ->d_lock here; it's OK, though. */
        new->d_flags |= DCACHE_PAR_LOOKUP;
        new->d_wait = wq;
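
d_alloc_parallel() now takes its reference with lockref_get_not_dead() while still under rcu_read_lock(), and retries the whole lookup if the candidate dentry already died. A simplified model of that try-get-or-retry step using C11 atomics (the dentry and lockref details are elided):

#include <stdatomic.h>
#include <stdio.h>

struct obj {
        atomic_int refcount; /* 0 means the object is already dead */
};

/* Mimics lockref_get_not_dead(): succeed only while refcount > 0. */
static int get_not_dead(struct obj *o)
{
        int old = atomic_load(&o->refcount);

        while (old > 0)
                if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
                        return 1;
        return 0; /* too late; the caller must retry its lookup */
}

int main(void)
{
        struct obj live = { 1 };
        struct obj dead = { 0 };

        printf("live: %d, dead: %d\n", get_not_dead(&live), get_not_dead(&dead));
        return 0;
}
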
@@ -2843,6 +2895,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
        /* ... and switch them in the tree */
        if (IS_ROOT(dentry)) {
                /* splicing a tree */
+               dentry->d_flags |= DCACHE_RCUACCESS;
                dentry->d_parent = target->d_parent;
                target->d_parent = target;
                list_del_init(&target->d_child);
index 9c1c9a01b7e5120da23888828300d3f1b16e04c4..592059f88e04f7757c9e3bc413ab78bec58dee09 100644
@@ -127,7 +127,6 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
                r = real_fops->open(inode, filp);
 
 out:
-       fops_put(real_fops);
        debugfs_use_file_finish(srcu_idx);
        return r;
 }
@@ -262,8 +261,10 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
 
        if (real_fops->open) {
                r = real_fops->open(inode, filp);
-
-               if (filp->f_op != proxy_fops) {
+               if (r) {
+                       replace_fops(filp, d_inode(dentry)->i_fop);
+                       goto free_proxy;
+               } else if (filp->f_op != proxy_fops) {
                        /* No protection against file removal anymore. */
                        WARN(1, "debugfs file owner replaced proxy fops: %pd",
                                dentry);
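
On a failed ->open, full_proxy_open() now swaps the file back to the real fops and frees the proxy rather than leaving the file pointing at it; the sibling hunk drops an fops_put() from open_proxy_open(). A toy version of the install-then-undo pattern (structures invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fops { int (*open)(void); };
struct file { const struct fops *f_op; };

static int failing_open(void) { return -EIO; }
static const struct fops real_fops = { .open = failing_open };

/* Install a proxy ops table, but undo the swap if the real open fails. */
static int proxy_open(struct file *filp)
{
        struct fops *proxy = malloc(sizeof(*proxy));
        int r;

        if (!proxy)
                return -ENOMEM;
        *proxy = real_fops;          /* proxy wraps the real operations */
        filp->f_op = proxy;

        r = filp->f_op->open();
        if (r) {
                filp->f_op = &real_fops; /* swap back before freeing */
                free(proxy);
        }
        return r;
}

int main(void)
{
        struct file f = { &real_fops };

        printf("open: %d\n", proxy_open(&f));
        return 0;
}
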
index 866bb18efefea9953250ba1cbdc145e7d4be49af..e818f5ac7a2692b6bb5c1a132d7bbfd967ad956a 100644
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/mount.h>
+#include <linux/file.h>
 #include "ecryptfs_kernel.h"
 
 struct ecryptfs_open_req {
@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
        flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
        (*lower_file) = dentry_open(&req.path, flags, cred);
        if (!IS_ERR(*lower_file))
-               goto out;
+               goto have_file;
        if ((flags & O_ACCMODE) == O_RDONLY) {
                rc = PTR_ERR((*lower_file));
                goto out;
@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
        mutex_unlock(&ecryptfs_kthread_ctl.mux);
        wake_up(&ecryptfs_kthread_ctl.wait);
        wait_for_completion(&req.done);
-       if (IS_ERR(*lower_file))
+       if (IS_ERR(*lower_file)) {
                rc = PTR_ERR(*lower_file);
+               goto out;
+       }
+have_file:
+       if ((*lower_file)->f_op->mmap == NULL) {
+               fput(*lower_file);
+               *lower_file = NULL;
+               rc = -EMEDIUMTYPE;
+       }
 out:
        return rc;
 }
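
ecryptfs now refuses lower files whose file_operations lack ->mmap, failing the open with -EMEDIUMTYPE instead of crashing later. The underlying pattern, probing an ops table for a required capability, in a standalone sketch (EMEDIUMTYPE is a Linux errno; the structs are invented):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct file_ops {
        int (*read)(void);
        int (*mmap)(void); /* optional: many implementations leave this NULL */
};

struct file { const struct file_ops *f_op; };

/* Refuse files that cannot support a required operation up front. */
static int check_lower_file(const struct file *f)
{
        if (f->f_op->mmap == NULL)
                return -EMEDIUMTYPE; /* the errno the patch chose */
        return 0;
}

static int dummy_read(void) { return 0; }

int main(void)
{
        static const struct file_ops no_mmap = { .read = dummy_read };
        struct file f = { &no_mmap };

        printf("check: %d\n", check_lower_file(&f));
        return 0;
}
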
index b71deeecea17939d85bce1c54f30da77f1c4d8a1..f57ced528cde71d498ecb6339188cf6d4cd216f9 100644
@@ -130,6 +130,7 @@ extern int invalidate_inodes(struct super_block *, bool);
 extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
 extern int d_set_mounted(struct dentry *dentry);
 extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
+extern struct dentry *d_alloc_cursor(struct dentry *);
 
 /*
  * read_write.c
index b31852f76f46585137021df6266a18ba743b8528..e3ca4b4cac84a4597b913888359cce6a4540ff09 100644
@@ -2329,18 +2329,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)
 
        BUG_ON(size & (size-1)); /* Must be a power of 2 */
 
-       flags |= __GFP_REPEAT;
-       if (size == PAGE_SIZE)
-               ptr = (void *)__get_free_pages(flags, 0);
-       else if (size > PAGE_SIZE) {
-               int order = get_order(size);
-
-               if (order < 3)
-                       ptr = (void *)__get_free_pages(flags, order);
-               else
-                       ptr = vmalloc(size);
-       } else
+       if (size < PAGE_SIZE)
                ptr = kmem_cache_alloc(get_slab(size), flags);
+       else
+               ptr = (void *)__get_free_pages(flags, get_order(size));
 
        /* Check alignment; SLUB has gotten this wrong in the past,
         * and this can lead to user data corruption! */
@@ -2351,20 +2343,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)
 
 void jbd2_free(void *ptr, size_t size)
 {
-       if (size == PAGE_SIZE) {
-               free_pages((unsigned long)ptr, 0);
-               return;
-       }
-       if (size > PAGE_SIZE) {
-               int order = get_order(size);
-
-               if (order < 3)
-                       free_pages((unsigned long)ptr, order);
-               else
-                       vfree(ptr);
-               return;
-       }
-       kmem_cache_free(get_slab(size), ptr);
+       if (size < PAGE_SIZE)
+               kmem_cache_free(get_slab(size), ptr);
+       else
+               free_pages((unsigned long)ptr, get_order(size));
 };
 
 /*
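
jbd2_alloc()/jbd2_free() are reduced to two cases: sub-page sizes come from a slab cache, everything else from the page allocator, and the old vmalloc fallback for order >= 3 is gone. A userspace analog of the size split, with malloc()/aligned_alloc() standing in for kmem_cache_alloc()/__get_free_pages():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/*
 * Pick an allocator purely by size, as the patch does: sub-page sizes
 * come from a size-class ("slab") pool, everything else from whole
 * pages.  The power-of-two check mirrors the BUG_ON in jbd2_alloc().
 */
static void *jbd2_alloc_like(size_t size)
{
        if (size & (size - 1))
                return NULL;                    /* must be a power of two */
        if (size < PAGE_SIZE)
                return malloc(size);            /* "slab" path */
        return aligned_alloc(PAGE_SIZE, size);  /* "page" path */
}

int main(void)
{
        void *a = jbd2_alloc_like(1024);        /* slab-sized */
        void *b = jbd2_alloc_like(8192);        /* two pages */

        printf("%p %p\n", a, b);
        free(a);
        free(b);
        return 0;
}
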
index 3db2721144c27d133cc7f8652d269900c9865ea5..cedeacbae303a9483fea836ba4a5cc24cb022f41 100644
@@ -71,9 +71,7 @@ EXPORT_SYMBOL(simple_lookup);
 
 int dcache_dir_open(struct inode *inode, struct file *file)
 {
-       static struct qstr cursor_name = QSTR_INIT(".", 1);
-
-       file->private_data = d_alloc(file->f_path.dentry, &cursor_name);
+       file->private_data = d_alloc_cursor(file->f_path.dentry);
 
        return file->private_data ? 0 : -ENOMEM;
 }
index 6a82fb7e21273ca457f583da77af8b2cae9f0a81..70580ab1445c89f8379756fc16e33223aca4ab0d 100644
@@ -3030,9 +3030,13 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
                        }
                        if (*opened & FILE_CREATED)
                                fsnotify_create(dir, dentry);
-                       path->dentry = dentry;
-                       path->mnt = nd->path.mnt;
-                       return 1;
+                       if (unlikely(d_is_negative(dentry))) {
+                               error = -ENOENT;
+                       } else {
+                               path->dentry = dentry;
+                               path->mnt = nd->path.mnt;
+                               return 1;
+                       }
                }
        }
        dput(dentry);
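
The atomic_open() hunk adds a check that the dentry handed back is not negative before reporting success: a name that raced away after creation must surface as -ENOENT. The shape of that check in a self-contained sketch (all names invented):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct dentry { void *inode; /* NULL makes the dentry "negative" */ };

static int lookup(struct dentry *d, int populate)
{
        static int ino;

        d->inode = populate ? &ino : NULL;
        return 0;
}

/*
 * Even when the lookup step itself succeeds, a negative result must
 * become -ENOENT rather than being reported as a successful open.
 */
static int open_like(struct dentry *d, int populate)
{
        int err = lookup(d, populate);

        if (err)
                return err;
        if (d->inode == NULL)
                return -ENOENT; /* the check the patch adds */
        return 0;
}

int main(void)
{
        struct dentry d;

        printf("%d\n", open_like(&d, 0)); /* -ENOENT, typically -2 */
        printf("%d\n", open_like(&d, 1)); /* 0 */
        return 0;
}
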
@@ -3201,9 +3205,7 @@ static int do_last(struct nameidata *nd,
        int acc_mode = op->acc_mode;
        unsigned seq;
        struct inode *inode;
-       struct path save_parent = { .dentry = NULL, .mnt = NULL };
        struct path path;
-       bool retried = false;
        int error;
 
        nd->flags &= ~LOOKUP_PARENT;
@@ -3246,7 +3248,6 @@ static int do_last(struct nameidata *nd,
                        return -EISDIR;
        }
 
-retry_lookup:
        if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
                error = mnt_want_write(nd->path.mnt);
                if (!error)
@@ -3298,6 +3299,10 @@ static int do_last(struct nameidata *nd,
                got_write = false;
        }
 
+       error = follow_managed(&path, nd);
+       if (unlikely(error < 0))
+               return error;
+
        if (unlikely(d_is_negative(path.dentry))) {
                path_to_nameidata(&path, nd);
                return -ENOENT;
@@ -3313,10 +3318,6 @@ static int do_last(struct nameidata *nd,
                return -EEXIST;
        }
 
-       error = follow_managed(&path, nd);
-       if (unlikely(error < 0))
-               return error;
-
        seq = 0;        /* out of RCU mode, so the value doesn't matter */
        inode = d_backing_inode(path.dentry);
 finish_lookup:
@@ -3327,23 +3328,14 @@ static int do_last(struct nameidata *nd,
        if (unlikely(error))
                return error;
 
-       if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
-               path_to_nameidata(&path, nd);
-       } else {
-               save_parent.dentry = nd->path.dentry;
-               save_parent.mnt = mntget(path.mnt);
-               nd->path.dentry = path.dentry;
-
-       }
+       path_to_nameidata(&path, nd);
        nd->inode = inode;
        nd->seq = seq;
        /* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
 finish_open:
        error = complete_walk(nd);
-       if (error) {
-               path_put(&save_parent);
+       if (error)
                return error;
-       }
        audit_inode(nd->name, nd->path.dentry, 0);
        error = -EISDIR;
        if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
@@ -3366,13 +3358,9 @@ static int do_last(struct nameidata *nd,
                goto out;
        BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
        error = vfs_open(&nd->path, file, current_cred());
-       if (!error) {
-               *opened |= FILE_OPENED;
-       } else {
-               if (error == -EOPENSTALE)
-                       goto stale_open;
+       if (error)
                goto out;
-       }
+       *opened |= FILE_OPENED;
 opened:
        error = open_check_o_direct(file);
        if (!error)
@@ -3388,26 +3376,7 @@ static int do_last(struct nameidata *nd,
        }
        if (got_write)
                mnt_drop_write(nd->path.mnt);
-       path_put(&save_parent);
        return error;
-
-stale_open:
-       /* If no saved parent or already retried then can't retry */
-       if (!save_parent.dentry || retried)
-               goto out;
-
-       BUG_ON(save_parent.dentry != dir);
-       path_put(&nd->path);
-       nd->path = save_parent;
-       nd->inode = dir->d_inode;
-       save_parent.mnt = NULL;
-       save_parent.dentry = NULL;
-       if (got_write) {
-               mnt_drop_write(nd->path.mnt);
-               got_write = false;
-       }
-       retried = true;
-       goto retry_lookup;
 }
 
 static int do_tmpfile(struct nameidata *nd, unsigned flags,
index 4fb1691b435552c044134b56aa2032da35684b5b..783004af57077a9947a8f3a3228cbc8465a79b3e 100644
@@ -2409,8 +2409,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
                        mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
                }
                if (type->fs_flags & FS_USERNS_VISIBLE) {
-                       if (!fs_fully_visible(type, &mnt_flags))
+                       if (!fs_fully_visible(type, &mnt_flags)) {
+                               put_filesystem(type);
                                return -EPERM;
+                       }
                }
        }
 
@@ -3245,6 +3247,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
                if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
                        mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
 
+               /* Don't miss readonly hidden in the superblock flags */
+               if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
+                       mnt_flags |= MNT_LOCK_READONLY;
+
                /* Verify the mount flags are equal to or more permissive
                 * than the proposed new mount.
                 */
@@ -3271,7 +3277,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
                list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
                        struct inode *inode = child->mnt_mountpoint->d_inode;
                        /* Only worry about locked mounts */
-                       if (!(mnt_flags & MNT_LOCKED))
+                       if (!(child->mnt.mnt_flags & MNT_LOCKED))
                                continue;
                        /* Is the directory permanently empty? */
                        if (!is_empty_dir_inode(inode))
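
The first namespace.c hunk fixes a reference leak: when fs_fully_visible() rejects the mount, do_new_mount() must still drop the filesystem-type reference it took. A minimal model of pairing every early return with the matching put (names invented):

#include <errno.h>
#include <stdio.h>

struct fstype { int refcount; };

static void get_filesystem(struct fstype *t) { t->refcount++; }
static void put_filesystem(struct fstype *t) { t->refcount--; }

static int fully_visible(void) { return 0; } /* pretend the check fails */

/* Every early return after the get must pair with a put. */
static int do_new_mount_like(struct fstype *t)
{
        get_filesystem(t);
        if (!fully_visible()) {
                put_filesystem(t); /* the line the patch adds */
                return -EPERM;
        }
        /* ... the mount proper would consume the reference here ... */
        put_filesystem(t);
        return 0;
}

int main(void)
{
        struct fstype t = { 0 };

        do_new_mount_like(&t);
        printf("refcount after the error path: %d\n", t.refcount); /* 0 */
        return 0;
}
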
index e55b5242614da7a9d209db34e8c0f6519cf69446..31f3df193bdbb47b82dbe8df7f67b33163d55828 100644
@@ -290,7 +290,7 @@ static int nfsd4_scsi_identify_device(struct block_device *bdev,
        return error;
 }
 
-#define NFSD_MDS_PR_KEY                0x0100000000000000
+#define NFSD_MDS_PR_KEY                0x0100000000000000ULL
 
 /*
  * We use the client ID as a unique key for the reservations.
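
The one-liner above adds a ULL suffix to NFSD_MDS_PR_KEY. An unsuffixed 0x0100000000000000 gets its type picked by value, signed long on typical 64-bit targets and signed long long on 32-bit ones, while the suffix pins it to unsigned long long so arithmetic on the key stays unsigned everywhere. A quick check with _Generic:

#include <stdio.h>

#define KEY_PLAIN 0x0100000000000000    /* type chosen by value: signed */
#define KEY_ULL   0x0100000000000000ULL /* always unsigned long long */

#define TYPE_NAME(x) _Generic((x),                \
        long: "long",                             \
        long long: "long long",                   \
        unsigned long long: "unsigned long long", \
        default: "other")

int main(void)
{
        printf("plain: %s\n", TYPE_NAME(KEY_PLAIN));
        printf("ull:   %s\n", TYPE_NAME(KEY_ULL));
        return 0;
}
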
index 1580ea6fd64df15da96b832eeef9f3dc2560e32b..d08cd88155c75278c4607f49c078622bf87ab5ee 100644
@@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
                goto out;
 
        inode = d_inode(fh->fh_dentry);
-       if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
-               error = -EOPNOTSUPP;
-               goto out_errno;
-       }
 
        error = fh_want_write(fh);
        if (error)
                goto out_errno;
 
-       error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
+       fh_lock(fh);
+
+       error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
        if (error)
-               goto out_drop_write;
-       error = inode->i_op->set_acl(inode, argp->acl_default,
-                                    ACL_TYPE_DEFAULT);
+               goto out_drop_lock;
+       error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
        if (error)
-               goto out_drop_write;
+               goto out_drop_lock;
+
+       fh_unlock(fh);
 
        fh_drop_write(fh);
 
@@ -131,7 +130,8 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
        posix_acl_release(argp->acl_access);
        posix_acl_release(argp->acl_default);
        return nfserr;
-out_drop_write:
+out_drop_lock:
+       fh_unlock(fh);
        fh_drop_write(fh);
 out_errno:
        nfserr = nfserrno(error);
index 01df4cd7c753fe12cee4c6d37a5ba9400b6f8f41..0c890347cde3d9559b0b0103c2c2c11825d51fae 100644
@@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
                goto out;
 
        inode = d_inode(fh->fh_dentry);
-       if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
-               error = -EOPNOTSUPP;
-               goto out_errno;
-       }
 
        error = fh_want_write(fh);
        if (error)
                goto out_errno;
 
-       error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
+       fh_lock(fh);
+
+       error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
        if (error)
-               goto out_drop_write;
-       error = inode->i_op->set_acl(inode, argp->acl_default,
-                                    ACL_TYPE_DEFAULT);
+               goto out_drop_lock;
+       error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
 
-out_drop_write:
+out_drop_lock:
+       fh_unlock(fh);
        fh_drop_write(fh);
 out_errno:
        nfserr = nfserrno(error);
index 6adabd6049b7199ad72e01e361e3637a88b016ff..71292a0d6f09226e828b327ba139a296dc76a08b 100644
@@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
        dentry = fhp->fh_dentry;
        inode = d_inode(dentry);
 
-       if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
-               return nfserr_attrnotsupp;
-
        if (S_ISDIR(inode->i_mode))
                flags = NFS4_ACL_DIR;
 
@@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
        if (host_error < 0)
                goto out_nfserr;
 
-       host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
+       fh_lock(fhp);
+
+       host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
        if (host_error < 0)
-               goto out_release;
+               goto out_drop_lock;
 
        if (S_ISDIR(inode->i_mode)) {
-               host_error = inode->i_op->set_acl(inode, dpacl,
-                                                 ACL_TYPE_DEFAULT);
+               host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
        }
 
-out_release:
+out_drop_lock:
+       fh_unlock(fhp);
+
        posix_acl_release(pacl);
        posix_acl_release(dpacl);
 out_nfserr:
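
All three ACL hunks (nfs2acl, nfs3acl, nfs4acl) stop calling ->set_acl directly and instead use the set_posix_acl() helper under fh_lock()/fh_unlock(), with every failure funneled through a single out_drop_lock label. The goto-based cleanup skeleton, runnable on its own (the lock and setter are stubs):

#include <errno.h>
#include <stdio.h>

struct fh { int locked; };

static void fh_lock(struct fh *fh)   { fh->locked = 1; }
static void fh_unlock(struct fh *fh) { fh->locked = 0; }

static int set_acl(int fail) { return fail ? -EINVAL : 0; }

/* Two updates under one lock; every failure exits via out_drop_lock. */
static int setacl_like(struct fh *fh, int fail_second)
{
        int error;

        fh_lock(fh);
        error = set_acl(0);            /* ACL_TYPE_ACCESS step */
        if (error)
                goto out_drop_lock;
        error = set_acl(fail_second);  /* ACL_TYPE_DEFAULT step */
out_drop_lock:
        fh_unlock(fh);
        return error;
}

int main(void)
{
        struct fh fh = { 0 };

        printf("%d (lock still held: %d)\n", setacl_like(&fh, 1), fh.locked);
        return 0;
}
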
index 7389cb1d7409cf16c31b041d88ae59cbe09d5322..04c68d9003249f884a0d4e49467873c8d75ed9a9 100644
@@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
        }
 }
 
-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
-{
-       struct rpc_xprt *xprt;
-
-       if (args->protocol != XPRT_TRANSPORT_BC_TCP)
-               return rpc_create(args);
-
-       xprt = args->bc_xprt->xpt_bc_xprt;
-       if (xprt) {
-               xprt_get(xprt);
-               return rpc_create_xprt(args, xprt);
-       }
-
-       return rpc_create(args);
-}
-
 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
 {
        int maxtime = max_cb_time(clp->net);
@@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
                args.authflavor = ses->se_cb_sec.flavor;
        }
        /* Create RPC client */
-       client = create_backchannel_client(&args);
+       client = rpc_create(&args);
        if (IS_ERR(client)) {
                dprintk("NFSD: couldn't create callback client: %ld\n",
                        PTR_ERR(client));
index f5f82e145018059bbeb265cd42a279a3009dfd6b..70d0b9b33031ee88985da10ff2d11c782a2b49f6 100644
@@ -3480,12 +3480,17 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
 }
 
 static struct nfs4_ol_stateid *
-init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
-               struct nfsd4_open *open)
+init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
 {
 
        struct nfs4_openowner *oo = open->op_openowner;
        struct nfs4_ol_stateid *retstp = NULL;
+       struct nfs4_ol_stateid *stp;
+
+       stp = open->op_stp;
+       /* We are moving these outside of the spinlocks to avoid the warnings */
+       mutex_init(&stp->st_mutex);
+       mutex_lock(&stp->st_mutex);
 
        spin_lock(&oo->oo_owner.so_client->cl_lock);
        spin_lock(&fp->fi_lock);
@@ -3493,6 +3498,8 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
        retstp = nfsd4_find_existing_open(fp, open);
        if (retstp)
                goto out_unlock;
+
+       open->op_stp = NULL;
        atomic_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_OPEN_STID;
        INIT_LIST_HEAD(&stp->st_locks);
@@ -3502,14 +3509,19 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = 0;
        stp->st_openstp = NULL;
-       init_rwsem(&stp->st_rwsem);
        list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
        list_add(&stp->st_perfile, &fp->fi_stateids);
 
 out_unlock:
        spin_unlock(&fp->fi_lock);
        spin_unlock(&oo->oo_owner.so_client->cl_lock);
-       return retstp;
+       if (retstp) {
+               mutex_lock(&retstp->st_mutex);
+               /* To keep mutex tracking happy */
+               mutex_unlock(&stp->st_mutex);
+               stp = retstp;
+       }
+       return stp;
 }
 
 /*
@@ -4305,7 +4317,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
        struct nfs4_file *fp = NULL;
        struct nfs4_ol_stateid *stp = NULL;
-       struct nfs4_ol_stateid *swapstp = NULL;
        struct nfs4_delegation *dp = NULL;
        __be32 status;
 
@@ -4335,32 +4346,28 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
         */
        if (stp) {
                /* Stateid was found, this is an OPEN upgrade */
-               down_read(&stp->st_rwsem);
+               mutex_lock(&stp->st_mutex);
                status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
                if (status) {
-                       up_read(&stp->st_rwsem);
+                       mutex_unlock(&stp->st_mutex);
                        goto out;
                }
        } else {
-               stp = open->op_stp;
-               open->op_stp = NULL;
-               swapstp = init_open_stateid(stp, fp, open);
-               if (swapstp) {
-                       nfs4_put_stid(&stp->st_stid);
-                       stp = swapstp;
-                       down_read(&stp->st_rwsem);
+               /* stp is returned locked. */
+               stp = init_open_stateid(fp, open);
+               /* See if we lost the race to some other thread */
+               if (stp->st_access_bmap != 0) {
                        status = nfs4_upgrade_open(rqstp, fp, current_fh,
                                                stp, open);
                        if (status) {
-                               up_read(&stp->st_rwsem);
+                               mutex_unlock(&stp->st_mutex);
                                goto out;
                        }
                        goto upgrade_out;
                }
-               down_read(&stp->st_rwsem);
                status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
                if (status) {
-                       up_read(&stp->st_rwsem);
+                       mutex_unlock(&stp->st_mutex);
                        release_open_stateid(stp);
                        goto out;
                }
@@ -4372,7 +4379,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        }
 upgrade_out:
        nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
-       up_read(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
 
        if (nfsd4_has_session(&resp->cstate)) {
                if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
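The contract change in the two hunks above: init_open_stateid() now always returns a stateid with st_mutex held, either the caller-provided open->op_stp or a pre-existing stateid found under the spinlocks. Because mutex_lock() can sleep, it must never be taken inside the spin_lock sections; a stripped-down sketch of the ordering this establishes:

	mutex_init(&stp->st_mutex);
	mutex_lock(&stp->st_mutex);		/* sleeping lock: take it before any spinlock */

	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);
	retstp = nfsd4_find_existing_open(fp, open);
	/* ... initialize stp only if no existing stateid was found ... */
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);

	if (retstp) {
		mutex_lock(&retstp->st_mutex);	/* safe again: no spinlocks held */
		mutex_unlock(&stp->st_mutex);	/* keep mutex tracking balanced */
		stp = retstp;
	}
	return stp;				/* always returned locked */

The caller then detects the lost race by st_access_bmap != 0: a freshly initialized stateid has an empty access bitmap, an existing one does not.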
@@ -4977,12 +4984,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
                 * revoked delegations are kept only for free_stateid.
                 */
                return nfserr_bad_stateid;
-       down_write(&stp->st_rwsem);
+       mutex_lock(&stp->st_mutex);
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status == nfs_ok)
                status = nfs4_check_fh(current_fh, &stp->st_stid);
        if (status != nfs_ok)
-               up_write(&stp->st_rwsem);
+               mutex_unlock(&stp->st_mutex);
        return status;
 }
 
@@ -5030,7 +5037,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
                return status;
        oo = openowner(stp->st_stateowner);
        if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
-               up_write(&stp->st_rwsem);
+               mutex_unlock(&stp->st_mutex);
                nfs4_put_stid(&stp->st_stid);
                return nfserr_bad_stateid;
        }
@@ -5062,12 +5069,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        oo = openowner(stp->st_stateowner);
        status = nfserr_bad_stateid;
        if (oo->oo_flags & NFS4_OO_CONFIRMED) {
-               up_write(&stp->st_rwsem);
+               mutex_unlock(&stp->st_mutex);
                goto put_stateid;
        }
        oo->oo_flags |= NFS4_OO_CONFIRMED;
        nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
        dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
                __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
 
@@ -5143,7 +5150,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
        nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
        status = nfs_ok;
 put_stateid:
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
        nfs4_put_stid(&stp->st_stid);
 out:
        nfsd4_bump_seqid(cstate, status);
@@ -5196,7 +5203,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        if (status)
                goto out; 
        nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
 
        nfsd4_close_open_stateid(stp);
 
@@ -5422,7 +5429,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
-       init_rwsem(&stp->st_rwsem);
+       mutex_init(&stp->st_mutex);
        list_add(&stp->st_locks, &open_stp->st_locks);
        list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
        spin_lock(&fp->fi_lock);
@@ -5591,7 +5598,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                                        &open_stp, nn);
                if (status)
                        goto out;
-               up_write(&open_stp->st_rwsem);
+               mutex_unlock(&open_stp->st_mutex);
                open_sop = openowner(open_stp->st_stateowner);
                status = nfserr_bad_stateid;
                if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
@@ -5600,7 +5607,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                status = lookup_or_create_lock_state(cstate, open_stp, lock,
                                                        &lock_stp, &new);
                if (status == nfs_ok)
-                       down_write(&lock_stp->st_rwsem);
+                       mutex_lock(&lock_stp->st_mutex);
        } else {
                status = nfs4_preprocess_seqid_op(cstate,
                                       lock->lk_old_lock_seqid,
@@ -5704,7 +5711,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                    seqid_mutating_err(ntohl(status)))
                        lock_sop->lo_owner.so_seqid++;
 
-               up_write(&lock_stp->st_rwsem);
+               mutex_unlock(&lock_stp->st_mutex);
 
                /*
                 * If this is a new, never-before-used stateid, and we are
@@ -5874,7 +5881,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 fput:
        fput(filp);
 put_stateid:
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
        nfs4_put_stid(&stp->st_stid);
 out:
        nfsd4_bump_seqid(cstate, status);
index 986e51e5ceac882f703e1d0262fdc66d356a453f..64053eadeb818f2a754bd791af7010b992e99d1a 100644 (file)
@@ -535,7 +535,7 @@ struct nfs4_ol_stateid {
        unsigned char                   st_access_bmap;
        unsigned char                   st_deny_bmap;
        struct nfs4_ol_stateid          *st_openstp;
-       struct rw_semaphore             st_rwsem;
+       struct mutex                    st_mutex;
 };
 
 static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
index 809bd2de7ad0602c638464e4ecd5a736c14fa878..e9fd241b9a0acd685fe1df0e62adef8839bb54e6 100644 (file)
@@ -439,7 +439,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
        if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
                return 0;
        bytes = le16_to_cpu(sbp->s_bytes);
-       if (bytes > BLOCK_SIZE)
+       if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
                return 0;
        crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
                       sumoff);
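The added lower bound protects the CRC computation that follows: the superblock checksum covers sumoff bytes, the 4-byte s_sum field itself (taken as zeroes), and then bytes - sumoff - 4 trailing bytes. A corrupted s_bytes smaller than sumoff + 4 would make that last length wrap around. A sketch of the continuation as I read it (sumzero is a placeholder name; the exact nilfs code may differ):

	crc = crc32_le(crc, sumzero, 4);	/* the s_sum field counts as zeroes */
	crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4,
		       bytes - sumoff - 4);	/* would underflow if bytes < sumoff + 4 */
	return crc == le32_to_cpu(sbp->s_sum);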
index e27e6527912bb567f10d03c7b272f3922f69f3ca..4342c7ee7d202153126fbaace723a19846d02fd4 100644 (file)
@@ -1,7 +1,5 @@
 ccflags-y := -Ifs/ocfs2
 
-ccflags-y += -DCATCH_BH_JBD_RACES
-
 obj-$(CONFIG_OCFS2_FS) +=      \
        ocfs2.o                 \
        ocfs2_stackglue.o
index fe50ded1b4ce763e33579ab54a982e64ed06003e..498641eed2db83efe31923c2f7d7517a499f836c 100644 (file)
@@ -139,11 +139,16 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
 
                lock_buffer(bh);
                if (buffer_jbd(bh)) {
+#ifdef CATCH_BH_JBD_RACES
                        mlog(ML_ERROR,
                             "block %llu had the JBD bit set "
                             "while I was in lock_buffer!",
                             (unsigned long long)bh->b_blocknr);
                        BUG();
+#else
+                       unlock_buffer(bh);
+                       continue;
+#endif
                }
 
                clear_buffer_uptodate(bh);
index 22f0253a3567745ed02520573c8fc99c56189208..c2a6b08940228c838375ed9ef37c548c5052a2cc 100644 (file)
@@ -405,12 +405,21 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
                err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
        } else {
                const struct cred *old_cred;
+               struct cred *override_cred;
 
                old_cred = ovl_override_creds(dentry->d_sb);
 
-               err = ovl_create_over_whiteout(dentry, inode, &stat, link,
-                                              hardlink);
+               err = -ENOMEM;
+               override_cred = prepare_creds();
+               if (override_cred) {
+                       override_cred->fsuid = old_cred->fsuid;
+                       override_cred->fsgid = old_cred->fsgid;
+                       put_cred(override_creds(override_cred));
+                       put_cred(override_cred);
 
+                       err = ovl_create_over_whiteout(dentry, inode, &stat,
+                                                      link, hardlink);
+               }
                revert_creds(old_cred);
        }
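ovl_override_creds() has already switched to the mounter's credentials; the new block layers a second override on top so the file created over the whiteout still carries the original caller's fsuid/fsgid. The canonical credentials-API pattern this compresses (per Documentation/security/credentials.txt) is, roughly:

	struct cred *new;
	const struct cred *old;

	new = prepare_creds();			/* writable copy of current creds */
	if (!new)
		return -ENOMEM;
	new->fsuid = caller_fsuid;		/* placeholders: identity to act as */
	new->fsgid = caller_fsgid;

	old = override_creds(new);		/* install, remember what to restore */
	/* ... do the work as the overridden identity ... */
	revert_creds(old);
	put_cred(new);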
 
index 0ed7c40124378cc9dce12d73e3c22449d2348909..1dbeab6cf96e54c7e43cf040ef3d909ce6fe2071 100644 (file)
@@ -238,41 +238,27 @@ int ovl_setxattr(struct dentry *dentry, struct inode *inode,
        return err;
 }
 
-static bool ovl_need_xattr_filter(struct dentry *dentry,
-                                 enum ovl_path_type type)
-{
-       if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
-               return S_ISDIR(dentry->d_inode->i_mode);
-       else
-               return false;
-}
-
 ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
                     const char *name, void *value, size_t size)
 {
-       struct path realpath;
-       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+       struct dentry *realdentry = ovl_dentry_real(dentry);
 
-       if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
+       if (ovl_is_private_xattr(name))
                return -ENODATA;
 
-       return vfs_getxattr(realpath.dentry, name, value, size);
+       return vfs_getxattr(realdentry, name, value, size);
 }
 
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
-       struct path realpath;
-       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+       struct dentry *realdentry = ovl_dentry_real(dentry);
        ssize_t res;
        int off;
 
-       res = vfs_listxattr(realpath.dentry, list, size);
+       res = vfs_listxattr(realdentry, list, size);
        if (res <= 0 || size == 0)
                return res;
 
-       if (!ovl_need_xattr_filter(dentry, type))
-               return res;
-
        /* filter out private xattrs */
        for (off = 0; off < res;) {
                char *s = list + off;
@@ -302,7 +288,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
                goto out;
 
        err = -ENODATA;
-       if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
+       if (ovl_is_private_xattr(name))
                goto out_drop_write;
 
        if (!OVL_TYPE_UPPER(type)) {
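ovl_is_private_xattr() is not part of this diff; assuming the contemporary overlayfs implementation, it is just a prefix match on the overlay trusted-xattr namespace, which is why filtering unconditionally (rather than only for impure upper directories) is cheap enough to drop ovl_need_xattr_filter() entirely:

	#define OVL_XATTR_PREFIX "trusted.overlay."	/* assumption: the 4.x prefix */

	static bool ovl_is_private_xattr(const char *name)
	{
		return strncmp(name, OVL_XATTR_PREFIX,
			       sizeof(OVL_XATTR_PREFIX) - 1) == 0;
	}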
index 8a4a266beff39dda0be9ca39df74bc847fd0a2d3..edc452c2a563a0f86849094a848786a7728ef99e 100644 (file)
@@ -820,39 +820,43 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
        return error;
 }
 
-static int
-posix_acl_xattr_set(const struct xattr_handler *handler,
-                   struct dentry *unused, struct inode *inode,
-                   const char *name, const void *value,
-                   size_t size, int flags)
+int
+set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
 {
-       struct posix_acl *acl = NULL;
-       int ret;
-
        if (!IS_POSIXACL(inode))
                return -EOPNOTSUPP;
        if (!inode->i_op->set_acl)
                return -EOPNOTSUPP;
 
-       if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
-               return value ? -EACCES : 0;
+       if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+               return acl ? -EACCES : 0;
        if (!inode_owner_or_capable(inode))
                return -EPERM;
 
+       if (acl) {
+               int ret = posix_acl_valid(acl);
+               if (ret)
+                       return ret;
+       }
+       return inode->i_op->set_acl(inode, acl, type);
+}
+EXPORT_SYMBOL(set_posix_acl);
+
+static int
+posix_acl_xattr_set(const struct xattr_handler *handler,
+                   struct dentry *unused, struct inode *inode,
+                   const char *name, const void *value,
+                   size_t size, int flags)
+{
+       struct posix_acl *acl = NULL;
+       int ret;
+
        if (value) {
                acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
-
-               if (acl) {
-                       ret = posix_acl_valid(acl);
-                       if (ret)
-                               goto out;
-               }
        }
-
-       ret = inode->i_op->set_acl(inode, acl, handler->flags);
-out:
+       ret = set_posix_acl(inode, handler->flags, acl);
        posix_acl_release(acl);
        return ret;
 }
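For in-kernel callers, the exported helper centralizes the checks the xattr path used to do inline: IS_POSIXACL, presence of ->set_acl, the directories-only rule for default ACLs, ownership, and posix_acl_valid(). A minimal caller-side sketch:

	struct posix_acl *acl;
	int err;

	acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
	if (IS_ERR(acl))
		return PTR_ERR(acl);

	err = set_posix_acl(inode, ACL_TYPE_ACCESS, acl);
	posix_acl_release(acl);		/* the helper takes no reference of its own */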
index 55bc7d6c8aacaed2f4892bd1a554e3baf939bf34..06702783bf40254593f82b76dd9745ad557ac334 100644 (file)
@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
        if (IS_ERR(sb))
                return ERR_CAST(sb);
 
+       /*
+        * procfs isn't actually a stacking filesystem; however, there is
+        * too much magic going on inside it to permit stacking things on
+        * top of it
+        */
+       sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
+
        if (!proc_parse_options(options, ns)) {
                deactivate_locked_super(sb);
                return ERR_PTR(-EINVAL);
index b8f2d1e8c6453c373655be98c03c1118acd71f2c..c72c16c5a60f9dfd0a151022b7885073fa872270 100644 (file)
@@ -1393,7 +1393,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        unsigned long safe_mask = 0;
        unsigned int commit_max_age = (unsigned int)-1;
        struct reiserfs_journal *journal = SB_JOURNAL(s);
-       char *new_opts = kstrdup(arg, GFP_KERNEL);
+       char *new_opts;
        int err;
        char *qf_names[REISERFS_MAXQUOTAS];
        unsigned int qfmt = 0;
@@ -1401,6 +1401,10 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        int i;
 #endif
 
+       new_opts = kstrdup(arg, GFP_KERNEL);
+       if (arg && !new_opts)
+               return -ENOMEM;
+
        sync_filesystem(s);
        reiserfs_write_lock(s);
 
@@ -1546,7 +1550,8 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        }
 
 out_ok_unlocked:
-       replace_mount_options(s, new_opts);
+       if (new_opts)
+               replace_mount_options(s, new_opts);
        return 0;
 
 out_err_unlock:
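The subtlety in the new check is that kstrdup(NULL, ...) returns NULL by design, and remount can legitimately be called with no option string; only a NULL result for a non-NULL arg is an allocation failure. The same pattern in isolation:

	char *copy = kstrdup(src, GFP_KERNEL);

	if (src && !copy)	/* NULL src => NULL copy, and that is fine */
		return -ENOMEM;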
index 08316972ff934102b58783b1609c2d1920a47ea8..7bbf420d128988470b0fb90491f05587b83b9052 100644 (file)
@@ -52,6 +52,7 @@
 #include "ubifs.h"
 #include <linux/mount.h>
 #include <linux/slab.h>
+#include <linux/migrate.h>
 
 static int read_block(struct inode *inode, void *addr, unsigned int block,
                      struct ubifs_data_node *dn)
@@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page)
        return ret;
 }
 
+#ifdef CONFIG_MIGRATION
+static int ubifs_migrate_page(struct address_space *mapping,
+               struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+       int rc;
+
+       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+       if (rc != MIGRATEPAGE_SUCCESS)
+               return rc;
+
+       if (PagePrivate(page)) {
+               ClearPagePrivate(page);
+               SetPagePrivate(newpage);
+       }
+
+       migrate_page_copy(newpage, page);
+       return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
 static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 {
        /*
@@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = {
        .write_end      = ubifs_write_end,
        .invalidatepage = ubifs_invalidatepage,
        .set_page_dirty = ubifs_set_page_dirty,
+#ifdef CONFIG_MIGRATION
+       .migratepage    = ubifs_migrate_page,
+#endif
        .releasepage    = ubifs_releasepage,
 };
 
index 5f861ed287c3fde0fbf87de4c740e672213acd5d..888c364b2fe95a87ffbc5d303f403d8875fb4b9f 100644 (file)
@@ -295,7 +295,8 @@ static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
                map = &UDF_SB(sb)->s_partmaps[partition];
                /* map to sparable/physical partition desc */
                phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
-                       map->s_partition_num, ext_offset + offset);
+                       map->s_type_specific.s_metadata.s_phys_partition_ref,
+                       ext_offset + offset);
        }
 
        brelse(epos.bh);
@@ -317,14 +318,18 @@ uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
        mdata = &map->s_type_specific.s_metadata;
        inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
 
-       /* We shouldn't mount such media... */
-       BUG_ON(!inode);
+       if (!inode)
+               return 0xFFFFFFFF;
+
        retblk = udf_try_read_meta(inode, block, partition, offset);
        if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
                udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
                if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
                        mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
-                               mdata->s_mirror_file_loc, map->s_partition_num);
+                               mdata->s_mirror_file_loc,
+                               mdata->s_phys_partition_ref);
+                       if (IS_ERR(mdata->s_mirror_fe))
+                               mdata->s_mirror_fe = NULL;
                        mdata->s_flags |= MF_MIRROR_FE_LOADED;
                }
 
index 5e2c8c814e1bda634c54ade051bd0143e20d553a..4942549e7dc8b3758dd8c8fadb99e8fba4265c00 100644 (file)
@@ -951,13 +951,13 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 }
 
 struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
-                                       u32 meta_file_loc, u32 partition_num)
+                                       u32 meta_file_loc, u32 partition_ref)
 {
        struct kernel_lb_addr addr;
        struct inode *metadata_fe;
 
        addr.logicalBlockNum = meta_file_loc;
-       addr.partitionReferenceNum = partition_num;
+       addr.partitionReferenceNum = partition_ref;
 
        metadata_fe = udf_iget_special(sb, &addr);
 
@@ -974,7 +974,8 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
        return metadata_fe;
 }
 
-static int udf_load_metadata_files(struct super_block *sb, int partition)
+static int udf_load_metadata_files(struct super_block *sb, int partition,
+                                  int type1_index)
 {
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *map;
@@ -984,20 +985,21 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
 
        map = &sbi->s_partmaps[partition];
        mdata = &map->s_type_specific.s_metadata;
+       mdata->s_phys_partition_ref = type1_index;
 
        /* metadata address */
        udf_debug("Metadata file location: block = %d part = %d\n",
-                 mdata->s_meta_file_loc, map->s_partition_num);
+                 mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
 
        fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
-                                        map->s_partition_num);
+                                        mdata->s_phys_partition_ref);
        if (IS_ERR(fe)) {
                /* mirror file entry */
                udf_debug("Mirror metadata file location: block = %d part = %d\n",
-                         mdata->s_mirror_file_loc, map->s_partition_num);
+                         mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
 
                fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
-                                                map->s_partition_num);
+                                                mdata->s_phys_partition_ref);
 
                if (IS_ERR(fe)) {
                        udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
@@ -1015,7 +1017,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
        */
        if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
                addr.logicalBlockNum = mdata->s_bitmap_file_loc;
-               addr.partitionReferenceNum = map->s_partition_num;
+               addr.partitionReferenceNum = mdata->s_phys_partition_ref;
 
                udf_debug("Bitmap file location: block = %d part = %d\n",
                          addr.logicalBlockNum, addr.partitionReferenceNum);
@@ -1283,7 +1285,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
        p = (struct partitionDesc *)bh->b_data;
        partitionNumber = le16_to_cpu(p->partitionNumber);
 
-       /* First scan for TYPE1, SPARABLE and METADATA partitions */
+       /* First scan for TYPE1 and SPARABLE partitions */
        for (i = 0; i < sbi->s_partitions; i++) {
                map = &sbi->s_partmaps[i];
                udf_debug("Searching map: (%d == %d)\n",
@@ -1333,7 +1335,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
                goto out_bh;
 
        if (map->s_partition_type == UDF_METADATA_MAP25) {
-               ret = udf_load_metadata_files(sb, i);
+               ret = udf_load_metadata_files(sb, i, type1_idx);
                if (ret < 0) {
                        udf_err(sb, "error loading MetaData partition map %d\n",
                                i);
index 27b5335730c9337424c0c32790e74173ac61f747..c13875d669c0f45722c5f138a5fa34786f2b3d75 100644 (file)
@@ -61,6 +61,11 @@ struct udf_meta_data {
        __u32   s_bitmap_file_loc;
        __u32   s_alloc_unit_size;
        __u16   s_align_unit_size;
+       /*
+        * Partition Reference Number of the associated physical / sparable
+        * partition
+        */
+       __u16   s_phys_partition_ref;
        int     s_flags;
        struct inode *s_metadata_fe;
        struct inode *s_mirror_fe;
index 6bd05700d8c94cfe5c190008c1eb8910bf8ae0ae..05f05f17a7c2e4fc14f8fa858abc9f7d7715c7b9 100644 (file)
 
 #include <asm-generic/qspinlock_types.h>
 
+/**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
 /**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
        /*
-        * queued_spin_lock_slowpath() can ACQUIRE the lock before
-        * issuing the unordered store that sets _Q_LOCKED_VAL.
-        *
-        * See both smp_cond_acquire() sites for more detail.
-        *
-        * This however means that in code like:
-        *
-        *   spin_lock(A)               spin_lock(B)
-        *   spin_unlock_wait(B)        spin_is_locked(A)
-        *   do_something()             do_something()
-        *
-        * Both CPUs can end up running do_something() because the store
-        * setting _Q_LOCKED_VAL will pass through the loads in
-        * spin_unlock_wait() and/or spin_is_locked().
+        * See queued_spin_unlock_wait().
         *
-        * Avoid this by issuing a full memory barrier between the spin_lock()
-        * and the loads in spin_unlock_wait() and spin_is_locked().
-        *
-        * Note that regular mutual exclusion doesn't care about this
-        * delayed store.
+        * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+        * isn't immediately observable.
         */
-       smp_mb();
-       return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+       return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-       /* See queued_spin_is_locked() */
-       smp_mb();
-       while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-               cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
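The ordering story the deleted comment told still applies: queued_spin_lock_slowpath() can take ownership before its store of _Q_LOCKED_VAL becomes visible, so a naked load in spin_unlock_wait()/spin_is_locked() can miss a held lock. Making queued_spin_unlock_wait() out of line lets the implementation in kernel/locking/qspinlock.c supply that ordering; the removed inline body, kept here as a sketch of the minimum it must still do:

	void queued_spin_unlock_wait(struct qspinlock *lock)
	{
		smp_mb();	/* order against a locker's unordered _Q_LOCKED_VAL store */
		while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
			cpu_relax();
	}

Correspondingly, queued_spin_is_locked() can drop its smp_mb() and test the whole word: any non-zero val (pending, tail, or locked bits) means the lock is held or contended.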
index 25afb31f0389edbb1f9f8a9e3558602df38f816b..261b86d20e7718f13c95290c63e277b5fafca96d 100644 (file)
 enum analogix_dp_devtype {
        EXYNOS_DP,
        RK3288_DP,
+       RK3399_EDP,
 };
 
+static inline bool is_rockchip(enum analogix_dp_devtype type)
+{
+       return type == RK3288_DP || type == RK3399_EDP;
+}
+
 struct analogix_dp_plat_data {
        enum analogix_dp_devtype dev_type;
        struct drm_panel *panel;
@@ -28,7 +34,8 @@ struct analogix_dp_plat_data {
        int (*power_off)(struct analogix_dp_plat_data *);
        int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
                      struct drm_connector *);
-       int (*get_modes)(struct analogix_dp_plat_data *);
+       int (*get_modes)(struct analogix_dp_plat_data *,
+                        struct drm_connector *);
 };
 
 int analogix_dp_resume(struct device *dev);
index fd0dde9f0a6d9f6bcbb3c69d14d7934d340f474d..f313211f8ed52f1e1ecf0a7224a8dd3c26715367 100644 (file)
@@ -23,6 +23,7 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
 
 void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
 void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state);
 int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
        struct drm_fb_helper_surface_size *sizes,
        const struct drm_framebuffer_funcs *funcs);
index 9094599a11509c190bfc5ae4df853531f5a442da..33466bfc6440363d4f36d1ca2f09d44098098e8c 100644 (file)
        INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
        INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
        INTEL_VGA_DEVICE(0x5902, info), /* DT  GT1 */ \
+       INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
        INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
        INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
 
        INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
 
 #define INTEL_KBL_GT3_IDS(info) \
+       INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
-       INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
-       INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
+       INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
 
 #define INTEL_KBL_GT4_IDS(info) \
-       INTEL_VGA_DEVICE(0x5932, info), /* DT  GT4 */ \
-       INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
-       INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
-       INTEL_VGA_DEVICE(0x593D, info)  /* WKS GT4 */
+       INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
 
 #define INTEL_KBL_IDS(info) \
        INTEL_KBL_GT1_IDS(info), \
index c801d9028e37388bc30a8254be8c4ae7df6acb22..97aaf5c3d45b8406acb46e175e442aa80e6a9b1f 100644 (file)
@@ -173,7 +173,7 @@ struct ttm_tt;
  * @lru: List head for the lru list.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
- * @priv_flags: Flags describing buffer object internal state.
+ * @moving: Fence set when BO is moving
  * @vma_node: Address space manager node.
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
@@ -239,7 +239,7 @@ struct ttm_buffer_object {
         * Members protected by a bo reservation.
         */
 
-       unsigned long priv_flags;
+       struct fence *moving;
 
        struct drm_vma_offset_node vma_node;
 
index 513f7f96b80a90e530a4c74179f943a0dff8c40f..e2ebe6666e2beb7fc28d234341413140f67fcfba 100644 (file)
@@ -258,8 +258,10 @@ struct ttm_mem_type_manager_func {
  * reserved by the TTM vm system.
  * @io_reserve_lru: Optional lru list for unreserving io mem regions.
  * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
  * static information. bdev::driver::io_mem_free is never used.
+ * @move_lock: lock for move fence
  * @lru: The lru list for this memory type.
+ * @move: The fence of the last pipelined move operation.
  *
  * This structure is used to identify and manage memory types for a device.
  * It's set up by the ttm_bo_driver::init_mem_type method.
@@ -286,6 +288,7 @@ struct ttm_mem_type_manager {
        struct mutex io_reserve_mutex;
        bool use_io_reserve_lru;
        bool io_reserve_fastpath;
+       spinlock_t move_lock;
 
        /*
         * Protected by @io_reserve_mutex:
@@ -298,6 +301,11 @@ struct ttm_mem_type_manager {
         */
 
        struct list_head lru;
+
+       /*
+        * Protected by @move_lock.
+        */
+       struct fence *move;
 };
 
 /**
@@ -503,9 +511,6 @@ struct ttm_bo_global {
 
 #define TTM_NUM_MEM_TYPES 8
 
-#define TTM_BO_PRIV_FLAG_MOVING  0     /* Buffer object is moving and needs
-                                          idling before CPU mapping */
-#define TTM_BO_PRIV_FLAG_MAX 1
 /**
  * struct ttm_bo_device - Buffer object driver device-specific data.
  *
@@ -622,15 +627,6 @@ extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
  */
 extern void ttm_tt_destroy(struct ttm_tt *ttm);
 
-/**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind a struct ttm_tt.
- */
-extern void ttm_tt_unbind(struct ttm_tt *ttm);
-
 /**
  * ttm_tt_swapin:
  *
@@ -979,6 +975,7 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -993,7 +990,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                             bool evict, bool no_wait_gpu,
+                             bool evict, bool interruptible,
+                             bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem);
 
 /**
@@ -1011,7 +1009,6 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  * @bo: A pointer to a struct ttm_buffer_object.
  * @fence: A fence object that signals when moving is complete.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
@@ -1023,9 +1020,24 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  */
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-                                    struct fence *fence,
-                                    bool evict, bool no_wait_gpu,
+                                    struct fence *fence, bool evict,
                                     struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_pipeline_move.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @fence: A fence object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Function for pipelining accelerated moves. Either free the memory
+ * immediately or hang it on a temporary buffer object.
+ */
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+                        struct fence *fence, bool evict,
+                        struct ttm_mem_reg *new_mem);
+
 /**
  * ttm_io_prot
  *
index 576e4639ca609e8bfb8686474406073937e912e9..314b3caa701cc20a6c3fcd628de3c48b7c0c7d21 100644 (file)
@@ -65,6 +65,7 @@ struct coredump_params {
        unsigned long limit;
        unsigned long mm_flags;
        loff_t written;
+       loff_t pos;
 };
 
 /*
index 0c72204c75fc9428f08c9e63667bf75dee0bc153..fb39d5add173f94a3f9be0e77455e0f5b54764d8 100644 (file)
@@ -25,7 +25,7 @@
 #define CLK_SET_PARENT_GATE    BIT(1) /* must be gated across re-parent */
 #define CLK_SET_RATE_PARENT    BIT(2) /* propagate rate change up one level */
 #define CLK_IGNORE_UNUSED      BIT(3) /* do not gate even if unused */
-#define CLK_IS_ROOT            BIT(4) /* Deprecated: Don't use */
+                               /* unused */
 #define CLK_IS_BASIC           BIT(5) /* Basic clk, can't do a to_clk_foo() */
 #define CLK_GET_RATE_NOCACHE   BIT(6) /* do not use the cached clk rate */
 #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
index 786ad32631a672695d88b47d67c1019df624acab..07b83d32f66c857aeabe1eae476db0c1a9e5fbda 100644 (file)
@@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
+static inline struct cpuidle_device *cpuidle_get_device(void)
+{return __this_cpu_read(cpuidle_devices); }
 #else
 static inline void disable_cpuidle(void) { }
 static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
@@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
        struct cpuidle_device *dev) {return NULL; }
+static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
 #endif
 
 #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
index 484c8792da825ed27bc17485fa3b4807e60d745f..f53fa055021ab16bc182b739dcb665d1a70ce4a6 100644 (file)
@@ -212,6 +212,7 @@ struct dentry_operations {
 #define DCACHE_OP_REAL                 0x08000000
 
 #define DCACHE_PAR_LOOKUP              0x10000000 /* being looked up (with parent locked shared) */
+#define DCACHE_DENTRY_CURSOR           0x20000000
 
 extern seqlock_t rename_lock;
 
@@ -575,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
        return inode;
 }
 
+/**
+ * d_real_inode - Return the real inode
+ * @dentry: The dentry to query
+ *
+ * If dentry is on a union/overlay, then return the underlying, real inode.
+ * Otherwise return d_inode().
+ */
+static inline struct inode *d_real_inode(struct dentry *dentry)
+{
+       return d_backing_inode(d_real(dentry));
+}
+
 
 #endif /* __LINUX_DCACHE_H */
index c2db3ca22217c85891f06b53496bb6f644d2399e..f196dd0b0f2f6c241715807f63bdccaa30973ce8 100644 (file)
@@ -1005,7 +1005,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
 /* Iterate through an efi_memory_map */
 #define for_each_efi_memory_desc_in_map(m, md)                            \
        for ((md) = (m)->map;                                              \
-            (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
+            ((void *)(md) + (m)->desc_size) <= (m)->map_end;              \
             (md) = (void *)(md) + (m)->desc_size)
 
 /**
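The rewritten loop bound fixes the empty-map case without changing normal iteration, as I read it: a comment-only sketch of the two forms side by side:

	/*
	 * old: (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size)
	 *      empty memmap (map == map_end == NULL): the subtraction wraps to a
	 *      near-top-of-address-space pointer, the comparison is true, and the
	 *      body runs over garbage.
	 * new: ((void *)(md) + (m)->desc_size) <= (m)->map_end
	 *      same case: NULL + desc_size <= NULL is false, body never runs.
	 */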
index d029ffac0d691ab51065cd960ab53a926e9bbf39..99403b19092f2da0ecadaffd21e9fc8065539434 100644 (file)
@@ -223,6 +223,8 @@ struct st_sensor_settings {
  * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
  * @tf: Transfer function structure used by I/O operations.
  * @tb: Transfer buffers and mutex used by I/O operations.
+ * @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
+ * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
  */
 struct st_sensor_data {
        struct device *dev;
@@ -247,6 +249,9 @@ struct st_sensor_data {
 
        const struct st_sensor_transfer_function *tf;
        struct st_sensor_transfer_buffer tb;
+
+       bool hw_irq_trigger;
+       s64 hw_timestamp;
 };
 
 #ifdef CONFIG_IIO_BUFFER
@@ -260,7 +265,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                                const struct iio_trigger_ops *trigger_ops);
 
 void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
-
+int st_sensors_validate_device(struct iio_trigger *trig,
+                              struct iio_dev *indio_dev);
 #else
 static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                                const struct iio_trigger_ops *trigger_ops)
@@ -271,6 +277,7 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
 {
        return;
 }
+#define st_sensors_validate_device NULL
 #endif
 
 int st_sensors_init_sensor(struct iio_dev *indio_dev,
index f2cb8d45513d182bef20020c775c617f8215af3a..f8834f820ec2475f0c4dab07df153034ccd7d26c 100644 (file)
@@ -190,7 +190,7 @@ extern struct task_group root_task_group;
 #define INIT_TASK(tsk) \
 {                                                                      \
        .state          = 0,                                            \
-       .stack          = &init_thread_info,                            \
+       .stack          = init_stack,                                   \
        .usage          = ATOMIC_INIT(2),                               \
        .flags          = PF_KTHREAD,                                   \
        .prio           = MAX_PRIO-20,                                  \
index 5ab85281230b9dd688a3cad78217a42e932ab746..f2d0258414cfef9a98384f3c59d691f387ef5862 100644 (file)
@@ -6,6 +6,7 @@
 #define __LINUX_ISA_H
 
 #include <linux/device.h>
+#include <linux/errno.h>
 #include <linux/kernel.h>
 
 struct isa_driver {
@@ -22,13 +23,13 @@ struct isa_driver {
 
 #define to_isa_driver(x) container_of((x), struct isa_driver, driver)
 
-#ifdef CONFIG_ISA
+#ifdef CONFIG_ISA_BUS_API
 int isa_register_driver(struct isa_driver *, unsigned int);
 void isa_unregister_driver(struct isa_driver *);
 #else
 static inline int isa_register_driver(struct isa_driver *d, unsigned int i)
 {
-       return 0;
+       return -ENODEV;
 }
 
 static inline void isa_unregister_driver(struct isa_driver *d)
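The stub's return value matters: with !CONFIG_ISA_BUS_API a driver calling isa_register_driver() used to get 0 back and assume registration succeeded even though nothing happened. The failure is now visible at the call site (driver and count names here are hypothetical):

	err = isa_register_driver(&my_isa_driver, MY_NDEV);
	if (err)
		return err;	/* reached whenever ISA bus support is compiled out */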
index 0536524bb9eb6467013a51a70ed28b576494e649..68904469fba1b7492efda1b846edab53ed1f3b4e 100644 (file)
@@ -117,13 +117,18 @@ struct module;
 
 #include <linux/atomic.h>
 
+#ifdef HAVE_JUMP_LABEL
+
 static inline int static_key_count(struct static_key *key)
 {
-       return atomic_read(&key->enabled);
+       /*
+        * -1 means the first static_key_slow_inc() is in progress.
+        *  static_key_enabled() must return true, so return 1 here.
+        */
+       int n = atomic_read(&key->enabled);
+       return n >= 0 ? n : 1;
 }
 
-#ifdef HAVE_JUMP_LABEL
-
 #define JUMP_TYPE_FALSE        0UL
 #define JUMP_TYPE_TRUE 1UL
 #define JUMP_TYPE_MASK 1UL
@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
 
 #else  /* !HAVE_JUMP_LABEL */
 
+static inline int static_key_count(struct static_key *key)
+{
+       return atomic_read(&key->enabled);
+}
+
 static __always_inline void jump_label_init(void)
 {
        static_key_initialized = true;
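The -1 sentinel comes from a matching kernel/jump_label.c change (an assumption here, since it is not shown in this diff): the first static_key_slow_inc() parks enabled at -1 while it patches branch sites for the 0 -> 1 transition, and any concurrent static_key_enabled() reader must already see the key as on. Simplified, without the retry loop the real code needs:

	if (atomic_cmpxchg(&key->enabled, 0, -1) == 0) {
		jump_label_update(key);		/* rewrite the branch sites */
		atomic_set(&key->enabled, 1);	/* publish the final count */
	} else {
		atomic_inc(&key->enabled);	/* already enabled: just count */
	}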
index 611927f5870d203e3b987d9cc462f8ce74a59b91..ac4b3c46a84d1493d59e22e241c2cc7b9045ce27 100644 (file)
@@ -59,14 +59,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 
 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(const void *ptr);
-void kasan_kfree(void *ptr);
+void kasan_poison_kfree(void *ptr);
 void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
                  gfp_t flags);
 void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
 
 void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
 bool kasan_slab_free(struct kmem_cache *s, void *object);
-void kasan_poison_slab_free(struct kmem_cache *s, void *object);
 
 struct kasan_cache {
        int alloc_meta_offset;
@@ -76,6 +75,9 @@ struct kasan_cache {
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);
 
+size_t ksize(const void *);
+static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+
 #else /* CONFIG_KASAN */
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -102,7 +104,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
 
 static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_poison_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
                                size_t size, gfp_t flags) {}
 static inline void kasan_krealloc(const void *object, size_t new_size,
@@ -114,11 +116,12 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
        return false;
 }
-static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
+static inline void kasan_unpoison_slab(const void *ptr) { }
+
 #endif /* CONFIG_KASAN */
 
 #endif /* LINUX_KASAN_H */
index d2b13066e78155ccacc072afcd75a2ae2bcb23fa..e5e7f2e80a54dd2f15737a3ef478287458641f6e 100644 (file)
@@ -42,15 +42,16 @@ struct led_classdev {
 #define LED_UNREGISTERING      (1 << 1)
        /* Upper 16 bits reflect control information */
 #define LED_CORE_SUSPENDRESUME (1 << 16)
-#define LED_BLINK_ONESHOT      (1 << 17)
-#define LED_BLINK_ONESHOT_STOP (1 << 18)
-#define LED_BLINK_INVERT       (1 << 19)
-#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20)
-#define LED_BLINK_DISABLE      (1 << 21)
-#define LED_SYSFS_DISABLE      (1 << 22)
-#define LED_DEV_CAP_FLASH      (1 << 23)
-#define LED_HW_PLUGGABLE       (1 << 24)
-#define LED_PANIC_INDICATOR    (1 << 25)
+#define LED_BLINK_SW           (1 << 17)
+#define LED_BLINK_ONESHOT      (1 << 18)
+#define LED_BLINK_ONESHOT_STOP (1 << 19)
+#define LED_BLINK_INVERT       (1 << 20)
+#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21)
+#define LED_BLINK_DISABLE      (1 << 22)
+#define LED_SYSFS_DISABLE      (1 << 23)
+#define LED_DEV_CAP_FLASH      (1 << 24)
+#define LED_HW_PLUGGABLE       (1 << 25)
+#define LED_PANIC_INDICATOR    (1 << 26)
 
        /* Set LED brightness level
         * Must not sleep. Use brightness_set_blocking for drivers
@@ -72,8 +73,8 @@ struct led_classdev {
         * and if both are zero then a sensible default should be chosen.
         * The call should adjust the timings in that case and if it can't
         * match the values specified exactly.
-        * Deactivate blinking again when the brightness is set to a fixed
-        * value via the brightness_set() callback.
+        * Deactivate blinking again when the brightness is set to LED_OFF
+        * via the brightness_set() callback.
         */
        int             (*blink_set)(struct led_classdev *led_cdev,
                                     unsigned long *delay_on,
index 035abdf62cfe953e5b15822aa847868032989574..73a48479892dd60ffc60a52e508764a4f897abde 100644 (file)
@@ -1240,8 +1240,6 @@ struct mlx5_destroy_psv_out {
        u8                      rsvd[8];
 };
 
-#define MLX5_CMD_OP_MAX 0x920
-
 enum {
        VPORT_STATE_DOWN                = 0x0,
        VPORT_STATE_UP                  = 0x1,
@@ -1369,6 +1367,12 @@ enum mlx5_cap_type {
 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
        MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
 
+#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
+       MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
+       MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+
 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
        MLX5_GET(flow_table_eswitch_cap, \
                 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
index 9a05cd7e5890b43400c65f6c903d5d815daffa28..e955a285900991c5623fca30010a960f68b7c7e8 100644 (file)
@@ -205,7 +205,8 @@ enum {
        MLX5_CMD_OP_ALLOC_FLOW_COUNTER            = 0x939,
        MLX5_CMD_OP_DEALLOC_FLOW_COUNTER          = 0x93a,
        MLX5_CMD_OP_QUERY_FLOW_COUNTER            = 0x93b,
-       MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c
+       MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c,
+       MLX5_CMD_OP_MAX
 };
 
 struct mlx5_ifc_flow_table_fields_supported_bits {
@@ -500,7 +501,9 @@ struct mlx5_ifc_e_switch_cap_bits {
        u8         vport_svlan_insert[0x1];
        u8         vport_cvlan_insert_if_not_exist[0x1];
        u8         vport_cvlan_insert_overwrite[0x1];
-       u8         reserved_at_5[0x1b];
+       u8         reserved_at_5[0x19];
+       u8         nic_vport_node_guid_modify[0x1];
+       u8         nic_vport_port_guid_modify[0x1];
 
        u8         reserved_at_20[0x7e0];
 };
@@ -4583,7 +4586,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
 };
 
 struct mlx5_ifc_modify_nic_vport_field_select_bits {
-       u8         reserved_at_0[0x19];
+       u8         reserved_at_0[0x16];
+       u8         node_guid[0x1];
+       u8         port_guid[0x1];
+       u8         reserved_at_18[0x1];
        u8         mtu[0x1];
        u8         change_event[0x1];
        u8         promisc[0x1];
index 64221027bf1f0d8bdaa2a1506e18da338e0f0c1e..ab310819ac3605a426c35ae273d4a66926794c48 100644 (file)
@@ -172,6 +172,7 @@ enum {
 enum {
        MLX5_FENCE_MODE_NONE                    = 0 << 5,
        MLX5_FENCE_MODE_INITIATOR_SMALL         = 1 << 5,
+       MLX5_FENCE_MODE_FENCE                   = 2 << 5,
        MLX5_FENCE_MODE_STRONG_ORDERING         = 3 << 5,
        MLX5_FENCE_MODE_SMALL_AND_FENCE         = 4 << 5,
 };
@@ -460,10 +461,9 @@ struct mlx5_core_qp {
 };
 
 struct mlx5_qp_path {
-       u8                      fl;
+       u8                      fl_free_ar;
        u8                      rsvd3;
-       u8                      free_ar;
-       u8                      pkey_index;
+       __be16                  pkey_index;
        u8                      rsvd0;
        u8                      grh_mlid;
        __be16                  rlid;
@@ -560,6 +560,7 @@ struct mlx5_modify_qp_mbox_in {
        __be32                  optparam;
        u8                      rsvd0[4];
        struct mlx5_qp_context  ctx;
+       u8                      rsvd2[16];
 };
 
 struct mlx5_modify_qp_mbox_out {
index 301da4a5e6bfa340363799310a21251c685935f0..6c16c198f680c26df40d1322ca1381e6bbb90510 100644 (file)
@@ -50,6 +50,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+                                   u32 vport, u64 node_guid);
 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
                                        u16 *qkey_viol_cntr);
 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
index 5df5feb49575cd66c17d574b7431ed5ee60e7ec3..ece042dfe23cbf46c196b868c8efaff71d7cc828 100644 (file)
@@ -602,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }
 
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-               struct page *page, pte_t *pte, bool write, bool anon, bool old);
+               struct page *page, pte_t *pte, bool write, bool anon);
 #endif
 
 /*
index c7292e8ea080118b9d09542a875b95ebbddc31b3..74eb28cadbef032017ae12fe4d11ab4f70996d63 100644 (file)
@@ -614,7 +614,7 @@ static inline struct device_node *of_parse_phandle(const struct device_node *np,
        return NULL;
 }
 
-static inline int of_parse_phandle_with_args(struct device_node *np,
+static inline int of_parse_phandle_with_args(const struct device_node *np,
                                             const char *list_name,
                                             const char *cells_name,
                                             int index,
index f6e9e85164e8bd8dc3c110d01fdca23a504201ba..b969e944396223defb902c7e7f2ac1238b410585 100644 (file)
@@ -8,7 +8,7 @@ struct pci_dev;
 struct of_phandle_args;
 struct device_node;
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_PCI
 int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
 struct device_node *of_pci_find_child_device(struct device_node *parent,
                                             unsigned int devfn);
index ad2f6705437266373ff1bb73881df0a5da8f0d3b..c201060e0c6da3bfd4299d1707095e7f32680f29 100644 (file)
@@ -31,6 +31,13 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
 int of_reserved_mem_device_init(struct device *dev);
 void of_reserved_mem_device_release(struct device *dev);
 
+int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+                                            phys_addr_t align,
+                                            phys_addr_t start,
+                                            phys_addr_t end,
+                                            bool nomap,
+                                            phys_addr_t *res_base);
+
 void fdt_init_reserved_mem(void);
 void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
                               phys_addr_t base, phys_addr_t size);
index d921afd5f10907c8d8058c0136463f6fcf54f345..12343caa114ef3ec27a5ab5a3caaf5320cbe6508 100644 (file)
@@ -175,6 +175,8 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
 int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
                struct device_node *np_config, struct pinctrl_map **map,
                unsigned *num_maps, enum pinctrl_map_type type);
+void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
+               struct pinctrl_map *map, unsigned num_maps);
 
 static inline int pinconf_generic_dt_node_to_map_group(
                struct pinctrl_dev *pctldev, struct device_node *np_config,
index 17018f3c066ed5a44c76ca77ce63b6203725ef34..908b67c847cd656489f0a679422852e7c845d18f 100644 (file)
@@ -235,6 +235,9 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
        if (!pwm)
                return -EINVAL;
 
+       if (duty_ns < 0 || period_ns < 0)
+               return -EINVAL;
+
        pwm_get_state(pwm, &state);
        if (state.duty_cycle == duty_ns && state.period == period_ns)
                return 0;
index 6e42ada26345d507ffdec856107382a9fb67656a..253538f29ade890dcd48d1708fcd15ae362653be 100644 (file)
@@ -3007,7 +3007,7 @@ static inline int object_is_on_stack(void *obj)
        return (obj >= stack) && (obj < (stack + THREAD_SIZE));
 }
 
-extern void thread_info_cache_init(void);
+extern void thread_stack_cache_init(void);
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
 static inline unsigned long stack_not_used(struct task_struct *p)
index 7973a821ac58877a56ff5214793786948170ae8e..ead97654c4e9afa7239e068cb37f6981d77ce6a3 100644 (file)
@@ -277,7 +277,10 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
 
 static inline int raw_read_seqcount_latch(seqcount_t *s)
 {
-       return lockless_dereference(s)->sequence;
+       int seq = READ_ONCE(s->sequence);
+       /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
+       smp_read_barrier_depends();
+       return seq;
 }
 
 /**
@@ -331,7 +334,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
  *     unsigned seq, idx;
  *
  *     do {
- *             seq = lockless_dereference(latch)->seq;
+ *             seq = raw_read_seqcount_latch(&latch->seq);
  *
  *             idx = seq & 0x01;
  *             entry = data_query(latch->data[idx], ...);
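The kerneldoc fragment above is part of a larger read-side example; completed (data_query(), key, and the latch layout are the documentation's own placeholders), the loop reads:

	unsigned int seq, idx;
	struct entry *entry;

	do {
		seq = raw_read_seqcount_latch(&latch->seq);
		idx = seq & 0x01;		/* even sequence: slot 0; odd: slot 1 */
		entry = data_query(latch->data[idx], key);
		smp_rmb();			/* order the query before the re-check */
	} while (seq != latch->seq.sequence);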
index 19c659d1c0f86cd40fb9b90f48dac0cb7071b560..b6810c92b8bb14d9ef8c9d1036a0b25d40530ac3 100644 (file)
@@ -137,8 +137,6 @@ struct rpc_create_args {
 #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT     (1UL << 9)
 
 struct rpc_clnt *rpc_create(struct rpc_create_args *args);
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
-                                       struct rpc_xprt *xprt);
 struct rpc_clnt        *rpc_bind_new_program(struct rpc_clnt *,
                                const struct rpc_program *, u32);
 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
index b7dabc4baafd817d0b2a41c5cf8e54e4b3fddc47..79ba50856707b9b9a619be78335b27b383312ed9 100644 (file)
@@ -84,6 +84,7 @@ struct svc_xprt {
 
        struct net              *xpt_net;
        struct rpc_xprt         *xpt_bc_xprt;   /* NFSv4.1 backchannel */
+       struct rpc_xprt_switch  *xpt_bc_xps;    /* NFSv4.1 backchannel */
 };
 
 static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
index 5aa3834619a8c3222be6530f927ef30349e2fa74..5e3e1b63dbb3c97f0145bd15a23dba7d4e916528 100644 (file)
@@ -297,6 +297,7 @@ struct xprt_create {
        size_t                  addrlen;
        const char              *servername;
        struct svc_xprt         *bc_xprt;       /* NFSv4.1 backchannel */
+       struct rpc_xprt_switch  *bc_xps;
        unsigned int            flags;
 };
 
index e45abe7db9a627dda034f7d4616fad2d3ae1f920..ee517bef0db04902412c17b7eceac24839a4564f 100644 (file)
@@ -335,6 +335,8 @@ struct thermal_genl_event {
  * @get_trend: a pointer to a function that reads the sensor temperature trend.
  * @set_emul_temp: a pointer to a function that sets sensor emulated
  *                temperature.
+ * @set_trip_temp: a pointer to a function that sets the trip temperature on
+ *                hardware.
  */
 struct thermal_zone_of_device_ops {
        int (*get_temp)(void *, int *);
index 457651bf45b0bd9cad8cebe48cca5f8c005a5579..fefe8b06a63dbc15c28b9c09a76e14c03f6ae1e2 100644 (file)
@@ -1034,6 +1034,8 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget)
  * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
  *     this driver will be bound to any available UDC.
  * @pending: UDC core private data used for deferred probe of this driver.
+ * @match_existing_only: If udc is not found, return an error and don't add this
+ *      gadget driver to the list of pending drivers
  *
  * Devices are disabled till a gadget driver successfully bind()s, which
  * means the driver will handle setup() requests needed to enumerate (and
@@ -1097,6 +1099,7 @@ struct usb_gadget_driver {
 
        char                    *udc_name;
        struct list_head        pending;
+       unsigned                match_existing_only:1;
 };
 
 
index 0b3da40a525ecf2370f528620d5e85370904325d..d315c89078698b60e0d76a15940b0c390f0b5f12 100644 (file)
@@ -142,10 +142,11 @@ enum musb_vbus_id_status {
 };
 
 #if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
-void musb_mailbox(enum musb_vbus_id_status status);
+int musb_mailbox(enum musb_vbus_id_status status);
 #else
-static inline void musb_mailbox(enum musb_vbus_id_status status)
+static inline int musb_mailbox(enum musb_vbus_id_status status)
 {
+       return 0;
 }
 #endif
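
Since musb_mailbox() now reports errors, PHY glue code can propagate a failed VBUS/ID notification instead of losing it. A minimal hedged sketch of a caller; the function name and the error policy are illustrative, not part of this merge:

#include <linux/printk.h>
#include <linux/usb/musb.h>

/* Illustrative: forward an ID/VBUS event and log when the core rejects it. */
static void example_report_vbus(enum musb_vbus_id_status status)
{
	int err = musb_mailbox(status);

	if (err)
		pr_warn("musb_mailbox() failed: %d\n", err);
}
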
 
index 98a938aabdfb399c7cfd0de69ff0ef6669e26413..7a8d6037a4bb784aa4d836c825ed42a5e92bdf4a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * v4l2-mc.h - Media Controller V4L2 types and prototypes
  *
- * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
  * Copyright (C) 2006-2010 Nokia Corporation
  * Copyright (c) 2016 Intel Corporation.
  *
index 48103cf94e976e9c13809cfec895e9bb1c9fa96c..13de0ccaa0594172a946572bbc04397ab0a89032 100644 (file)
@@ -42,6 +42,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 
 int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
                      struct sockaddr __user **, struct iovec **);
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
 asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
                                   unsigned int);
 asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
index af4c10ebb2414494e75c279b3d9c91da48442982..cd6018a9ee2467cec1fbe4d621fc294bc843a05e 100644 (file)
@@ -1232,7 +1232,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 const char *ip_vs_state_name(__u16 proto, int state);
 
 void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
-int ip_vs_check_template(struct ip_vs_conn *ct);
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
 void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
 int ip_vs_conn_init(void);
 void ip_vs_conn_cleanup(void);
index 9c5638ad872e39d2d01cca6f86c1620e499c916c..0dbce55437f2c57cc1642d865a91ffc4169c9866 100644 (file)
@@ -28,8 +28,8 @@ struct nf_queue_handler {
                                                struct nf_hook_ops *ops);
 };
 
-void nf_register_queue_handler(const struct nf_queue_handler *qh);
-void nf_unregister_queue_handler(void);
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(struct net *net);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
 void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
index 38aa4983e2a90bd5a391e844e0556591e5d85f60..36d723579af21f78e2da780c4840762ce88a4770 100644 (file)
@@ -5,11 +5,13 @@
 
 struct proc_dir_entry;
 struct nf_logger;
+struct nf_queue_handler;
 
 struct netns_nf {
 #if defined CONFIG_PROC_FS
        struct proc_dir_entry *proc_netfilter;
 #endif
+       const struct nf_queue_handler __rcu *queue_handler;
        const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *nf_log_dir_header;
index 0f7efa88f210360928b36a80022c9bd53d778b40..3722dda0199ddaa0e5e8554c57c843968075a7b9 100644 (file)
@@ -392,16 +392,20 @@ struct tc_cls_u32_offload {
        };
 };
 
-static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev,
+                                    const struct tcf_proto *tp, u32 flags)
 {
+       const struct Qdisc *sch = tp->q;
+       const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
+
        if (!(dev->features & NETIF_F_HW_TC))
                return false;
-
        if (flags & TCA_CLS_FLAGS_SKIP_HW)
                return false;
-
        if (!dev->netdev_ops->ndo_setup_tc)
                return false;
+       if (cops && cops->tcf_cl_offload)
+               return cops->tcf_cl_offload(tp->classid);
 
        return true;
 }
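
tc_should_offload() can now consult the qdisc that owns the filter chain through the tcf_cl_offload hook added to Qdisc_class_ops in sch_generic.h below. A hedged sketch of a qdisc opting specific classes out of hardware offload; the callback body and the root-only policy are illustrative:

#include <linux/pkt_sched.h>
#include <net/sch_generic.h>

/* Illustrative: only allow offload for filters attached to the root class. */
static bool example_tcf_cl_offload(u32 classid)
{
	return classid == TC_H_ROOT;
}

static const struct Qdisc_class_ops example_class_ops = {
	/* ...mandatory class ops elided for brevity... */
	.tcf_cl_offload	= example_tcf_cl_offload,
};
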
index a1fd76c22a5903cadf2e97c844017925cfbc9f93..62d553184e91c5c6f27da411f9c448fef91ded0f 100644 (file)
@@ -168,6 +168,7 @@ struct Qdisc_class_ops {
 
        /* Filter manipulation */
        struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
+       bool                    (*tcf_cl_offload)(u32 classid);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                        u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -691,9 +692,11 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!sch->gso_skb) {
                sch->gso_skb = sch->dequeue(sch);
-               if (sch->gso_skb)
+               if (sch->gso_skb) {
                        /* it's still part of the queue */
+                       qdisc_qstats_backlog_inc(sch, sch->gso_skb);
                        sch->q.qlen++;
+               }
        }
 
        return sch->gso_skb;
@@ -706,6 +709,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 
        if (skb) {
                sch->gso_skb = NULL;
+               qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
index 432bed510369e7b19a4773ec5a8bb09b8d9bb1e6..7e440d41487aa8671fbb8952fb01e2bbf63b11f0 100644 (file)
@@ -217,10 +217,10 @@ enum ib_device_cap_flags {
        IB_DEVICE_CROSS_CHANNEL         = (1 << 27),
        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
        IB_DEVICE_SIGNATURE_HANDOVER            = (1 << 30),
-       IB_DEVICE_ON_DEMAND_PAGING              = (1 << 31),
+       IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
        IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
-       IB_DEVICE_VIRTUAL_FUNCTION              = ((u64)1 << 33),
-       IB_DEVICE_RAW_SCATTER_FCS               = ((u64)1 << 34),
+       IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
+       IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
 };
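
The 1ULL spelling matters because a bare 1 is an int: shifting it by 31 produces a negative value (formally undefined behaviour in C), which then sign-extends when widened into a 64-bit flags word. A standalone illustration of the difference on common ABIs; plain userspace C, not kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bad  = (uint64_t)(1 << 31);	/* int shift: sign-extends */
	uint64_t good = 1ULL << 31;		/* 64-bit shift: just bit 31 */

	printf("bad  = 0x%016llx\n", (unsigned long long)bad);
	printf("good = 0x%016llx\n", (unsigned long long)good);
	/* typically prints 0xffffffff80000000 vs 0x0000000080000000 */
	return 0;
}
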
 
 enum ib_signature_prot_cap {
index 16274e2133cdcca109fa0d40f1b9bf591493d27e..9c9a27d42aaa5e89aa778d3597cb54b13c270ecc 100644 (file)
@@ -203,7 +203,9 @@ struct rvt_driver_provided {
 
        /*
         * Allocate a private queue pair data structure for driver specific
-        * information which is opaque to rdmavt.
+        * information which is opaque to rdmavt.  Errors are returned via
+        * ERR_PTR(err).  The driver is free to return NULL or a valid
+        * pointer.
         */
        void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                                gfp_t gfp);
index fc3a481ad91ecdb5f25fd5c3c7d9b210d4f74f5e..530c57bdefa06567c3bcaba9249845af1d4e535a 100644 (file)
@@ -53,18 +53,19 @@ struct hdmi_codec_params {
        int channels;
 };
 
+struct hdmi_codec_pdata;
 struct hdmi_codec_ops {
        /*
         * Called when ASoC starts an audio stream setup.
         * Optional
         */
-       int (*audio_startup)(struct device *dev);
+       int (*audio_startup)(struct device *dev, void *data);
 
        /*
         * Configures HDMI-encoder for audio stream.
         * Mandatory
         */
-       int (*hw_params)(struct device *dev,
+       int (*hw_params)(struct device *dev, void *data,
                         struct hdmi_codec_daifmt *fmt,
                         struct hdmi_codec_params *hparms);
 
@@ -72,19 +73,20 @@ struct hdmi_codec_ops {
         * Shuts down the audio stream.
         * Mandatory
         */
-       void (*audio_shutdown)(struct device *dev);
+       void (*audio_shutdown)(struct device *dev, void *data);
 
        /*
         * Mute/unmute HDMI audio stream.
         * Optional
         */
-       int (*digital_mute)(struct device *dev, bool enable);
+       int (*digital_mute)(struct device *dev, void *data, bool enable);
 
        /*
         * Provides EDID-Like-Data from connected HDMI device.
         * Optional
         */
-       int (*get_eld)(struct device *dev, uint8_t *buf, size_t len);
+       int (*get_eld)(struct device *dev, void *data,
+                      uint8_t *buf, size_t len);
 };
 
 /* HDMI codec initialization data */
@@ -93,6 +95,7 @@ struct hdmi_codec_pdata {
        uint i2s:1;
        uint spdif:1;
        int max_i2s_channels;
+       void *data;
 };
 
 #define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
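
With the extra void *data argument, the context registered in hdmi_codec_pdata.data arrives at every callback, so encoder drivers no longer need side channels such as dev_get_drvdata(). A hedged sketch of wiring it up; my_encoder and the empty bodies are illustrative, and hdmi_codec_pdata.ops is assumed to carry the ops pointer as in the mainline header:

#include <sound/hdmi-codec.h>

struct my_encoder { int dummy; };	/* illustrative driver context */

static int my_hw_params(struct device *dev, void *data,
			struct hdmi_codec_daifmt *fmt,
			struct hdmi_codec_params *hparms)
{
	struct my_encoder *enc = data;	/* pdata.data round-trips here */

	(void)enc;
	return 0;			/* configure the encoder here */
}

static void my_audio_shutdown(struct device *dev, void *data)
{
	/* tear down the stream */
}

static const struct hdmi_codec_ops my_ops = {
	.hw_params	= my_hw_params,		/* mandatory */
	.audio_shutdown	= my_audio_shutdown,	/* mandatory */
};

static struct my_encoder my_enc;

static struct hdmi_codec_pdata my_pdata = {
	.ops		= &my_ops,
	.i2s		= 1,
	.max_i2s_channels = 8,
	.data		= &my_enc,	/* handed back to every callback */
};
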
index cdecf87576e890f48c7e93830c920fa33ff9b85e..462246aa200e1db1890de7d6640c81e859da250d 100644 (file)
@@ -487,6 +487,22 @@ struct drm_amdgpu_cs_chunk_data {
 #define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
 #define AMDGPU_INFO_MMR_SH_INDEX_MASK  0xff
 
+struct drm_amdgpu_query_fw {
+       /** AMDGPU_INFO_FW_* */
+       __u32 fw_type;
+       /**
+        * Index of the IP if there are more IPs of
+        * the same type.
+        */
+       __u32 ip_instance;
+       /**
+        * Index of the engine. Whether this is used depends
+        * on the firmware type. (e.g. MEC, SDMA)
+        */
+       __u32 index;
+       __u32 _pad;
+};
+
 /* Input structure for the INFO ioctl */
 struct drm_amdgpu_info {
        /* Where the return value will be stored */
@@ -522,21 +538,7 @@ struct drm_amdgpu_info {
                        __u32 flags;
                } read_mmr_reg;
 
-               struct {
-                       /** AMDGPU_INFO_FW_* */
-                       __u32 fw_type;
-                       /**
-                        * Index of the IP if there are more IPs of
-                        * the same type.
-                        */
-                       __u32 ip_instance;
-                       /**
-                        * Index of the engine. Whether this is used depends
-                        * on the firmware type. (e.g. MEC, SDMA)
-                        */
-                       __u32 index;
-                       __u32 _pad;
-               } query_fw;
+               struct drm_amdgpu_query_fw query_fw;
        };
 };
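
Hoisting the query_fw members into a named struct lets userspace declare the request type directly. A hedged sketch of preparing an INFO request for a firmware version; the AMDGPU_INFO_FW_VERSION and AMDGPU_INFO_FW_SDMA constants and struct drm_amdgpu_info_firmware are assumed from the mainline header, and libdrm's amdgpu_query_firmware_version() is the usual wrapper in practice:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "amdgpu_drm.h"

/* Illustrative: read the firmware version of the first SDMA engine. */
static int query_sdma_fw(int fd, struct drm_amdgpu_info_firmware *out)
{
	struct drm_amdgpu_info req;

	memset(&req, 0, sizeof(req));
	req.return_pointer = (uintptr_t)out;
	req.return_size = sizeof(*out);
	req.query = AMDGPU_INFO_FW_VERSION;
	req.query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	req.query_fw.ip_instance = 0;
	req.query_fw.index = 0;		/* first SDMA engine */

	return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req);	/* 0 on success */
}
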
 
index c17d63d8b54379be763de966b7a9ce8b70028aaa..d7e81a3886fdc41e63ec00cd7b1f0c50c7f24ebb 100644 (file)
@@ -361,6 +361,8 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_GPU_RESET        35
 #define I915_PARAM_HAS_RESOURCE_STREAMER 36
 #define I915_PARAM_HAS_EXEC_SOFTPIN     37
+#define I915_PARAM_HAS_POOLED_EU        38
+#define I915_PARAM_MIN_EU_IN_POOL       39
 
 typedef struct drm_i915_getparam {
        __s32 param;
@@ -1171,6 +1173,7 @@ struct drm_i915_gem_context_param {
 #define I915_CONTEXT_PARAM_BAN_PERIOD  0x1
 #define I915_CONTEXT_PARAM_NO_ZEROMAP  0x2
 #define I915_CONTEXT_PARAM_GTT_SIZE    0x3
+#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE    0x4
        __u64 value;
 };
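
Userspace discovers the new capabilities through the usual GETPARAM ioctl. A hedged sketch of probing I915_PARAM_HAS_POOLED_EU; production code would typically go through drmIoctl() for EINTR handling:

#include <sys/ioctl.h>
#include "i915_drm.h"

/* Illustrative: returns nonzero if the kernel reports pooled-EU support. */
static int i915_has_pooled_eu(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_POOLED_EU,
		.value = &value,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;	/* older kernels: parameter unknown */
	return value;
}
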
 
index af12e8a184c82b890968dfc8d45bcfa97ccb3e4f..ad7edc3edf7ca1d653a0bc025a5eda6692b74370 100644 (file)
@@ -37,6 +37,7 @@ extern "C" {
 #define DRM_VC4_MMAP_BO                           0x04
 #define DRM_VC4_CREATE_SHADER_BO                  0x05
 #define DRM_VC4_GET_HANG_STATE                    0x06
+#define DRM_VC4_GET_PARAM                         0x07
 
 #define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
 #define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -45,6 +46,7 @@ extern "C" {
 #define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
 #define DRM_IOCTL_VC4_CREATE_SHADER_BO    DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
 #define DRM_IOCTL_VC4_GET_HANG_STATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
+#define DRM_IOCTL_VC4_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
 
 struct drm_vc4_submit_rcl_surface {
        __u32 hindex; /* Handle index, or ~0 if not present. */
@@ -280,6 +282,17 @@ struct drm_vc4_get_hang_state {
        __u32 pad[16];
 };
 
+#define DRM_VC4_PARAM_V3D_IDENT0               0
+#define DRM_VC4_PARAM_V3D_IDENT1               1
+#define DRM_VC4_PARAM_V3D_IDENT2               2
+#define DRM_VC4_PARAM_SUPPORTS_BRANCHES                3
+
+struct drm_vc4_get_param {
+       __u32 param;
+       __u32 pad;
+       __u64 value;
+};
+
 #if defined(__cplusplus)
 }
 #endif
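
A hedged userspace sketch of the new ioctl, reading one identity register per call; it assumes an open vc4 DRM fd, and production code would again use drmIoctl() for restart handling:

#include <stdint.h>
#include <sys/ioctl.h>
#include "vc4_drm.h"

/* Illustrative: fetch a single VC4 parameter, returning 0 on ioctl failure. */
static uint64_t vc4_get_param(int fd, uint32_t param)
{
	struct drm_vc4_get_param gp = { .param = param };	/* pad/value zeroed */

	if (ioctl(fd, DRM_IOCTL_VC4_GET_PARAM, &gp) != 0)
		return 0;
	return gp.value;
}

/* e.g.: uint64_t ident0 = vc4_get_param(fd, DRM_VC4_PARAM_V3D_IDENT0); */
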
index 23c6960e94a4fa268dbd8286be6a993ebbd6ce12..2bdd1e3e70076dd3476e3398452c4837b2b80f9b 100644 (file)
@@ -118,7 +118,7 @@ struct btrfs_ioctl_vol_args_v2 {
        };
        union {
                char name[BTRFS_SUBVOL_NAME_MAX + 1];
-               u64 devid;
+               __u64 devid;
        };
 };
 
index ca1054dd82497ddcbce9624e9ad20b2791248be4..72a04a0e8ccef1e5560378af9177a6c47954daaf 100644 (file)
@@ -1,5 +1,5 @@
 #ifndef _UAPI_LINUX_GTP_H_
-#define _UAPI_LINUX_GTP_H__
+#define _UAPI_LINUX_GTP_H_
 
 enum gtp_genl_cmds {
        GTP_CMD_NEWPDP,
index a7f27704f9807e7b14eadb9b08cf09c2c183a6a7..691984cb0b915da8afbc60211145d9dbb45c7143 100644 (file)
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += asequencer.h
+header-y += asoc.h
 header-y += asound.h
 header-y += asound_fm.h
 header-y += compress_offload.h
@@ -10,3 +11,5 @@ header-y += hdsp.h
 header-y += hdspm.h
 header-y += sb16_csp.h
 header-y += sfnt_info.h
+header-y += tlv.h
+header-y += usb_stream.h
index 3a2a794017896994a42d370e59b14e7a492c37b9..7adeaae0696109c0088016bd4ad5c340d6455dd1 100644 (file)
@@ -235,9 +235,6 @@ int ipu_di_init_sync_panel(struct ipu_di *, struct ipu_di_signal_cfg *sig);
 struct dmfc_channel;
 int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc);
 void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc);
-int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
-               unsigned long bandwidth_mbs, int burstsize);
-void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
 void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width);
 struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
 void ipu_dmfc_put(struct dmfc_channel *dmfc);
index 4c17fda5c2ff51c33eb4bf5fbd06a810087c3287..eae02aa03c9e80062ba6920c57c347c415e6d0ca 100644 (file)
@@ -453,7 +453,7 @@ void __init __weak smp_setup_processor_id(void)
 }
 
 # if THREAD_SIZE >= PAGE_SIZE
-void __init __weak thread_info_cache_init(void)
+void __init __weak thread_stack_cache_init(void)
 {
 }
 #endif
@@ -627,7 +627,7 @@ asmlinkage __visible void __init start_kernel(void)
        /* Should be run before the first non-init thread is created */
        init_espfix_bsp();
 #endif
-       thread_info_cache_init();
+       thread_stack_cache_init();
        cred_init();
        fork_init();
        proc_caches_init();
@@ -708,11 +708,13 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
 {
        struct blacklist_entry *entry;
        char fn_name[KSYM_SYMBOL_LEN];
+       unsigned long addr;
 
        if (list_empty(&blacklisted_initcalls))
                return false;
 
-       sprint_symbol_no_offset(fn_name, (unsigned long)fn);
+       addr = (unsigned long) dereference_function_descriptor(fn);
+       sprint_symbol_no_offset(fn_name, addr);
 
        list_for_each_entry(entry, &blacklisted_initcalls, next) {
                if (!strcmp(fn_name, entry->buf)) {
index 274450efea90eebeb5f3fa4ddb777c2ea2e6ef73..9c51ec3f0f440f35dda1bf9e9b3e02e421388df3 100644 (file)
@@ -3862,10 +3862,8 @@ static void _free_event(struct perf_event *event)
        if (event->ctx)
                put_ctx(event->ctx);
 
-       if (event->pmu) {
-               exclusive_event_destroy(event);
-               module_put(event->pmu->module);
-       }
+       exclusive_event_destroy(event);
+       module_put(event->pmu->module);
 
        call_rcu(&event->rcu_head, free_event_rcu);
 }
index 5c2c355aa97ff4552c151f796d18cc8732017677..4a7ec0c6c88c9ea85b2c74c6a749e11533220d44 100644 (file)
@@ -148,18 +148,18 @@ static inline void free_task_struct(struct task_struct *tsk)
 }
 #endif
 
-void __weak arch_release_thread_info(struct thread_info *ti)
+void __weak arch_release_thread_stack(unsigned long *stack)
 {
 }
 
-#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
 
 /*
  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
  * kmemcache based allocator.
  */
 # if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
                                                  int node)
 {
        struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
@@ -172,33 +172,33 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
        return page ? page_address(page) : NULL;
 }
 
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_stack(unsigned long *stack)
 {
-       struct page *page = virt_to_page(ti);
+       struct page *page = virt_to_page(stack);
 
        memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
                                    -(1 << THREAD_SIZE_ORDER));
        __free_kmem_pages(page, THREAD_SIZE_ORDER);
 }
 # else
-static struct kmem_cache *thread_info_cache;
+static struct kmem_cache *thread_stack_cache;
 
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
                                                  int node)
 {
-       return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+       return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
 }
 
-static void free_thread_info(struct thread_info *ti)
+static void free_thread_stack(unsigned long *stack)
 {
-       kmem_cache_free(thread_info_cache, ti);
+       kmem_cache_free(thread_stack_cache, stack);
 }
 
-void thread_info_cache_init(void)
+void thread_stack_cache_init(void)
 {
-       thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+       thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
                                              THREAD_SIZE, 0, NULL);
-       BUG_ON(thread_info_cache == NULL);
+       BUG_ON(thread_stack_cache == NULL);
 }
 # endif
 #endif
@@ -221,9 +221,9 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(unsigned long *stack, int account)
 {
-       struct zone *zone = page_zone(virt_to_page(ti));
+       struct zone *zone = page_zone(virt_to_page(stack));
 
        mod_zone_page_state(zone, NR_KERNEL_STACK, account);
 }
@@ -231,8 +231,8 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 void free_task(struct task_struct *tsk)
 {
        account_kernel_stack(tsk->stack, -1);
-       arch_release_thread_info(tsk->stack);
-       free_thread_info(tsk->stack);
+       arch_release_thread_stack(tsk->stack);
+       free_thread_stack(tsk->stack);
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
        put_seccomp_filter(tsk);
@@ -343,7 +343,7 @@ void set_task_stack_end_magic(struct task_struct *tsk)
 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
        struct task_struct *tsk;
-       struct thread_info *ti;
+       unsigned long *stack;
        int err;
 
        if (node == NUMA_NO_NODE)
@@ -352,15 +352,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        if (!tsk)
                return NULL;
 
-       ti = alloc_thread_info_node(tsk, node);
-       if (!ti)
+       stack = alloc_thread_stack_node(tsk, node);
+       if (!stack)
                goto free_tsk;
 
        err = arch_dup_task_struct(tsk, orig);
        if (err)
-               goto free_ti;
+               goto free_stack;
 
-       tsk->stack = ti;
+       tsk->stack = stack;
 #ifdef CONFIG_SECCOMP
        /*
         * We must handle setting up seccomp filters once we're under
@@ -392,14 +392,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        tsk->task_frag.page = NULL;
        tsk->wake_q.next = NULL;
 
-       account_kernel_stack(ti, 1);
+       account_kernel_stack(stack, 1);
 
        kcov_task_init(tsk);
 
        return tsk;
 
-free_ti:
-       free_thread_info(ti);
+free_stack:
+       free_thread_stack(stack);
 free_tsk:
        free_task_struct(tsk);
        return NULL;
index ee25f5ba4aca13f8a05a9e6c3a4c5a2aff768131..33664f70e2d25e880efdbc8695fcc12989871150 100644 (file)
@@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
-       struct page *page;
+       struct page *page, *tail;
        struct address_space *mapping;
        int err, ro = 0;
 
@@ -530,7 +530,15 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
         * considered here and page lock forces unnecessary serialization.
         * From this point on, mapping will be re-verified if necessary and
         * page lock will be acquired only if it is unavoidable
-        */
+        *
+        * Mapping checks require the head page for any compound page, so the
+        * head page and mapping are looked up now. For anonymous pages, it
+        * does not matter if the page splits in the future as the key is
+        * based on the address. For filesystem-backed pages, the tail is
+        * required as the index of the page determines the key. For
+        * base pages, there is no tail page and tail == page.
+        */
+       tail = page;
        page = compound_head(page);
        mapping = READ_ONCE(page->mapping);
 
@@ -654,7 +662,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
                key->shared.inode = inode;
-               key->shared.pgoff = basepage_index(page);
+               key->shared.pgoff = basepage_index(tail);
                rcu_read_unlock();
        }
 
index 05254eeb4b4e485be75bacff667c8ed3aab4a200..4b353e0be1215a081e2cf367d3a3074700e4a685 100644 (file)
@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
+       int v, v1;
+
        STATIC_KEY_CHECK_USE();
-       if (atomic_inc_not_zero(&key->enabled))
-               return;
+
+       /*
+        * Careful if we get concurrent static_key_slow_inc() calls;
+        * later calls must wait for the first one to _finish_ the
+        * jump_label_update() process.  At the same time, however,
+        * the jump_label_update() call below wants to see
+        * static_key_enabled(&key) for jumps to be updated properly.
+        *
+        * So give a special meaning to negative key->enabled: it sends
+        * static_key_slow_inc() down the slow path, and it is non-zero
+        * so it counts as "enabled" in jump_label_update().  Note that
+        * atomic_inc_unless_negative() checks >= 0, so roll our own.
+        */
+       for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+               v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+               if (likely(v1 == v))
+                       return;
+       }
 
        jump_label_lock();
-       if (atomic_inc_return(&key->enabled) == 1)
+       if (atomic_read(&key->enabled) == 0) {
+               atomic_set(&key->enabled, -1);
                jump_label_update(key);
+               atomic_set(&key->enabled, 1);
+       } else {
+               atomic_inc(&key->enabled);
+       }
        jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
 static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
 {
+       /*
+        * The negative count check is valid even when a negative
+        * key->enabled is in use by static_key_slow_inc(); a
+        * __static_key_slow_dec() before the first static_key_slow_inc()
+        * returns is unbalanced, because all other static_key_slow_inc()
+        * instances block while the update is in progress.
+        */
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
index a02f2dddd1d710b7ea33bd58963e2d9d388266c1..8d44b3fea9d08f901e3b24ed4a619a4883d4042f 100644 (file)
@@ -264,7 +264,12 @@ static const struct file_operations kcov_fops = {
 
 static int __init kcov_init(void)
 {
-       if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
+       /*
+        * The kcov debugfs file won't ever get removed and thus,
+        * there is no need to protect it against removal races. The
+        * use of debugfs_create_file_unsafe() is actually safe here.
+        */
+       if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
                pr_err("failed to create kcov in debugfs\n");
                return -ENOMEM;
        }
index 3ef3736002d895854794a4d940adcd96288640f2..9c951fade415be1549b568d86275e0327bbdba58 100644 (file)
@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
 }
 
 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                           struct thread_info *ti)
+                           struct task_struct *task)
 {
        SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
 
        /* Mark the current thread as blocked on the lock: */
-       ti->task->blocked_on = waiter;
+       task->blocked_on = waiter;
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                        struct thread_info *ti)
+                        struct task_struct *task)
 {
        DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
-       DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
-       DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
-       ti->task->blocked_on = NULL;
+       DEBUG_LOCKS_WARN_ON(waiter->task != task);
+       DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+       task->blocked_on = NULL;
 
        list_del_init(&waiter->list);
        waiter->task = NULL;
index 0799fd3e4cfacbfdac923c6dedba278dbc36e0f9..d06ae3bb46c5f9f928cf7123464f49e284ab3ecf 100644 (file)
@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
                                   struct mutex_waiter *waiter,
-                                  struct thread_info *ti);
+                                  struct task_struct *task);
 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                               struct thread_info *ti);
+                               struct task_struct *task);
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
                             struct lock_class_key *key);
index e364b424b019ff51de0bde17fe2611444b5c1efb..a70b90db3909c1d3a3cbfaf45d6e27f90a786b1b 100644 (file)
@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
        if (!hold_ctx)
                return 0;
 
-       if (unlikely(ctx == hold_ctx))
-               return -EALREADY;
-
        if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
            (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        unsigned long flags;
        int ret;
 
+       if (use_ww_ctx) {
+               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+               if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+                       return -EALREADY;
+       }
+
        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
@@ -534,7 +537,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                goto skip_wait;
 
        debug_mutex_lock_common(lock, &waiter);
-       debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+       debug_mutex_add_waiter(lock, &waiter, task);
 
        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
@@ -581,7 +584,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        }
        __set_task_state(task, TASK_RUNNING);
 
-       mutex_remove_waiter(lock, &waiter, current_thread_info());
+       mutex_remove_waiter(lock, &waiter, task);
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
@@ -602,7 +605,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        return 0;
 
 err:
-       mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+       mutex_remove_waiter(lock, &waiter, task);
        spin_unlock_mutex(&lock->wait_lock, flags);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
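
With the check hoisted into __mutex_lock_common(), re-acquiring a ww_mutex with the same acquire context now fails fast with -EALREADY before any waiter bookkeeping. A hedged sketch of the documented caller-side handling; the helper is illustrative:

#include <linux/errno.h>
#include <linux/ww_mutex.h>

/* Illustrative: treat "we already hold it in this context" as success. */
static int example_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int err = ww_mutex_lock(lock, ctx);

	if (err == -EALREADY)
		return 0;
	return err;	/* 0, or -EDEADLK: drop held locks and retry */
}
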
index 5cda397607f25011eedcc4cbcab7aeef9d211321..a68bae5e852a08f1c0d75587b57c5e31835f937e 100644 (file)
@@ -13,7 +13,7 @@
                do { spin_lock(lock); (void)(flags); } while (0)
 #define spin_unlock_mutex(lock, flags) \
                do { spin_unlock(lock); (void)(flags); } while (0)
-#define mutex_remove_waiter(lock, waiter, ti) \
+#define mutex_remove_waiter(lock, waiter, task) \
                __list_del((waiter)->list.prev, (waiter)->list.next)
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
index ce2f75e32ae155b30cfe324c56f9bc526771b66a..5fc8c311b8fe59d46decc2c5a049ce6e860a07b8 100644 (file)
@@ -267,6 +267,66 @@ static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath      native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():    *,1,0 -> *,0,1
+ *
+ *  (2) set_locked():                  t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():      t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+       u32 val;
+
+       for (;;) {
+               val = atomic_read(&lock->val);
+
+               if (!val) /* not locked, we're done */
+                       goto done;
+
+               if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+                       break;
+
+               /* not locked, but pending, wait until we observe the lock */
+               cpu_relax();
+       }
+
+       /* any unlock is good */
+       while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+               cpu_relax();
+
+done:
+       smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
index df058bed53cebea8172fceaa1a97041d29a78fa6..0c2ee9761d57b7aa585e0191b5fbfcd7d84d6ae4 100644 (file)
@@ -146,6 +146,18 @@ int freeze_processes(void)
        if (!error && !oom_killer_disable())
                error = -EBUSY;
 
+       /*
+        * There is a hard-to-fix race between the oom_reaper kernel thread
+        * and oom_killer_disable. oom_reaper calls exit_oom_victim
+        * before the victim reaches exit_mm, so try to freeze all the tasks
+        * again and catch such a left-over task.
+        */
+       if (!error) {
+               pr_info("Double checking all user space processes after OOM killer disable... ");
+               error = try_to_freeze_tasks(true);
+               pr_cont("\n");
+       }
+
        if (error)
                thaw_processes();
        return error;
index 074994bcfa9be14336ecb06912a969f6d9546c9d..04d7cf3ef8cf932e036c7c76f6a27cefa962efc0 100644 (file)
@@ -614,6 +614,7 @@ struct rchan *relay_open(const char *base_filename,
 
        kref_put(&chan->kref, relay_destroy_channel);
        mutex_unlock(&relay_channels_mutex);
+       kfree(chan);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(relay_open);
index 7f2cae4620c7a949fdc294c06211884e2ca83afb..51d7105f529a5527ee8c90ac789f08583027a373 100644 (file)
@@ -1536,7 +1536,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
        for (;;) {
                /* Any allowed, online CPU? */
                for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
-                       if (!cpu_active(dest_cpu))
+                       if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
+                               continue;
+                       if (!cpu_online(dest_cpu))
                                continue;
                        goto out;
                }
@@ -2253,9 +2255,11 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
 #endif
 #endif
 
+#ifdef CONFIG_SCHEDSTATS
+
 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+static bool __initdata __sched_schedstats = false;
 
-#ifdef CONFIG_SCHEDSTATS
 static void set_schedstats(bool enabled)
 {
        if (enabled)
@@ -2278,11 +2282,16 @@ static int __init setup_schedstats(char *str)
        if (!str)
                goto out;
 
+       /*
+        * This code is called before jump labels have been set up, so we can't
+        * change the static branch directly just yet.  Instead set a temporary
+        * variable so init_schedstats() can do it later.
+        */
        if (!strcmp(str, "enable")) {
-               set_schedstats(true);
+               __sched_schedstats = true;
                ret = 1;
        } else if (!strcmp(str, "disable")) {
-               set_schedstats(false);
+               __sched_schedstats = false;
                ret = 1;
        }
 out:
@@ -2293,6 +2302,11 @@ static int __init setup_schedstats(char *str)
 }
 __setup("schedstats=", setup_schedstats);
 
+static void __init init_schedstats(void)
+{
+       set_schedstats(__sched_schedstats);
+}
+
 #ifdef CONFIG_PROC_SYSCTL
 int sysctl_schedstats(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -2313,8 +2327,10 @@ int sysctl_schedstats(struct ctl_table *table, int write,
                set_schedstats(state);
        return err;
 }
-#endif
-#endif
+#endif /* CONFIG_PROC_SYSCTL */
+#else  /* !CONFIG_SCHEDSTATS */
+static inline void init_schedstats(void) {}
+#endif /* CONFIG_SCHEDSTATS */
 
 /*
  * fork()/clone()-time setup:
@@ -2521,10 +2537,9 @@ void wake_up_new_task(struct task_struct *p)
         */
        set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
-       /* Post initialize new task's util average when its cfs_rq is set */
+       rq = __task_rq_lock(p, &rf);
        post_init_entity_util_avg(&p->se);
 
-       rq = __task_rq_lock(p, &rf);
        activate_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p);
@@ -3156,7 +3171,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 static inline void schedule_debug(struct task_struct *prev)
 {
 #ifdef CONFIG_SCHED_STACK_END_CHECK
-       BUG_ON(task_stack_end_corrupted(prev));
+       if (task_stack_end_corrupted(prev))
+               panic("corrupted stack end detected inside scheduler\n");
 #endif
 
        if (unlikely(in_atomic_preempt_off())) {
@@ -5133,14 +5149,16 @@ void show_state_filter(unsigned long state_filter)
                /*
                 * reset the NMI-timeout, listing all files on a slow
                 * console might take a lot of time:
+                * Also, reset softlockup watchdogs on all CPUs, because
+                * another CPU might be blocked waiting for us to process
+                * an IPI.
                 */
                touch_nmi_watchdog();
+               touch_all_softlockup_watchdogs();
                if (!state_filter || (p->state & state_filter))
                        sched_show_task(p);
        }
 
-       touch_all_softlockup_watchdogs();
-
 #ifdef CONFIG_SCHED_DEBUG
        if (!state_filter)
                sysrq_sched_debug_show();
@@ -7487,6 +7505,8 @@ void __init sched_init(void)
 #endif
        init_sched_fair_class();
 
+       init_schedstats();
+
        scheduler_running = 1;
 }
 
index cf905f655ba120fd91d2dd023c6a5a93ad699d4b..0368c393a3362d981e79745716cbb59b2989dac8 100644 (file)
@@ -427,19 +427,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
-#ifdef CONFIG_SCHEDSTATS
-       if (schedstat_enabled()) {
-               SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-                       SPLIT_NS(p->se.statistics.wait_sum),
-                       SPLIT_NS(p->se.sum_exec_runtime),
-                       SPLIT_NS(p->se.statistics.sum_sleep_runtime));
-       }
-#else
+
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-               0LL, 0L,
+               SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
-               0LL, 0L);
-#endif
+               SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
+
 #ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
index 218f8e83db731e4afe4d4aaf7d7919a588087a12..bdcbeea90c950523b5276cfd1d8464ceef2d4fd8 100644 (file)
@@ -2904,6 +2904,23 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
        }
 }
 
+/*
+ * Unsigned subtract and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define sub_positive(_ptr, _val) do {                          \
+       typeof(_ptr) ptr = (_ptr);                              \
+       typeof(*ptr) val = (_val);                              \
+       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
+       res = var - val;                                        \
+       if (res > var)                                          \
+               res = 0;                                        \
+       WRITE_ONCE(*ptr, res);                                  \
+} while (0)
+
 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
 static inline int
 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
@@ -2913,15 +2930,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 
        if (atomic_long_read(&cfs_rq->removed_load_avg)) {
                s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
-               sa->load_avg = max_t(long, sa->load_avg - r, 0);
-               sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+               sub_positive(&sa->load_avg, r);
+               sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
                removed_load = 1;
        }
 
        if (atomic_long_read(&cfs_rq->removed_util_avg)) {
                long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
-               sa->util_avg = max_t(long, sa->util_avg - r, 0);
-               sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
+               sub_positive(&sa->util_avg, r);
+               sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
                removed_util = 1;
        }
 
@@ -2994,10 +3011,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
                          &se->avg, se->on_rq * scale_load_down(se->load.weight),
                          cfs_rq->curr == se, NULL);
 
-       cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
-       cfs_rq->avg.load_sum = max_t(s64,  cfs_rq->avg.load_sum - se->avg.load_sum, 0);
-       cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
-       cfs_rq->avg.util_sum = max_t(s32,  cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+       sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+       sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
+       sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
+       sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
 
        cfs_rq_util_change(cfs_rq);
 }
@@ -3246,7 +3263,7 @@ static inline void check_schedstat_required(void)
                        trace_sched_stat_iowait_enabled()  ||
                        trace_sched_stat_blocked_enabled() ||
                        trace_sched_stat_runtime_enabled())  {
-               pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
+               printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
                             "stat_blocked and stat_runtime require the "
                             "kernel parameter schedstats=enabled or "
                             "kernel.sched_schedstats=1\n");
@@ -4185,6 +4202,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
        if (!cfs_bandwidth_used())
                return;
 
+       /* Synchronize hierarchical throttle counter: */
+       if (unlikely(!cfs_rq->throttle_uptodate)) {
+               struct rq *rq = rq_of(cfs_rq);
+               struct cfs_rq *pcfs_rq;
+               struct task_group *tg;
+
+               cfs_rq->throttle_uptodate = 1;
+
+               /* Get closest up-to-date node, because leaves go first: */
+               for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+                       pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+                       if (pcfs_rq->throttle_uptodate)
+                               break;
+               }
+               if (tg) {
+                       cfs_rq->throttle_count = pcfs_rq->throttle_count;
+                       cfs_rq->throttled_clock_task = rq_clock_task(rq);
+               }
+       }
+
        /* an active group must be handled by the update_curr()->put() path */
        if (!cfs_rq->runtime_enabled || cfs_rq->curr)
                return;
@@ -4500,15 +4537,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight) {
+                       /* Avoid re-evaluating load for this entity: */
+                       se = parent_entity(se);
                        /*
                         * Bias pick_next to pick a task from this cfs_rq, as
                         * p is sleeping when it is within its sched_slice.
                         */
-                       if (task_sleep && parent_entity(se))
-                               set_next_buddy(parent_entity(se));
-
-                       /* avoid re-evaluating load for this entity */
-                       se = parent_entity(se);
+                       if (task_sleep && se && !throttled_hierarchy(cfs_rq))
+                               set_next_buddy(se);
                        break;
                }
                flags |= DEQUEUE_SLEEP;
@@ -8496,8 +8532,9 @@ void free_fair_sched_group(struct task_group *tg)
 
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
-       struct cfs_rq *cfs_rq;
        struct sched_entity *se;
+       struct cfs_rq *cfs_rq;
+       struct rq *rq;
        int i;
 
        tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8512,6 +8549,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
        init_cfs_bandwidth(tg_cfs_bandwidth(tg));
 
        for_each_possible_cpu(i) {
+               rq = cpu_rq(i);
+
                cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
                                      GFP_KERNEL, cpu_to_node(i));
                if (!cfs_rq)
@@ -8525,7 +8564,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
                init_cfs_rq(cfs_rq);
                init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
                init_entity_runnable_average(se);
+
+               raw_spin_lock_irq(&rq->lock);
                post_init_entity_util_avg(se);
+               raw_spin_unlock_irq(&rq->lock);
        }
 
        return 1;
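
sub_positive() above is just an unsigned subtract clamped at zero, with READ_ONCE()/WRITE_ONCE() ensuring concurrent lockless readers never observe a wrapped-around intermediate value. A standalone illustration of the clamping behaviour; userspace C with the *_ONCE accessors replaced by plain accesses since this runs single-threaded, and typeof is a GNU extension as in the kernel:

#include <stdio.h>

#define sub_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	typeof(*ptr) val = (_val);				\
	typeof(*ptr) res, var = *ptr;				\
	res = var - val;					\
	if (res > var)	/* unsigned underflow wrapped around */	\
		res = 0;					\
	*ptr = res;						\
} while (0)

int main(void)
{
	unsigned long avg = 100;

	sub_positive(&avg, 40);		/* 100 - 40 = 60 */
	sub_positive(&avg, 200);	/* clamps to 0 instead of wrapping */
	printf("%lu\n", avg);		/* prints 0 */
	return 0;
}
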
index bd12c6c714ecea0718565df4539304e3bf9d9e57..c5aeedf4e93ad8f8f5c2edaf7248ac0ce9c47e8e 100644 (file)
@@ -127,7 +127,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
  */
 static void cpuidle_idle_call(void)
 {
-       struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+       struct cpuidle_device *dev = cpuidle_get_device();
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;
 
index 72f1f3087b04a72fc0afc9ff81d768018eb05391..7cbeb92a1cb9361102b8514f612ee81be87fef10 100644 (file)
@@ -437,7 +437,7 @@ struct cfs_rq {
 
        u64 throttled_clock, throttled_clock_task;
        u64 throttled_clock_task_time;
-       int throttled, throttle_count;
+       int throttled, throttle_count, throttle_uptodate;
        struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
index 70b3b6a20fb0e362f4c816fe0f069c7c8576c7f4..78955cbea31c4a5378bc0b73f5f3bbcbdfa23d75 100644 (file)
@@ -33,6 +33,8 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 # define schedstat_inc(rq, field)      do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
 # define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
 # define schedstat_set(var, val)       do { if (schedstat_enabled()) { var = (val); } } while (0)
+# define schedstat_val(rq, field)      ((schedstat_enabled()) ? (rq)->field : 0)
+
 #else /* !CONFIG_SCHEDSTATS */
 static inline void
 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
@@ -47,6 +49,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 # define schedstat_inc(rq, field)      do { } while (0)
 # define schedstat_add(rq, field, amt) do { } while (0)
 # define schedstat_set(var, val)       do { } while (0)
+# define schedstat_val(rq, field)      0
 #endif
 
 #ifdef CONFIG_SCHED_INFO
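
schedstat_val() gives callers an expression form: it reads the field when schedstats are enabled and evaluates to 0 otherwise, which is what lets print_task() above drop its #ifdef block. A hedged sketch of a caller inside kernel/sched/, where stats.h is already included; the helper is illustrative:

/* Illustrative: fetch a wait-time statistic without #ifdef CONFIG_SCHEDSTATS. */
static inline u64 example_wait_sum(struct task_struct *p)
{
	return schedstat_val(p, se.statistics.wait_sum);
}
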
index 780bcbe1d4de33bbb24d2328b231bb056bc641b5..720b7bb01d43ae5f49761da3a34d8fce4e55dbc2 100644 (file)
@@ -198,7 +198,7 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
-       file = (struct file *)array->ptrs[index];
+       file = READ_ONCE(array->ptrs[index]);
        if (unlikely(!file))
                return -ENOENT;
 
@@ -247,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
-       file = (struct file *)array->ptrs[index];
+       file = READ_ONCE(array->ptrs[index]);
        if (unlikely(!file))
                return -ENOENT;
 
index f96f0383f6c6bfe40cba9226b97f8885723a4d9d..ad1d6164e94603cf29f1b26f0a58f8c37988d57b 100644 (file)
@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
 static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
 {
        struct trace_bprintk_fmt *pos;
+
+       if (!fmt)
+               return ERR_PTR(-EINVAL);
+
        list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
                if (!strcmp(pos->fmt, fmt))
                        return pos;
@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
        for (iter = start; iter < end; iter++) {
                struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
                if (tb_fmt) {
-                       *iter = tb_fmt->fmt;
+                       if (!IS_ERR(tb_fmt))
+                               *iter = tb_fmt->fmt;
                        continue;
                }
 
index 1427366ad6735acfd09ce47eb6aa2bed042b4b3c..79bfe0e06907ac91875dddc9a3fbcf579b0c4d51 100644 (file)
@@ -441,25 +441,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
+               if (!isolated)
+                       break;
+
                total_isolated += isolated;
+               cc->nr_freepages += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }
-
-               /* If a page was split, advance to the end of it */
-               if (isolated) {
-                       cc->nr_freepages += isolated;
-                       if (!strict &&
-                               cc->nr_migratepages <= cc->nr_freepages) {
-                               blockpfn += isolated;
-                               break;
-                       }
-
-                       blockpfn += isolated - 1;
-                       cursor += isolated - 1;
-                       continue;
+               if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
+                       blockpfn += isolated;
+                       break;
                }
+               /* Advance to the end of split page */
+               blockpfn += isolated - 1;
+               cursor += isolated - 1;
+               continue;
 
 isolate_fail:
                if (strict)
@@ -469,6 +467,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
        }
 
+       if (locked)
+               spin_unlock_irqrestore(&cc->zone->lock, flags);
+
        /*
         * There is a tiny chance that we have read bogus compound_order(),
         * so be careful to not go outside of the pageblock.
@@ -490,9 +491,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
        if (strict && blockpfn < end_pfn)
                total_isolated = 0;
 
-       if (locked)
-               spin_unlock_irqrestore(&cc->zone->lock, flags);
-
        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, false);
@@ -1011,6 +1009,7 @@ static void isolate_freepages(struct compact_control *cc)
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages,
                                isolate_start_pfn = block_start_pfn) {
+               unsigned long isolated;
 
                /*
                 * This can iterate a massively long zone without finding any
@@ -1035,8 +1034,12 @@ static void isolate_freepages(struct compact_control *cc)
                        continue;
 
                /* Found a block suitable for isolating free pages from. */
-               isolate_freepages_block(cc, &isolate_start_pfn,
-                                       block_end_pfn, freelist, false);
+               isolated = isolate_freepages_block(cc, &isolate_start_pfn,
+                                               block_end_pfn, freelist, false);
+               /* If isolation failed early, do not continue needlessly */
+               if (!isolated && isolate_start_pfn < block_end_pfn &&
+                   cc->nr_migratepages > cc->nr_freepages)
+                       break;
 
                /*
                 * If we isolated enough freepages, or aborted due to async
index b8024fa7101d9a8ff625606de973b9b670d0ecf4..6c707bfe02fde002feae9ea2fab3fc9647c3baa0 100644 (file)
@@ -126,6 +126,17 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
                 */
                start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
                end_index = (endbyte >> PAGE_SHIFT);
+               if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+                       /* First page is tricky as 0 - 1 = -1, but pgoff_t
+                        * is unsigned, so the end_index >= start_index
+                        * check below would be true and we'll discard the whole
+                        * check below would be true and we would discard the
+                        * whole file cache, which is not what was asked for.
+                       if (end_index == 0)
+                               break;
+
+                       end_index--;
+               }
 
                if (end_index >= start_index) {
                        unsigned long count = invalidate_mapping_pages(mapping,
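
The end_index == 0 guard exists because pgoff_t is unsigned: decrementing 0 wraps to ULONG_MAX, which would turn a POSIX_FADV_DONTNEED ending inside the first page into an invalidation of the entire file. A standalone illustration of the wrap; plain userspace C:

#include <stdio.h>

int main(void)
{
	unsigned long end_index = 0;	/* pgoff_t is an unsigned long */

	end_index--;			/* wraps to ULONG_MAX */
	printf("end_index = %lu\n", end_index);
	/* any start_index now satisfies end_index >= start_index */
	return 0;
}
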
index 00ae878b2a3860ce5b4bfe41967430e781ebabc1..20f3b1f33f0e0f83486bba4df1c620f4ac8110f0 100644 (file)
@@ -2186,7 +2186,7 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
                if (file->f_ra.mmap_miss > 0)
                        file->f_ra.mmap_miss--;
                addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
-               do_set_pte(vma, addr, page, pte, false, false, true);
+               do_set_pte(vma, addr, page, pte, false, false);
                unlock_page(page);
                goto next;
 unlock:
index d26162e81feaa78b2fb4615839cbb580fc626f4b..c1f3c0be150a94f09588672ce44ade9047d78958 100644 (file)
@@ -832,8 +832,27 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
         * Only the process that called mmap() has reserves for
         * private mappings.
         */
-       if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
-               return true;
+       if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+               /*
+                * Like the shared case above, a hole punch or truncate
+                * could have been performed on the private mapping.
+                * Examine the value of chg to determine if reserves
+                * actually exist or were previously consumed.
+                * Very Subtle - The value of chg comes from a previous
+                * call to vma_needs_reserves().  The reserve map for
+                * private mappings has different (opposite) semantics
+                * than that of shared mappings.  vma_needs_reserves()
+                * has already taken this difference in semantics into
+                * account.  Therefore, the meaning of chg is the same
+                * as in the shared case above.  Code could easily be
+                * combined, but keeping it separate draws attention to
+                * subtle differences.
+                */
+               if (chg)
+                       return false;
+               else
+                       return true;
+       }
 
        return false;
 }
@@ -1011,6 +1030,7 @@ static void destroy_compound_gigantic_page(struct page *page,
        int nr_pages = 1 << order;
        struct page *p = page + 1;
 
+       atomic_set(compound_mapcount_ptr(page), 0);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                clear_compound_head(p);
                set_page_refcounted(p);
@@ -1816,6 +1836,25 @@ static long __vma_reservation_common(struct hstate *h,
 
        if (vma->vm_flags & VM_MAYSHARE)
                return ret;
+       else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
+               /*
+                * In most cases, reserves always exist for private mappings.
+                * However, a file associated with mapping could have been
+                * hole punched or truncated after reserves were consumed.
+                * A subsequent fault on such a range will not use reserves.
+                * Subtle - The reserve map for private mappings has the
+                * opposite meaning than that of shared mappings.  If NO
+                * entry is in the reserve map, it means a reservation exists.
+                * If an entry exists in the reserve map, it means the
+                * reservation has already been consumed.  As a result, the
+                * return value of this routine is the opposite of the
+                * value returned from reserve map manipulation routines above.
+                */
+               if (ret)
+                       return 0;
+               else
+                       return 1;
+       }
        else
                return ret < 0 ? ret : 0;
 }
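
The inverted reserve-map semantics spelled out in the two comments above
reduce to a small truth table; a hedged restatement in standalone C (helper
name and shape hypothetical, not kernel code):

    #include <assert.h>

    /* 'no_entry' is what a region_chg()-style lookup reports: non-zero
     * when the reserve map has no entry for the page.  Per the comments
     * above, that means opposite things for the two mapping types:
     *
     *                     entry in map       no entry in map
     *   shared mapping    reserve exists     reserve needed
     *   private mapping   reserve consumed   reserve exists
     */
    static long needs_reservation(int is_shared, long no_entry)
    {
        if (is_shared)
            return no_entry;          /* map entry == reservation */
        return no_entry ? 0 : 1;      /* private mappings invert it */
    }

    int main(void)
    {
        assert(needs_reservation(1, 1) == 1);  /* shared, hole punched */
        assert(needs_reservation(0, 1) == 0);  /* private, untouched */
        assert(needs_reservation(0, 0) == 1);  /* private, consumed */
        return 0;
    }
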
@@ -4190,7 +4229,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
                if (saddr) {
                        spte = huge_pte_offset(svma->vm_mm, saddr);
                        if (spte) {
-                               mm_inc_nr_pmds(mm);
                                get_page(virt_to_page(spte));
                                break;
                        }
@@ -4205,9 +4243,9 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        if (pud_none(*pud)) {
                pud_populate(mm, pud,
                                (pmd_t *)((unsigned long)spte & PAGE_MASK));
+               mm_inc_nr_pmds(mm);
        } else {
                put_page(virt_to_page(spte));
-               mm_inc_nr_pmds(mm);
        }
        spin_unlock(ptl);
 out:
index a37e5b6f9d25ca1cfd31938eebee0f90054ed4de..2524ec880e242ceb6efb5531b21f4bade9e9a3c0 100644 (file)
@@ -24,7 +24,8 @@
  */
 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
                        __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
-                       __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
+                       __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
+                       __GFP_ATOMIC)
 
 /* The GFP flags allowed during early boot */
 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
index 18b6a2b8d183550daa6c3e64ae7ba50610ead79c..6845f9294696cedb9d6567bd09a7a86862bd5c3b 100644 (file)
@@ -508,7 +508,7 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
        kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
-void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
+static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 {
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
@@ -626,7 +626,7 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags)
                kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
-void kasan_kfree(void *ptr)
+void kasan_poison_kfree(void *ptr)
 {
        struct page *page;
 
@@ -636,7 +636,7 @@ void kasan_kfree(void *ptr)
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
-               kasan_slab_free(page->slab_cache, ptr);
+               kasan_poison_slab_free(page->slab_cache, ptr);
 }
 
 void kasan_kfree_large(const void *ptr)
@@ -763,8 +763,8 @@ static int kasan_mem_notifier(struct notifier_block *nb,
 
 static int __init kasan_memhotplug_init(void)
 {
-       pr_err("WARNING: KASAN doesn't support memory hot-add\n");
-       pr_err("Memory hot-add will be disabled\n");
+       pr_info("WARNING: KASAN doesn't support memory hot-add\n");
+       pr_info("Memory hot-add will be disabled\n");
 
        hotplug_memory_notifier(kasan_mem_notifier, 0);
 
index e6429926e95741055fe1aa87a3082e6ed073f909..04320d3adbef9922edda52e43cc516463e9e33c6 100644 (file)
@@ -307,8 +307,10 @@ static void hex_dump_object(struct seq_file *seq,
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 
        seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
+       kasan_disable_current();
        seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+       kasan_enable_current();
 }
 
 /*
index 58c69c94402a74498a488392eabeed15ddddd1ee..ac8664db38232f5ede345ee4b2f2f9ec0c5ac79d 100644 (file)
@@ -1608,7 +1608,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-       if (!current->memcg_may_oom || current->memcg_in_oom)
+       if (!current->memcg_may_oom)
                return;
        /*
         * We are in the middle of the charge context here, so we
@@ -4203,7 +4203,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        return &memcg->css;
 fail:
        mem_cgroup_free(memcg);
-       return NULL;
+       return ERR_PTR(-ENOMEM);
 }
 
 static int
@@ -5544,6 +5544,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
        struct mem_cgroup *memcg;
        unsigned int nr_pages;
        bool compound;
+       unsigned long flags;
 
        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
@@ -5574,10 +5575,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 
        commit_charge(newpage, memcg, false);
 
-       local_irq_disable();
+       local_irq_save(flags);
        mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
        memcg_check_events(memcg, newpage);
-       local_irq_enable();
+       local_irq_restore(flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
index 15322b73636b4272d6c84af6742a0822822301db..cd1f29e4897e3e5207071ed9cccabc55f86e6a87 100644 (file)
@@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
  * vm_ops->map_pages.
  */
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-               struct page *page, pte_t *pte, bool write, bool anon, bool old)
+               struct page *page, pte_t *pte, bool write, bool anon)
 {
        pte_t entry;
 
@@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
        entry = mk_pte(page, vma->vm_page_prot);
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-       if (old)
-               entry = pte_mkold(entry);
        if (anon) {
                inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
                page_add_new_anon_rmap(page, vma, address, false);
@@ -2900,16 +2898,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
        update_mmu_cache(vma, address, pte);
 }
 
-/*
- * If architecture emulates "accessed" or "young" bit without HW support,
- * there is no much gain with fault_around.
- */
 static unsigned long fault_around_bytes __read_mostly =
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-       PAGE_SIZE;
-#else
        rounddown_pow_of_two(65536);
-#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
@@ -3032,20 +3022,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         */
        if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-               if (!pte_same(*pte, orig_pte))
-                       goto unlock_out;
                do_fault_around(vma, address, pte, pgoff, flags);
-               /* Check if the fault is handled by faultaround */
-               if (!pte_same(*pte, orig_pte)) {
-                       /*
-                        * Faultaround produce old pte, but the pte we've
-                        * handler fault for should be young.
-                        */
-                       pte_t entry = pte_mkyoung(*pte);
-                       if (ptep_set_access_flags(vma, address, pte, entry, 0))
-                               update_mmu_cache(vma, address, pte);
+               if (!pte_same(*pte, orig_pte))
                        goto unlock_out;
-               }
                pte_unmap_unlock(pte, ptl);
        }
 
@@ -3060,7 +3039,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                put_page(fault_page);
                return ret;
        }
-       do_set_pte(vma, address, fault_page, pte, false, false, false);
+       do_set_pte(vma, address, fault_page, pte, false, false);
        unlock_page(fault_page);
 unlock_out:
        pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3090,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                }
                goto uncharge_out;
        }
-       do_set_pte(vma, address, new_page, pte, true, true, false);
+       do_set_pte(vma, address, new_page, pte, true, true);
        mem_cgroup_commit_charge(new_page, memcg, false, false);
        lru_cache_add_active_or_unevictable(new_page, vma);
        pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3143,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                put_page(fault_page);
                return ret;
        }
-       do_set_pte(vma, address, fault_page, pte, true, false, false);
+       do_set_pte(vma, address, fault_page, pte, true, false);
        pte_unmap_unlock(pte, ptl);
 
        if (set_page_dirty(fault_page))
index 9e075f829d0d2598f6d404cb99b342bb6596b23f..8f65464da5de84d8bca0a8d97352f3c800f90614 100644 (file)
@@ -104,20 +104,16 @@ static inline void poison_element(mempool_t *pool, void *element)
 
 static void kasan_poison_element(mempool_t *pool, void *element)
 {
-       if (pool->alloc == mempool_alloc_slab)
-               kasan_poison_slab_free(pool->pool_data, element);
-       if (pool->alloc == mempool_kmalloc)
-               kasan_kfree(element);
+       if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+               kasan_poison_kfree(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 {
-       if (pool->alloc == mempool_alloc_slab)
-               kasan_slab_alloc(pool->pool_data, element, flags);
-       if (pool->alloc == mempool_kmalloc)
-               kasan_krealloc(element, (size_t)pool->pool_data, flags);
+       if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+               kasan_unpoison_slab(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
index 9baf41c877ffb98595aac0834b8aa43aedd6dc76..bd3fdc202e8be19411d641dcfdf5addd8c358944 100644 (file)
@@ -431,6 +431,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 
        return MIGRATEPAGE_SUCCESS;
 }
+EXPORT_SYMBOL(migrate_page_move_mapping);
 
 /*
  * The expected number of remaining references is the same as that
@@ -586,6 +587,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 
        mem_cgroup_migrate(page, newpage);
 }
+EXPORT_SYMBOL(migrate_page_copy);
 
 /************************************************************
  *                    Migration functions
index acbc432d1a52778d746879d652fd4c2001075868..ddf74487f8487f0d109416f93a433ba58117426e 100644 (file)
@@ -474,13 +474,8 @@ static bool __oom_reap_task(struct task_struct *tsk)
        p = find_lock_task_mm(tsk);
        if (!p)
                goto unlock_oom;
-
        mm = p->mm;
-       if (!atomic_inc_not_zero(&mm->mm_users)) {
-               task_unlock(p);
-               goto unlock_oom;
-       }
-
+       atomic_inc(&mm->mm_users);
        task_unlock(p);
 
        if (!down_read_trylock(&mm->mmap_sem)) {
index b9956fdee8f5d0ed55713eef51e13a6ea42b2e0d..e2481949494c4e6bf4cd0fd0799d178bd2203008 100644 (file)
@@ -373,8 +373,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
        struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
        unsigned long bytes = vm_dirty_bytes;
        unsigned long bg_bytes = dirty_background_bytes;
-       unsigned long ratio = vm_dirty_ratio;
-       unsigned long bg_ratio = dirty_background_ratio;
+       /* convert ratios to per-PAGE_SIZE for higher precision */
+       unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
+       unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
        unsigned long thresh;
        unsigned long bg_thresh;
        struct task_struct *tsk;
@@ -386,26 +387,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
                /*
                 * The byte settings can't be applied directly to memcg
                 * domains.  Convert them to ratios by scaling against
-                * globally available memory.
+                * globally available memory.  As the ratios are in
+                * per-PAGE_SIZE units, they can be obtained by dividing
+                * bytes by the number of pages.
                 */
                if (bytes)
-                       ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
-                                   global_avail, 100UL);
+                       ratio = min(DIV_ROUND_UP(bytes, global_avail),
+                                   PAGE_SIZE);
                if (bg_bytes)
-                       bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
-                                      global_avail, 100UL);
+                       bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
+                                      PAGE_SIZE);
                bytes = bg_bytes = 0;
        }
 
        if (bytes)
                thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
        else
-               thresh = (ratio * available_memory) / 100;
+               thresh = (ratio * available_memory) / PAGE_SIZE;
 
        if (bg_bytes)
                bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
        else
-               bg_thresh = (bg_ratio * available_memory) / 100;
+               bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
        if (bg_thresh >= thresh)
                bg_thresh = thresh / 2;
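
The precision the conversion buys is easiest to see with concrete numbers; a
standalone sketch (values made up, 4 KiB pages assumed, the min() clamps from
the hunk omitted):

    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* made-up memcg scenario: 100 MiB byte limit, 64 GiB available */
        unsigned long long bytes = 100ULL << 20;
        unsigned long long global_avail = (64ULL << 30) / PAGE_SIZE;
        unsigned long long available_memory = global_avail;

        /* old conversion: percent granularity, truncates to 0% here */
        unsigned long long pct =
            DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / global_avail;
        unsigned long long old_thresh = pct * available_memory / 100;

        /* new conversion: per-PAGE_SIZE granularity, 1/4096 steps */
        unsigned long long ratio = DIV_ROUND_UP(bytes, global_avail);
        unsigned long long new_thresh =
            ratio * available_memory / PAGE_SIZE;

        /* prints "old: 0 pages, new: 28672 pages" (~112 MiB) */
        printf("old: %llu pages, new: %llu pages\n", old_thresh, new_thresh);
        return 0;
    }
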
index c6cda3e36212833f78189c3a93b7dc80f8bc7ec9..fedeba88c9cb82acdbc5bcf5c6efab63279607d2 100644 (file)
@@ -207,13 +207,15 @@ void __dump_page_owner(struct page *page)
                .nr_entries = page_ext->nr_entries,
                .entries = &page_ext->trace_entries[0],
        };
-       gfp_t gfp_mask = page_ext->gfp_mask;
-       int mt = gfpflags_to_migratetype(gfp_mask);
+       gfp_t gfp_mask;
+       int mt;
 
        if (unlikely(!page_ext)) {
                pr_alert("There is not page extension available.\n");
                return;
        }
+       gfp_mask = page_ext->gfp_mask;
+       mt = gfpflags_to_migratetype(gfp_mask);
 
        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not active (free page?)\n");
index 0c59684f1ff2ef5cdf3cd7ab688442e5afaf014a..9903830aaebbf00256fbb108891c2e5e09baecac 100644 (file)
@@ -112,7 +112,7 @@ struct pcpu_chunk {
        int                     map_used;       /* # of map entries used before the sentry */
        int                     map_alloc;      /* # of map entries allocated */
        int                     *map;           /* allocation map */
-       struct work_struct      map_extend_work;/* async ->map[] extension */
+       struct list_head        map_extend_list;/* on pcpu_map_extend_chunks */
 
        void                    *data;          /* chunk data */
        int                     first_free;     /* no free below this */
@@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
 static int pcpu_reserved_chunk_limit;
 
 static DEFINE_SPINLOCK(pcpu_lock);     /* all internal data structures */
-static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
+static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
 
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 
+/* chunks which need their map areas extended, protected by pcpu_lock */
+static LIST_HEAD(pcpu_map_extend_chunks);
+
 /*
  * The number of empty populated pages, protected by pcpu_lock.  The
  * reserved chunk doesn't contribute to the count.
@@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
 {
        int margin, new_alloc;
 
+       lockdep_assert_held(&pcpu_lock);
+
        if (is_atomic) {
                margin = 3;
 
                if (chunk->map_alloc <
-                   chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
-                   pcpu_async_enabled)
-                       schedule_work(&chunk->map_extend_work);
+                   chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
+                       if (list_empty(&chunk->map_extend_list)) {
+                               list_add_tail(&chunk->map_extend_list,
+                                             &pcpu_map_extend_chunks);
+                               pcpu_schedule_balance_work();
+                       }
+               }
        } else {
                margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
        }
@@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
        size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
        unsigned long flags;
 
+       lockdep_assert_held(&pcpu_alloc_mutex);
+
        new = pcpu_mem_zalloc(new_size);
        if (!new)
                return -ENOMEM;
@@ -467,20 +478,6 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
        return 0;
 }
 
-static void pcpu_map_extend_workfn(struct work_struct *work)
-{
-       struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
-                                               map_extend_work);
-       int new_alloc;
-
-       spin_lock_irq(&pcpu_lock);
-       new_alloc = pcpu_need_to_extend(chunk, false);
-       spin_unlock_irq(&pcpu_lock);
-
-       if (new_alloc)
-               pcpu_extend_area_map(chunk, new_alloc);
-}
-
 /**
  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
  * @chunk: chunk the candidate area belongs to
@@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
        chunk->map_used = 1;
 
        INIT_LIST_HEAD(&chunk->list);
-       INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
+       INIT_LIST_HEAD(&chunk->map_extend_list);
        chunk->free_size = pcpu_unit_size;
        chunk->contig_hint = pcpu_unit_size;
 
@@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                return NULL;
        }
 
+       if (!is_atomic)
+               mutex_lock(&pcpu_alloc_mutex);
+
        spin_lock_irqsave(&pcpu_lock, flags);
 
        /* serve reserved allocations from the reserved chunk if available */
@@ -967,12 +967,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
        if (is_atomic)
                goto fail;
 
-       mutex_lock(&pcpu_alloc_mutex);
-
        if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
                chunk = pcpu_create_chunk();
                if (!chunk) {
-                       mutex_unlock(&pcpu_alloc_mutex);
                        err = "failed to allocate new chunk";
                        goto fail;
                }
@@ -983,7 +980,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                spin_lock_irqsave(&pcpu_lock, flags);
        }
 
-       mutex_unlock(&pcpu_alloc_mutex);
        goto restart;
 
 area_found:
@@ -993,8 +989,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
        if (!is_atomic) {
                int page_start, page_end, rs, re;
 
-               mutex_lock(&pcpu_alloc_mutex);
-
                page_start = PFN_DOWN(off);
                page_end = PFN_UP(off + size);
 
@@ -1005,7 +999,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
                        spin_lock_irqsave(&pcpu_lock, flags);
                        if (ret) {
-                               mutex_unlock(&pcpu_alloc_mutex);
                                pcpu_free_area(chunk, off, &occ_pages);
                                err = "failed to populate";
                                goto fail_unlock;
@@ -1045,6 +1038,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                /* see the flag handling in pcpu_balance_workfn() */
                pcpu_atomic_alloc_failed = true;
                pcpu_schedule_balance_work();
+       } else {
+               mutex_unlock(&pcpu_alloc_mutex);
        }
        return NULL;
 }
@@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
                if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
                        continue;
 
+               list_del_init(&chunk->map_extend_list);
                list_move(&chunk->list, &to_free);
        }
 
@@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
                pcpu_destroy_chunk(chunk);
        }
 
+       /* service chunks which requested async area map extension */
+       do {
+               int new_alloc = 0;
+
+               spin_lock_irq(&pcpu_lock);
+
+               chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
+                                       struct pcpu_chunk, map_extend_list);
+               if (chunk) {
+                       list_del_init(&chunk->map_extend_list);
+                       new_alloc = pcpu_need_to_extend(chunk, false);
+               }
+
+               spin_unlock_irq(&pcpu_lock);
+
+               if (new_alloc)
+                       pcpu_extend_area_map(chunk, new_alloc);
+       } while (chunk);
+
        /*
         * Ensure there are certain number of free populated pages for
         * atomic allocs.  Fill up from the most packed so that atomic
@@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         */
        schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
        INIT_LIST_HEAD(&schunk->list);
-       INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
+       INIT_LIST_HEAD(&schunk->map_extend_list);
        schunk->base_addr = base_addr;
        schunk->map = smap;
        schunk->map_alloc = ARRAY_SIZE(smap);
@@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
        if (dyn_size) {
                dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
                INIT_LIST_HEAD(&dchunk->list);
-               INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
+               INIT_LIST_HEAD(&dchunk->map_extend_list);
                dchunk->base_addr = base_addr;
                dchunk->map = dmap;
                dchunk->map_alloc = ARRAY_SIZE(dmap);
index a36144909b2840213c087f3eb32fbd10d7c880ee..24463b67b6efa5817e7c1e806d1bf6337d300ba2 100644 (file)
@@ -2227,7 +2227,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                        /* Remove the !PageUptodate pages we added */
                        shmem_undo_range(inode,
                                (loff_t)start << PAGE_SHIFT,
-                               (loff_t)index << PAGE_SHIFT, true);
+                               ((loff_t)index << PAGE_SHIFT) - 1, true);
                        goto undone;
                }
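
Since shmem_undo_range() takes an inclusive end offset, the old
(loff_t)index << PAGE_SHIFT named the first byte of the page after the ones
just added; a quick arithmetic check (4 KiB pages assumed):

    #include <assert.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        long long index = 5;   /* pages 3 and 4 were added */

        /* old, off by one: inclusive end 20480 is the first byte of
         * page 5, so a page that was never added gets undone too */
        long long bad_end = index << PAGE_SHIFT;
        /* fixed: last byte actually covered by the added pages */
        long long good_end = (index << PAGE_SHIFT) - 1;

        assert(bad_end == 20480 && good_end == 20479);
        return 0;
    }
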
 
index 95916142fc46d3a80a6ddfa54218e1de641f6045..90530ff8ed161864de461f208b8c511c369a167a 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -242,7 +242,7 @@ void rotate_reclaimable_page(struct page *page)
                get_page(page);
                local_irq_save(flags);
                pvec = this_cpu_ptr(&lru_rotate_pvecs);
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
@@ -296,7 +296,7 @@ void activate_page(struct page *page)
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 
                get_page(page);
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                put_cpu_var(activate_page_pvecs);
        }
@@ -391,9 +391,8 @@ static void __lru_cache_add(struct page *page)
        struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
        get_page(page);
-       if (!pagevec_space(pvec))
+       if (!pagevec_add(pvec, page) || PageCompound(page))
                __pagevec_lru_add(pvec);
-       pagevec_add(pvec, page);
        put_cpu_var(lru_add_pvec);
 }
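
Every hunk in this file converges on the same idiom: add the page first, then
drain when the pagevec fills or the page is compound, so huge pages never
linger in a per-cpu vector (the old __lru_cache_add checked for space before
adding, so a freshly added page always waited for a later call). A toy model
of the contract (sizes and names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define PVEC_SIZE 14   /* PAGEVEC_SIZE in this era's kernel */

    struct pvec { int nr; int pages[PVEC_SIZE]; };

    /* mirrors pagevec_add(): returns the space left after adding,
     * so 0 means "now full" */
    static int pvec_add(struct pvec *pv, int page)
    {
        pv->pages[pv->nr++] = page;
        return PVEC_SIZE - pv->nr;
    }

    static void drain(struct pvec *pv)
    {
        printf("draining %d pages\n", pv->nr);
        pv->nr = 0;
    }

    /* the shape all the call sites now share: add, then drain when
     * full or when the page is compound */
    static void lru_cache_add(struct pvec *pv, int page, bool compound)
    {
        if (!pvec_add(pv, page) || compound)
            drain(pv);
    }

    int main(void)
    {
        struct pvec pv = { 0 };
        lru_cache_add(&pv, 1, false);   /* stays batched */
        lru_cache_add(&pv, 2, true);    /* compound: drains 2 pages now */
        return 0;
    }
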
 
@@ -628,7 +627,7 @@ void deactivate_file_page(struct page *page)
        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
 
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
                put_cpu_var(lru_deactivate_file_pvecs);
        }
@@ -648,7 +647,7 @@ void deactivate_page(struct page *page)
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 
                get_page(page);
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
                put_cpu_var(lru_deactivate_pvecs);
        }
@@ -667,6 +666,24 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
+/*
+ * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+ * workqueue, aiding in getting memory freed.
+ */
+static struct workqueue_struct *lru_add_drain_wq;
+
+static int __init lru_init(void)
+{
+       lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
+
+       if (WARN(!lru_add_drain_wq,
+               "Failed to create workqueue lru_add_drain_wq"))
+               return -ENOMEM;
+
+       return 0;
+}
+early_initcall(lru_init);
+
 void lru_add_drain_all(void)
 {
        static DEFINE_MUTEX(lock);
@@ -686,7 +703,7 @@ void lru_add_drain_all(void)
                    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
-                       schedule_work_on(cpu, work);
+                       queue_work_on(cpu, lru_add_drain_wq, work);
                        cpumask_set_cpu(cpu, &has_work);
                }
        }
index 0d457e7db8d6df5e3bd185ac2b2333830bccf7b4..c99463ac02fb56d270fdc4ea7ab749e1f58fe666 100644 (file)
@@ -252,7 +252,10 @@ static inline void free_swap_cache(struct page *page)
 void free_page_and_swap_cache(struct page *page)
 {
        free_swap_cache(page);
-       put_page(page);
+       if (is_huge_zero_page(page))
+               put_huge_zero_page();
+       else
+               put_page(page);
 }
 
 /*
index dcea4f4c62b3b43d701fb68a74a6aa1a1e0aa8cf..c18080ad408572f53df75e18e2b56f714f784edb 100644 (file)
@@ -279,6 +279,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
         * change from under us.
         */
        list_for_each_entry(v, &vg->vlan_list, vlist) {
+               if (!br_vlan_should_use(v))
+                       continue;
                f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
                if (f && f->is_local && !f->dst)
                        fdb_delete_local(br, NULL, f);
index 5cfd26a0006f07d15ee62a5660e0177745f454cf..1cd2ec046164a659d7e4ad1b1acc24eceb780a6e 100644 (file)
@@ -309,8 +309,8 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
        __scm_destroy(scm);
 }
 
-static int do_set_attach_filter(struct socket *sock, int level, int optname,
-                               char __user *optval, unsigned int optlen)
+/* allocate a 64-bit sock_fprog on the user stack for the duration of the syscall. */
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
 {
        struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
        struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
@@ -323,6 +323,19 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
            __get_user(ptr, &fprog32->filter) ||
            __put_user(len, &kfprog->len) ||
            __put_user(compat_ptr(ptr), &kfprog->filter))
+               return NULL;
+
+       return kfprog;
+}
+EXPORT_SYMBOL_GPL(get_compat_bpf_fprog);
+
+static int do_set_attach_filter(struct socket *sock, int level, int optname,
+                               char __user *optval, unsigned int optlen)
+{
+       struct sock_fprog __user *kfprog;
+
+       kfprog = get_compat_bpf_fprog(optval);
+       if (!kfprog)
                return -EFAULT;
 
        return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
@@ -354,7 +367,8 @@ static int do_set_sock_timeout(struct socket *sock, int level,
 static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, unsigned int optlen)
 {
-       if (optname == SO_ATTACH_FILTER)
+       if (optname == SO_ATTACH_FILTER ||
+           optname == SO_ATTACH_REUSEPORT_CBPF)
                return do_set_attach_filter(sock, level, optname,
                                            optval, optlen);
        if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
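
The new helper exists so both this path and af_packet can reuse the
32-to-64-bit sock_fprog widening. The layout difference it bridges, sketched
with userspace stand-ins (the real code does this through __user pointers and
compat_alloc_user_space()):

    #include <stdint.h>
    #include <stdio.h>

    /* what a 32-bit task hands in: 16-bit len + 32-bit pointer */
    struct compat_sock_fprog {
        uint16_t len;
        uint32_t filter;      /* compat_uptr_t */
    };

    /* what sock_setsockopt() expects on a 64-bit kernel */
    struct sock_fprog64 {
        uint16_t len;
        uint64_t filter;      /* struct sock_filter __user * */
    };

    /* field-by-field widening, mirroring the __get_user/__put_user
     * pairs in the hunk */
    static void widen(const struct compat_sock_fprog *in,
                      struct sock_fprog64 *out)
    {
        out->len = in->len;
        out->filter = (uint64_t)in->filter;   /* plays compat_ptr() */
    }

    int main(void)
    {
        struct compat_sock_fprog in = { .len = 4, .filter = 0x1000 };
        struct sock_fprog64 out;

        widen(&in, &out);
        printf("len=%u filter=0x%llx\n", (unsigned)out.len,
               (unsigned long long)out.filter);
        return 0;
    }
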
index f96ee8b9478d8a752e78e1ca39c2351f5863f054..be873e4e312572516a2ab47cdac6053efd3459af 100644 (file)
@@ -47,6 +47,7 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
  * @xstats_type: TLV type for backward compatibility xstats TLV
  * @lock: statistics lock
  * @d: dumping handle
+ * @padattr: padding attribute
  *
  * Initializes the dumping handle, grabs the statistic lock and appends
  * an empty TLV header to the socket buffer for use as a container for all
@@ -87,6 +88,7 @@ EXPORT_SYMBOL(gnet_stats_start_copy_compat);
  * @type: TLV type for top level statistic TLV
  * @lock: statistics lock
  * @d: dumping handle
+ * @padattr: padding attribute
  *
  * Initializes the dumping handle, grabs the statistic lock and appends
  * an empty TLV header to the socket buffer for use as a container for all
index 2b3f76fe65f49f8018c63aa44b345262fb40cde7..7a0b616557abb6570dbc86a0a7cf04a50a83214b 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/jiffies.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_net.h>
 
 #include "net-sysfs.h"
 
index d56c0559b477cb96ce78c9b9b7dacc3109594f3a..0ff31d97d48586af00c6973d123576e040ed422a 100644 (file)
@@ -1618,12 +1618,12 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       if (rcu_access_pointer(sk->sk_filter)) {
-               if (udp_lib_checksum_complete(skb))
+       if (rcu_access_pointer(sk->sk_filter) &&
+           udp_lib_checksum_complete(skb))
                        goto csum_error;
-               if (sk_filter(sk, skb))
-                       goto drop;
-       }
+
+       if (sk_filter(sk, skb))
+               goto drop;
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
index f4ac2842d4d9543d2ac1e7d4b91b99df32d43e6d..fdc9de276ab1aeecfbc917e8718953d0aa0691ee 100644 (file)
@@ -1256,6 +1256,8 @@ static int ip6gre_tap_init(struct net_device *dev)
        if (ret)
                return ret;
 
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
        tunnel = netdev_priv(dev);
 
        ip6gre_tnl_link_config(tunnel, 1);
@@ -1289,6 +1291,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
 
        dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 }
 
 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
index cbf127ae7c676650cc626cbf12cd61b6b570ea43..635b8d340cdbbbce62a173ed4016b6c5c81fc6b9 100644 (file)
@@ -1071,17 +1071,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                         const struct in6_addr *final_dst)
 {
        struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
-       int err;
 
        dst = ip6_sk_dst_check(sk, dst, fl6);
+       if (!dst)
+               dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
 
-       err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
-       if (err)
-               return ERR_PTR(err);
-       if (final_dst)
-               fl6->daddr = *final_dst;
-
-       return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+       return dst;
 }
 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
 
index 6989c70ae29f1541e0216d36d58ee142934a83c2..4a84b5ad9ecbb74b29ef509d00fbda60868833cd 100644 (file)
@@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
        fl6.daddr = *gw;
        fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
                        (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+       fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error) {
                dst_release(dst);
index 79e33e02f11accfd5a6c8cf2751df0a8793ab016..f36c2d076fce0df30d202d7683a67e3614d77fe9 100644 (file)
@@ -1721,7 +1721,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
        destp = ntohs(inet->inet_dport);
        srcp  = ntohs(inet->inet_sport);
 
-       if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+       if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
index 2da1896af93496cc58762c67b4cd4b4f42924901..f421c9f23c5bef7bf58937635474713cf4722516 100644 (file)
@@ -653,12 +653,12 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       if (rcu_access_pointer(sk->sk_filter)) {
-               if (udp_lib_checksum_complete(skb))
-                       goto csum_error;
-               if (sk_filter(sk, skb))
-                       goto drop;
-       }
+       if (rcu_access_pointer(sk->sk_filter) &&
+           udp_lib_checksum_complete(skb))
+               goto csum_error;
+
+       if (sk_filter(sk, skb))
+               goto drop;
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
index 6edfa99803148815e383eb13ce6a9c1eb098058d..1e40dacaa137f00df3df79c0faf4eb4de65e86c2 100644 (file)
@@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
        tunnel->encap = encap;
        if (encap == L2TP_ENCAPTYPE_UDP) {
-               struct udp_tunnel_sock_cfg udp_cfg;
+               struct udp_tunnel_sock_cfg udp_cfg = { };
 
                udp_cfg.sk_user_data = tunnel;
                udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
index 4c6404e1ad6e683da649f507829c19b232fcd417..21b1fdf5d01d50d751ac6c3cf86670ecf98850be 100644 (file)
@@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta)
                del_timer_sync(&sta->mesh->plink_timer);
        }
 
+       /* make sure no readers can access nexthop sta from here on */
+       mesh_path_flush_by_nexthop(sta);
+       synchronize_net();
+
        if (changed)
                ieee80211_mbss_info_change_notify(sdata, changed);
 }
index c8b8ccc370eb70c75ab43a8e013e60c1f280debd..78b0ef32ddddd6768377bf49da5acdfabf7e762f 100644 (file)
@@ -280,7 +280,7 @@ struct ieee80211_fast_tx {
        u8 sa_offs, da_offs, pn_offs;
        u8 band;
        u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
-              sizeof(rfc1042_header)];
+              sizeof(rfc1042_header)] __aligned(2);
 
        struct rcu_head rcu_head;
 };
index 2cb3c626cd4307a51e06b9f0bf5c7b92cdb961a9..096a45103f14cbbaccc07b7574b9fcfc7e55ff92 100644 (file)
@@ -762,7 +762,7 @@ static int expire_quiescent_template(struct netns_ipvs *ipvs,
  *     If available, return 1, otherwise invalidate this connection
  *     template and return 0.
  */
-int ip_vs_check_template(struct ip_vs_conn *ct)
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
 {
        struct ip_vs_dest *dest = ct->dest;
        struct netns_ipvs *ipvs = ct->ipvs;
@@ -772,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
         */
        if ((dest == NULL) ||
            !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
-           expire_quiescent_template(ipvs, dest)) {
+           expire_quiescent_template(ipvs, dest) ||
+           (cdest && (dest != cdest))) {
                IP_VS_DBG_BUF(9, "check_template: dest not available for "
                              "protocol %s s:%s:%d v:%s:%d "
                              "-> d:%s:%d\n",
index 1207f20d24e4a25050363e2d6b9e30cc6aaf03b5..2c1b498a7a271df0f6e3ffa86407b89ba103e9d9 100644 (file)
@@ -321,7 +321,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 
        /* Check if a template already exists */
        ct = ip_vs_ct_in_get(&param);
-       if (!ct || !ip_vs_check_template(ct)) {
+       if (!ct || !ip_vs_check_template(ct, NULL)) {
                struct ip_vs_scheduler *sched;
 
                /*
@@ -1154,7 +1154,8 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
                                                  vport, &param) < 0)
                        return NULL;
                ct = ip_vs_ct_in_get(&param);
-               if (!ct) {
+               /* check if template exists and points to the same dest */
+               if (!ct || !ip_vs_check_template(ct, dest)) {
                        ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
                                            IP_VS_CONN_F_TEMPLATE, dest, 0);
                        if (!ct) {
index 883c691ec8d05724d8a63734bba9a3a30ae06eb2..19efeba02abb0e8db4d4b9b0f7827d2d3808c69b 100644 (file)
@@ -632,6 +632,7 @@ static int __init nf_conntrack_ftp_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %d port: %d\n",
                                       ftp[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_ftp_fini();
                                return ret;
                        }
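
This ports_c = i line (repeated for the irc, sane, sip and tftp helpers
below) records how many ports registered successfully so the _fini() path
unwinds only those. The loop shape, reduced to a standalone sketch with stub
register/unregister functions:

    #include <stdio.h>

    static int ports[] = { 0, 1, 2, 3, 4 };
    static unsigned int ports_c = sizeof(ports) / sizeof(ports[0]);

    static int register_port(int p) { return p == 3 ? -1 : 0; } /* 3 fails */
    static void unregister_port(int p) { printf("unregister %d\n", p); }

    /* unwinds exactly the ports that made it through init */
    static void helper_fini(void)
    {
        for (unsigned int i = 0; i < ports_c; i++)
            unregister_port(ports[i]);
    }

    static int helper_init(void)
    {
        for (unsigned int i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
            if (register_port(ports[i])) {
                ports_c = i;   /* the fix: skip what never registered */
                helper_fini();
                return -1;
            }
        }
        return 0;
    }

    int main(void) { return helper_init() ? 1 : 0; }
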
index f703adb7e5f7166ca47380aedd8ad0074e361ee4..196cb39649e1ec186108acfc417ae2d7bf8a0bb2 100644 (file)
@@ -361,9 +361,10 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_log);
 
 int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 {
-       int ret = 0;
-       struct nf_conntrack_helper *cur;
+       struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
        unsigned int h = helper_hash(&me->tuple);
+       struct nf_conntrack_helper *cur;
+       int ret = 0;
 
        BUG_ON(me->expect_policy == NULL);
        BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
@@ -371,9 +372,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 
        mutex_lock(&nf_ct_helper_mutex);
        hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
-               if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
-                   cur->tuple.src.l3num == me->tuple.src.l3num &&
-                   cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+               if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
                        ret = -EEXIST;
                        goto out;
                }
index 8b6da27196005f61d2269d8af92124096c85047e..f97ac61d2536aac5d0c73d6e86b1a6df36bc60fd 100644 (file)
@@ -271,6 +271,7 @@ static int __init nf_conntrack_irc_init(void)
                if (ret) {
                        pr_err("failed to register helper for pf: %u port: %u\n",
                               irc[i].tuple.src.l3num, ports[i]);
+                       ports_c = i;
                        nf_conntrack_irc_fini();
                        return ret;
                }
index 7523a575f6d16ded0c5724156cfe280598c5c505..3fcbaab83b3d34c728c2e55f55746156e78d2d89 100644 (file)
@@ -223,6 +223,7 @@ static int __init nf_conntrack_sane_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %d port: %d\n",
                                       sane[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_sane_fini();
                                return ret;
                        }
index 3e06402739e0acfd946fc8de04116aef0feea4ad..f72ba5587588fdaabb02232cbb33a0500ee2d422 100644 (file)
@@ -1669,6 +1669,7 @@ static int __init nf_conntrack_sip_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %u port: %u\n",
                                       sip[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_sip_fini();
                                return ret;
                        }
index f87e84ebcec3ac78cc4b941ec8828fab8c623f55..c026c472ea80d8dae3b57e6486c62d56903bca4e 100644 (file)
@@ -487,8 +487,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
        { }
 };
 
-#define NET_NF_CONNTRACK_MAX 2089
-
 static struct ctl_table nf_ct_netfilter_table[] = {
        {
                .procname       = "nf_conntrack_max",
index 36f9640664615bee6a640e11f4f18d1459952a81..2e65b5430fba6d09fa060aef44099360c033096c 100644 (file)
@@ -142,6 +142,7 @@ static int __init nf_conntrack_tftp_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %u port: %u\n",
                                       tftp[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_tftp_fini();
                                return ret;
                        }
index 5baa8e24e6ac1b512250c676cf0caf115f0b17f9..b19ad20a705ca7845ad21a5d272accba190f3f38 100644 (file)
  * Once the queue is registered it must reinject all packets it
  * receives, no matter what.
  */
-static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
 
 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
-void nf_register_queue_handler(const struct nf_queue_handler *qh)
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
 {
        /* should never happen, we only have one queueing backend in kernel */
-       WARN_ON(rcu_access_pointer(queue_handler));
-       rcu_assign_pointer(queue_handler, qh);
+       WARN_ON(rcu_access_pointer(net->nf.queue_handler));
+       rcu_assign_pointer(net->nf.queue_handler, qh);
 }
 EXPORT_SYMBOL(nf_register_queue_handler);
 
 /* The caller must flush their queue before this */
-void nf_unregister_queue_handler(void)
+void nf_unregister_queue_handler(struct net *net)
 {
-       RCU_INIT_POINTER(queue_handler, NULL);
-       synchronize_rcu();
+       RCU_INIT_POINTER(net->nf.queue_handler, NULL);
 }
 EXPORT_SYMBOL(nf_unregister_queue_handler);
 
@@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
        const struct nf_queue_handler *qh;
 
        rcu_read_lock();
-       qh = rcu_dereference(queue_handler);
+       qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net, ops);
        rcu_read_unlock();
@@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb,
        struct nf_queue_entry *entry = NULL;
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;
+       struct net *net = state->net;
 
        /* QUEUE == DROP if no one is waiting, to be safe. */
-       qh = rcu_dereference(queue_handler);
+       qh = rcu_dereference(net->nf.queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
index 4d292b933b5c5c83d205f844f1a876ebefca708c..7b7aa871a174fa2a1fd1e002fe0838231035e305 100644 (file)
@@ -2647,6 +2647,8 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
        /* Only accept unspec with dump */
        if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
                return -EAFNOSUPPORT;
+       if (!nla[NFTA_SET_TABLE])
+               return -EINVAL;
 
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
        if (IS_ERR(set))
index aa93877ab6e2629bc31791135ace6ef474abd35e..5d36a0926b4a4859304fdd1808b428c87a2ee8c4 100644 (file)
@@ -557,7 +557,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 
        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
-               struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
+               struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
 
                ts.sec = cpu_to_be64(kts.tv_sec);
                ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
@@ -1482,21 +1482,29 @@ static int __net_init nfnl_queue_net_init(struct net *net)
                         net->nf.proc_netfilter, &nfqnl_file_ops))
                return -ENOMEM;
 #endif
+       nf_register_queue_handler(net, &nfqh);
        return 0;
 }
 
 static void __net_exit nfnl_queue_net_exit(struct net *net)
 {
+       nf_unregister_queue_handler(net);
 #ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
 #endif
 }
 
+static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
+{
+       synchronize_rcu();
+}
+
 static struct pernet_operations nfnl_queue_net_ops = {
-       .init   = nfnl_queue_net_init,
-       .exit   = nfnl_queue_net_exit,
-       .id     = &nfnl_queue_net_id,
-       .size   = sizeof(struct nfnl_queue_net),
+       .init           = nfnl_queue_net_init,
+       .exit           = nfnl_queue_net_exit,
+       .exit_batch     = nfnl_queue_net_exit_batch,
+       .id             = &nfnl_queue_net_id,
+       .size           = sizeof(struct nfnl_queue_net),
 };
 
 static int __init nfnetlink_queue_init(void)
@@ -1517,7 +1525,6 @@ static int __init nfnetlink_queue_init(void)
        }
 
        register_netdevice_notifier(&nfqnl_dev_notifier);
-       nf_register_queue_handler(&nfqh);
        return status;
 
 cleanup_netlink_notifier:
@@ -1529,7 +1536,6 @@ static int __init nfnetlink_queue_init(void)
 
 static void __exit nfnetlink_queue_fini(void)
 {
-       nf_unregister_queue_handler();
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
index c69c892231d7b6b8699f20913847f64f1a758a42..2675d580c490b3de07562ce9174a0b4f69d55035 100644 (file)
@@ -612,7 +612,7 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems,
                return -EINVAL;
 
        if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
-           target_offset + sizeof(struct compat_xt_standard_target) != next_offset)
+           COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
                return -EINVAL;
 
        /* compat_xt_entry match has less strict aligment requirements,
@@ -694,7 +694,7 @@ int xt_check_entry_offsets(const void *base,
                return -EINVAL;
 
        if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
-           target_offset + sizeof(struct xt_standard_target) != next_offset)
+           XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
                return -EINVAL;
 
        return xt_check_entry_match(elems, base + target_offset,
index 4040eb92d9c9dc3b1ce0ba41227b6b1e9bd32eff..9bff6ef16fa7632fcfc05f23dd696d75ead6d5e8 100644 (file)
@@ -93,6 +93,7 @@
 #include <net/inet_common.h>
 #endif
 #include <linux/bpf.h>
+#include <net/compat.h>
 
 #include "internal.h"
 
@@ -3940,6 +3941,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 }
 
 
+#ifdef CONFIG_COMPAT
+static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
+                                   char __user *optval, unsigned int optlen)
+{
+       struct packet_sock *po = pkt_sk(sock->sk);
+
+       if (level != SOL_PACKET)
+               return -ENOPROTOOPT;
+
+       if (optname == PACKET_FANOUT_DATA &&
+           po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
+               optval = (char __user *)get_compat_bpf_fprog(optval);
+               if (!optval)
+                       return -EFAULT;
+               optlen = sizeof(struct sock_fprog);
+       }
+
+       return packet_setsockopt(sock, level, optname, optval, optlen);
+}
+#endif
+
 static int packet_notifier(struct notifier_block *this,
                           unsigned long msg, void *ptr)
 {
@@ -4416,6 +4438,9 @@ static const struct proto_ops packet_ops = {
        .shutdown =     sock_no_shutdown,
        .setsockopt =   packet_setsockopt,
        .getsockopt =   packet_getsockopt,
+#ifdef CONFIG_COMPAT
+       .compat_setsockopt = compat_packet_setsockopt,
+#endif
        .sendmsg =      packet_sendmsg,
        .recvmsg =      packet_recvmsg,
        .mmap =         packet_mmap,
index 80256b08eac0388d702f5981f00703d2118f5cce..387df5f32e49b9084363dc3da3ff62aa252e5507 100644 (file)
@@ -74,6 +74,7 @@ enum {
        RDS_CONN_CONNECTING,
        RDS_CONN_DISCONNECTING,
        RDS_CONN_UP,
+       RDS_CONN_RESETTING,
        RDS_CONN_ERROR,
 };
 
@@ -813,6 +814,7 @@ void rds_connect_worker(struct work_struct *);
 void rds_shutdown_worker(struct work_struct *);
 void rds_send_worker(struct work_struct *);
 void rds_recv_worker(struct work_struct *);
+void rds_connect_path_complete(struct rds_connection *conn, int curr);
 void rds_connect_complete(struct rds_connection *conn);
 
 /* transport.c */
index c0be1ecd11c99ce57360687fa2b34f312c0cf0d6..8413f6c99e13519d7cbfa1ec6ce3877a0059caa7 100644 (file)
@@ -561,5 +561,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
                minfo.fport = inc->i_hdr.h_dport;
        }
 
+       minfo.flags = 0;
+
        rds_info_copy(iter, &minfo, sizeof(minfo));
 }
index c9cdb358ea885e3e356cc675b579f1313ed94ff9..b1962f8e30f7bfc9fa77fb9bb413c42c81018e57 100644 (file)
@@ -99,6 +99,7 @@ void rds_send_reset(struct rds_connection *conn)
        list_splice_init(&conn->c_retrans, &conn->c_send_queue);
        spin_unlock_irqrestore(&conn->c_lock, flags);
 }
+EXPORT_SYMBOL_GPL(rds_send_reset);
 
 static int acquire_in_xmit(struct rds_connection *conn)
 {
index 86187dad14403100ff6199ee79beed5262de818f..74ee126a6fe6c00ce8bce45053ec2b8805fc222e 100644 (file)
@@ -126,9 +126,81 @@ void rds_tcp_restore_callbacks(struct socket *sock,
 }
 
 /*
- * This is the only path that sets tc->t_sock.  Send and receive trust that
- * it is set.  The RDS_CONN_UP bit protects those paths from being
- * called while it isn't set.
+ * rds_tcp_reset_callbacks() switches to the new sock and
+ * returns the existing tc->t_sock.
+ *
+ * The only functions that set tc->t_sock are rds_tcp_set_callbacks
+ * and rds_tcp_reset_callbacks.  Send and receive trust that
+ * it is set.  The absence of the RDS_CONN_UP bit protects those
+ * paths from being called while it isn't set.
+ */
+void rds_tcp_reset_callbacks(struct socket *sock,
+                            struct rds_connection *conn)
+{
+       struct rds_tcp_connection *tc = conn->c_transport_data;
+       struct socket *osock = tc->t_sock;
+
+       if (!osock)
+               goto newsock;
+
+       /* Need to resolve a duelling SYN between peers.
+        * We have an outstanding SYN to this peer, which may
+        * potentially have transitioned to the RDS_CONN_UP state,
+        * so we must quiesce any send threads before resetting
+        * c_transport_data. We quiesce these threads by setting
+        * c_state to something other than RDS_CONN_UP, and then
+        * waiting for any existing threads in rds_send_xmit to
+        * complete release_in_xmit(). (Subsequent threads entering
+        * rds_send_xmit() will bail on !rds_conn_up().)
+        *
+        * However an incoming syn-ack at this point would end up
+        * marking the conn as RDS_CONN_UP, and would again permit
+        * rds_send_xmit() threads through, so ideally we would
+        * synchronize on RDS_CONN_UP after lock_sock(), but cannot
+        * do that: waiting on !RDS_IN_XMIT after lock_sock() may
+        * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
+        * bit would not get set. As a result, we set c_state to
+        * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
+        * cannot move the connection to RDS_CONN_UP in the window
+        * before lock_sock().
+        */
+       atomic_set(&conn->c_state, RDS_CONN_RESETTING);
+       wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
+       lock_sock(osock->sk);
+       /* reset receive side state for rds_tcp_data_recv() for osock  */
+       if (tc->t_tinc) {
+               rds_inc_put(&tc->t_tinc->ti_inc);
+               tc->t_tinc = NULL;
+       }
+       tc->t_tinc_hdr_rem = sizeof(struct rds_header);
+       tc->t_tinc_data_rem = 0;
+       tc->t_sock = NULL;
+
+       write_lock_bh(&osock->sk->sk_callback_lock);
+
+       osock->sk->sk_user_data = NULL;
+       osock->sk->sk_data_ready = tc->t_orig_data_ready;
+       osock->sk->sk_write_space = tc->t_orig_write_space;
+       osock->sk->sk_state_change = tc->t_orig_state_change;
+       write_unlock_bh(&osock->sk->sk_callback_lock);
+       release_sock(osock->sk);
+       sock_release(osock);
+newsock:
+       rds_send_reset(conn);
+       lock_sock(sock->sk);
+       write_lock_bh(&sock->sk->sk_callback_lock);
+       tc->t_sock = sock;
+       sock->sk->sk_user_data = conn;
+       sock->sk->sk_data_ready = rds_tcp_data_ready;
+       sock->sk->sk_write_space = rds_tcp_write_space;
+       sock->sk->sk_state_change = rds_tcp_state_change;
+
+       write_unlock_bh(&sock->sk->sk_callback_lock);
+       release_sock(sock->sk);
+}
+
+/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
+ * above rds_tcp_reset_callbacks for notes about synchronization
+ * with data path
  */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
 {
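
The quiesce dance the comment above describes -- step c_state away from UP so
new senders bail, then wait out senders already in flight -- can be modelled
with two shared variables; a reduced sketch in C11 atomics (a spin-wait
stands in for wait_event(), names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { CONN_UP, CONN_RESETTING };

    static atomic_int c_state = CONN_UP;
    static atomic_bool in_xmit;

    /* send side: bail unless the connection is up, and publish that
     * a transmit is in flight (plays acquire/release_in_xmit) */
    static bool try_xmit(void)
    {
        if (atomic_load(&c_state) != CONN_UP)
            return false;
        atomic_store(&in_xmit, true);
        /* ... transmit ... */
        atomic_store(&in_xmit, false);
        return true;
    }

    /* reset side, mirroring the hunk: new senders see !UP and bail,
     * then we wait for any sender already past the state check */
    static void quiesce(void)
    {
        atomic_store(&c_state, CONN_RESETTING);
        while (atomic_load(&in_xmit))
            ;   /* kernel: wait_event(conn->c_waitq, !RDS_IN_XMIT) */
    }

    int main(void)
    {
        quiesce();
        return try_xmit() ? 1 : 0;   /* 0: sender correctly bails */
    }
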
index 41c228300525c029df02472b609ada1a71cea98e..ec0602b0dc24b443c40437d5697037082d270f44 100644 (file)
@@ -50,6 +50,7 @@ struct rds_tcp_statistics {
 void rds_tcp_tune(struct socket *sock);
 void rds_tcp_nonagle(struct socket *sock);
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
+void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn);
 void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
index fb82e0a0bf89361e005a23c371f0e95dfa4970bc..fba13d0305fb25f12c476e6014a2a9d4c5559075 100644 (file)
@@ -60,7 +60,7 @@ void rds_tcp_state_change(struct sock *sk)
                case TCP_SYN_RECV:
                        break;
                case TCP_ESTABLISHED:
-                       rds_connect_complete(conn);
+                       rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
                        break;
                case TCP_CLOSE_WAIT:
                case TCP_CLOSE:
index 4bf4befe5066a0084a7430cc39f9fd0c08a9a20e..686b1d03a55858aeb59d9863339c2c8e99e03d90 100644 (file)
@@ -78,7 +78,6 @@ int rds_tcp_accept_one(struct socket *sock)
        struct inet_sock *inet;
        struct rds_tcp_connection *rs_tcp = NULL;
        int conn_state;
-       struct sock *nsk;
 
        if (!sock) /* module unload or netns delete in progress */
                return -ENETUNREACH;
@@ -136,26 +135,21 @@ int rds_tcp_accept_one(struct socket *sock)
                    !conn->c_outgoing) {
                        goto rst_nsk;
                } else {
-                       atomic_set(&conn->c_state, RDS_CONN_CONNECTING);
-                       wait_event(conn->c_waitq,
-                                  !test_bit(RDS_IN_XMIT, &conn->c_flags));
-                       rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+                       rds_tcp_reset_callbacks(new_sock, conn);
                        conn->c_outgoing = 0;
+                       /* rds_connect_path_complete() marks RDS_CONN_UP */
+                       rds_connect_path_complete(conn, RDS_CONN_DISCONNECTING);
                }
+       } else {
+               rds_tcp_set_callbacks(new_sock, conn);
+               rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
        }
-       rds_tcp_set_callbacks(new_sock, conn);
-       rds_connect_complete(conn); /* marks RDS_CONN_UP */
        new_sock = NULL;
        ret = 0;
        goto out;
 rst_nsk:
        /* reset the newly returned accept sock and bail */
-       nsk = new_sock->sk;
-       rds_tcp_stats_inc(s_tcp_listen_closed_stale);
-       nsk->sk_user_data = NULL;
-       nsk->sk_prot->disconnect(nsk, 0);
-       tcp_done(nsk);
-       new_sock = NULL;
+       kernel_sock_shutdown(new_sock, SHUT_RDWR);
        ret = 0;
 out:
        if (rs_tcp)
index 454aa6d23327b2a302bcfc72b6b3d857656e33ed..4a323045719b5c0aa0407916a772fe65aa900b0b 100644 (file)
@@ -71,9 +71,9 @@
 struct workqueue_struct *rds_wq;
 EXPORT_SYMBOL_GPL(rds_wq);
 
-void rds_connect_complete(struct rds_connection *conn)
+void rds_connect_path_complete(struct rds_connection *conn, int curr)
 {
-       if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
+       if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) {
                printk(KERN_WARNING "%s: Cannot transition to state UP, "
                                "current state is %d\n",
                                __func__,
@@ -90,6 +90,12 @@ void rds_connect_complete(struct rds_connection *conn)
        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
        queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 }
+EXPORT_SYMBOL_GPL(rds_connect_path_complete);
+
+void rds_connect_complete(struct rds_connection *conn)
+{
+       rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+}
 EXPORT_SYMBOL_GPL(rds_connect_complete);
 
 /*
index 6b726a046a7d47995ecc281f67979073fe02c102..bab56ed649ba034847375de135b7b17357f0e532 100644 (file)
@@ -1162,9 +1162,7 @@ static int rxkad_init(void)
        /* pin the cipher we need so that the crypto layer doesn't invoke
         * keventd to go get it */
        rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(rxkad_ci))
-               return PTR_ERR(rxkad_ci);
-       return 0;
+       return PTR_ERR_OR_ZERO(rxkad_ci);
 }
 
 /*
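PTR_ERR_OR_ZERO() folds the removed three lines into one: return the encoded errno when the pointer is an error pointer, else zero. A simplified, userspace-compilable sketch of the scheme (the real definitions live in include/linux/err.h):

    #include <stdint.h>

    #define MAX_ERRNO 4095

    /* error pointers encode -errno in the top MAX_ERRNO addresses */
    static inline int is_err_value(unsigned long x)
    {
            return x >= (unsigned long)-MAX_ERRNO;
    }

    static inline long ptr_err_or_zero(const void *ptr)
    {
            if (is_err_value((uintptr_t)ptr))
                    return (long)(uintptr_t)ptr; /* the negative errno */
            return 0;                            /* real pointer: success */
    }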
index b884dae692a162fccf5204f1dad27734748a69c2..c557789765dc6fed833a27b7d0353e3bda0a477f 100644 (file)
@@ -38,7 +38,7 @@ struct tcf_police {
        bool                    peak_present;
 };
 #define to_police(pc)  \
-       container_of(pc, struct tcf_police, common)
+       container_of(pc->priv, struct tcf_police, common)
 
 #define POL_TAB_MASK     15
 
@@ -119,14 +119,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
                                 struct nlattr *est, struct tc_action *a,
                                 int ovr, int bind)
 {
-       unsigned int h;
        int ret = 0, err;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
        struct tc_police *parm;
        struct tcf_police *police;
        struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
        struct tc_action_net *tn = net_generic(net, police_net_id);
-       struct tcf_hashinfo *hinfo = tn->hinfo;
        int size;
 
        if (nla == NULL)
@@ -145,7 +143,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 
        if (parm->index) {
                if (tcf_hash_search(tn, a, parm->index)) {
-                       police = to_police(a->priv);
+                       police = to_police(a);
                        if (bind) {
                                police->tcf_bindcnt += 1;
                                police->tcf_refcnt += 1;
@@ -156,16 +154,15 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
                        /* not replacing */
                        return -EEXIST;
                }
+       } else {
+               ret = tcf_hash_create(tn, parm->index, NULL, a,
+                                     sizeof(*police), bind, false);
+               if (ret)
+                       return ret;
+               ret = ACT_P_CREATED;
        }
 
-       police = kzalloc(sizeof(*police), GFP_KERNEL);
-       if (police == NULL)
-               return -ENOMEM;
-       ret = ACT_P_CREATED;
-       police->tcf_refcnt = 1;
-       spin_lock_init(&police->tcf_lock);
-       if (bind)
-               police->tcf_bindcnt = 1;
+       police = to_police(a);
 override:
        if (parm->rate.rate) {
                err = -ENOMEM;
@@ -237,16 +234,8 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
                return ret;
 
        police->tcfp_t_c = ktime_get_ns();
-       police->tcf_index = parm->index ? parm->index :
-               tcf_hash_new_index(tn);
-       police->tcf_tm.install = jiffies;
-       police->tcf_tm.lastuse = jiffies;
-       h = tcf_hash(police->tcf_index, POL_TAB_MASK);
-       spin_lock_bh(&hinfo->lock);
-       hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
-       spin_unlock_bh(&hinfo->lock);
+       tcf_hash_insert(tn, a);
 
-       a->priv = police;
        return ret;
 
 failure_unlock:
@@ -255,7 +244,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
        qdisc_put_rtab(P_tab);
        qdisc_put_rtab(R_tab);
        if (ret == ACT_P_CREATED)
-               kfree(police);
+               tcf_hash_cleanup(a, est);
        return err;
 }
 
index 730aacafc22d8638ccebce757cc5b6f8968bd39f..b3b7978f418214104d5d360aa4ea6154b36fc038 100644 (file)
@@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
        struct tc_cls_flower_offload offload = {0};
        struct tc_to_netdev tc;
 
-       if (!tc_should_offload(dev, 0))
+       if (!tc_should_offload(dev, tp, 0))
                return;
 
        offload.command = TC_CLSFLOWER_DESTROY;
@@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
        struct tc_cls_flower_offload offload = {0};
        struct tc_to_netdev tc;
 
-       if (!tc_should_offload(dev, flags))
+       if (!tc_should_offload(dev, tp, flags))
                return;
 
        offload.command = TC_CLSFLOWER_REPLACE;
@@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
        struct tc_cls_flower_offload offload = {0};
        struct tc_to_netdev tc;
 
-       if (!tc_should_offload(dev, 0))
+       if (!tc_should_offload(dev, tp, 0))
                return;
 
        offload.command = TC_CLSFLOWER_STATS;
index 079b43b3c5d24dc5eaf7bc5e7b7d0d4d0c672ca2..ffe593efe930680d4ec1a92b619256f12cb9ecda 100644 (file)
@@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, 0)) {
+       if (tc_should_offload(dev, tp, 0)) {
                offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
                offload.cls_u32->knode.handle = handle;
                dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,20 +457,21 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp,
        struct tc_to_netdev offload;
        int err;
 
+       if (!tc_should_offload(dev, tp, flags))
+               return tc_skip_sw(flags) ? -EINVAL : 0;
+
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, flags)) {
-               offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
-               offload.cls_u32->hnode.divisor = h->divisor;
-               offload.cls_u32->hnode.handle = h->handle;
-               offload.cls_u32->hnode.prio = h->prio;
+       offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
+       offload.cls_u32->hnode.divisor = h->divisor;
+       offload.cls_u32->hnode.handle = h->handle;
+       offload.cls_u32->hnode.prio = h->prio;
 
-               err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                                   tp->protocol, &offload);
-               if (tc_skip_sw(flags))
-                       return err;
-       }
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                           tp->protocol, &offload);
+       if (tc_skip_sw(flags))
+               return err;
 
        return 0;
 }
@@ -484,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, 0)) {
+       if (tc_should_offload(dev, tp, 0)) {
                offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
                offload.cls_u32->hnode.divisor = h->divisor;
                offload.cls_u32->hnode.handle = h->handle;
@@ -507,27 +508,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp,
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, flags)) {
-               offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
-               offload.cls_u32->knode.handle = n->handle;
-               offload.cls_u32->knode.fshift = n->fshift;
+       if (!tc_should_offload(dev, tp, flags))
+               return tc_skip_sw(flags) ? -EINVAL : 0;
+
+       offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
+       offload.cls_u32->knode.handle = n->handle;
+       offload.cls_u32->knode.fshift = n->fshift;
 #ifdef CONFIG_CLS_U32_MARK
-               offload.cls_u32->knode.val = n->val;
-               offload.cls_u32->knode.mask = n->mask;
+       offload.cls_u32->knode.val = n->val;
+       offload.cls_u32->knode.mask = n->mask;
 #else
-               offload.cls_u32->knode.val = 0;
-               offload.cls_u32->knode.mask = 0;
+       offload.cls_u32->knode.val = 0;
+       offload.cls_u32->knode.mask = 0;
 #endif
-               offload.cls_u32->knode.sel = &n->sel;
-               offload.cls_u32->knode.exts = &n->exts;
-               if (n->ht_down)
-                       offload.cls_u32->knode.link_handle = n->ht_down->handle;
-
-               err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                                   tp->protocol, &offload);
-               if (tc_skip_sw(flags))
-                       return err;
-       }
+       offload.cls_u32->knode.sel = &n->sel;
+       offload.cls_u32->knode.exts = &n->exts;
+       if (n->ht_down)
+               offload.cls_u32->knode.link_handle = n->ht_down->handle;
+
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                           tp->protocol, &offload);
+       if (tc_skip_sw(flags))
+               return err;
 
        return 0;
 }
@@ -863,7 +865,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        if (tb[TCA_U32_FLAGS]) {
                flags = nla_get_u32(tb[TCA_U32_FLAGS]);
                if (!tc_flags_valid(flags))
-                       return err;
+                       return -EINVAL;
        }
 
        n = (struct tc_u_knode *)*arg;
@@ -921,11 +923,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                ht->divisor = divisor;
                ht->handle = handle;
                ht->prio = tp->prio;
+
+               err = u32_replace_hw_hnode(tp, ht, flags);
+               if (err) {
+                       kfree(ht);
+                       return err;
+               }
+
                RCU_INIT_POINTER(ht->next, tp_c->hlist);
                rcu_assign_pointer(tp_c->hlist, ht);
                *arg = (unsigned long)ht;
 
-               u32_replace_hw_hnode(tp, ht, flags);
                return 0;
        }
 
index a63e879e89758fe954ab778fe776ddf6fc4e3536..bf8af2c43c2ce8b988b26f58d04074d367f987a6 100644 (file)
@@ -375,6 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                cl->deficit = cl->quantum;
        }
 
+       qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return err;
 }
@@ -407,6 +408,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 
                        bstats_update(&cl->bstats, skb);
                        qdisc_bstats_update(sch, skb);
+                       qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }
@@ -428,6 +430,7 @@ static unsigned int drr_drop(struct Qdisc *sch)
                if (cl->qdisc->ops->drop) {
                        len = cl->qdisc->ops->drop(cl->qdisc);
                        if (len > 0) {
+                               sch->qstats.backlog -= len;
                                sch->q.qlen--;
                                if (cl->qdisc->q.qlen == 0)
                                        list_del(&cl->alist);
@@ -463,6 +466,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
                        qdisc_reset(cl->qdisc);
                }
        }
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
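This hunk and the following sch_* ones enforce one invariant: every transition of sch->q.qlen is mirrored, in bytes, in sch->qstats.backlog, and a reset zeroes both together. A minimal model of the paired helpers (illustrative stand-ins for qdisc_qstats_backlog_inc/dec):

    struct qcounters { unsigned int qlen, backlog; };

    static void enqueue_account(struct qcounters *q, unsigned int pkt_len)
    {
            q->backlog += pkt_len;  /* bytes queued */
            q->qlen++;              /* packets queued */
    }

    static void dequeue_account(struct qcounters *q, unsigned int pkt_len)
    {
            q->backlog -= pkt_len;
            q->qlen--;
    }

    static void reset_account(struct qcounters *q)
    {
            q->backlog = 0;         /* both counters drop together */
            q->qlen = 0;
    }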
 
index 6883a89715625fc7874780ef962e51dc7773a35b..da250b2e06ae3b2cc054ff1d31e29649a74f6e13 100644 (file)
@@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
+       unsigned int pkt_len;
        bool memory_limited;
 
        idx = fq_codel_classify(skb, sch, &ret);
@@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        prev_backlog = sch->qstats.backlog;
        prev_qlen = sch->q.qlen;
 
+       /* save this packet length as it might be dropped by fq_codel_drop() */
+       pkt_len = qdisc_pkt_len(skb);
        /* fq_codel_drop() is quite expensive, as it performs a linear search
         * in q->backlogs[] to find a fat flow.
         * So instead of dropping a single packet, drop half of its backlog
@@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         */
        ret = fq_codel_drop(sch, q->drop_batch_size);
 
-       q->drop_overlimit += prev_qlen - sch->q.qlen;
+       prev_qlen -= sch->q.qlen;
+       prev_backlog -= sch->qstats.backlog;
+       q->drop_overlimit += prev_qlen;
        if (memory_limited)
-               q->drop_overmemory += prev_qlen - sch->q.qlen;
-       /* As we dropped packet(s), better let upper stack know this */
-       qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
-                                 prev_backlog - sch->qstats.backlog);
+               q->drop_overmemory += prev_qlen;
 
-       return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+       /* As we dropped packet(s), better let upper stack know this.
+        * If we dropped a packet for this flow, return NET_XMIT_CN,
+        * but in this case, our parents won't increase their backlogs.
+        */
+       if (ret == idx) {
+               qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+                                         prev_backlog - pkt_len);
+               return NET_XMIT_CN;
+       }
+       qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+       return NET_XMIT_SUCCESS;
 }
 
 /* This is the specific function called from codel_dequeue()
@@ -649,7 +661,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
        }
-       if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
+       if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
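The accounting in the enqueue hunk is subtle: after the batch drop, prev_qlen and prev_backlog hold the drop deltas, and when the freshly enqueued packet's own flow was the one trimmed (ret == idx), that packet is among the drops, so the parents are told about one packet and pkt_len bytes fewer while the enqueue itself reports congestion. The rule, as a sketch with stand-in names:

    /* returns 1 for NET_XMIT_CN, 0 for NET_XMIT_SUCCESS (illustrative) */
    static int report_batch_drop(unsigned int dropped_pkts,
                                 unsigned int dropped_bytes,
                                 unsigned int own_pkt_len,
                                 int own_flow_trimmed,
                                 unsigned int *tree_pkts,
                                 unsigned int *tree_bytes)
    {
            if (own_flow_trimmed) {
                    /* the packet just enqueued counts as one of the drops;
                     * the parents never saw it, so don't subtract it there
                     */
                    *tree_pkts  = dropped_pkts - 1;
                    *tree_bytes = dropped_bytes - own_pkt_len;
                    return 1;
            }
            *tree_pkts  = dropped_pkts;
            *tree_bytes = dropped_bytes;
            return 0;
    }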
index 269dd71b3828c03867c5dbbe7b041ad4babcf1f1..f9e0e9c03d0a3fba3daf7fb420297ad7e859dccf 100644 (file)
@@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
        q->gso_skb = skb;
        q->qstats.requeues++;
+       qdisc_qstats_backlog_inc(q, skb);
        q->q.qlen++;    /* it's still part of the queue */
        __netif_schedule(q);
 
@@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
+                       qdisc_qstats_backlog_dec(q, skb);
                        q->q.qlen--;
                } else
                        skb = NULL;
index d783d7cc33487f284677e107f5083588f559f742..1ac9f9f03fe31fdf8d63eccfad0c7a13939183c6 100644 (file)
@@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        qdisc_watchdog_cancel(&q->watchdog);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
 
@@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
        struct hfsc_sched *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_hfsc_qopt qopt;
-       struct hfsc_class *cl;
-       unsigned int i;
-
-       sch->qstats.backlog = 0;
-       for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
-                       sch->qstats.backlog += cl->qdisc->qstats.backlog;
-       }
 
        qopt.defcls = q->defcls;
        if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
@@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (cl->qdisc->q.qlen == 1)
                set_active(cl, qdisc_pkt_len(skb));
 
+       qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
 
        return NET_XMIT_SUCCESS;
@@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch)
 
        qdisc_unthrottled(sch);
        qdisc_bstats_update(sch, skb);
+       qdisc_qstats_backlog_dec(sch, skb);
        sch->q.qlen--;
 
        return skb;
@@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch)
                        }
                        cl->qstats.drops++;
                        qdisc_qstats_drop(sch);
+                       sch->qstats.backlog -= len;
                        sch->q.qlen--;
                        return len;
                }
index 10adbc617905e4386becd7fe272ecad5f96ee086..8fe6999b642ac3c039f40621f4ca123d129718c4 100644 (file)
@@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
        return TC_H_MIN(classid) + 1;
 }
 
+static bool ingress_cl_offload(u32 classid)
+{
+       return true;
+}
+
 static unsigned long ingress_bind_filter(struct Qdisc *sch,
                                         unsigned long parent, u32 classid)
 {
@@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
        .put            =       ingress_put,
        .walk           =       ingress_walk,
        .tcf_chain      =       ingress_find_tcf,
+       .tcf_cl_offload =       ingress_cl_offload,
        .bind_tcf       =       ingress_bind_filter,
        .unbind_tcf     =       ingress_put,
 };
@@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
        }
 }
 
+static bool clsact_cl_offload(u32 classid)
+{
+       return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
+}
+
 static unsigned long clsact_bind_filter(struct Qdisc *sch,
                                        unsigned long parent, u32 classid)
 {
@@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
        .put            =       ingress_put,
        .walk           =       ingress_walk,
        .tcf_chain      =       clsact_find_tcf,
+       .tcf_cl_offload =       clsact_cl_offload,
        .bind_tcf       =       clsact_bind_filter,
        .unbind_tcf     =       ingress_put,
 };
index fee1b15506b299755c685502a4a33283b458fef5..4b0a82191bc4011c9574a029fe8c30133c5f6f50 100644 (file)
@@ -85,6 +85,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        ret = qdisc_enqueue(skb, qdisc);
        if (ret == NET_XMIT_SUCCESS) {
+               qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
@@ -117,6 +118,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
                struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
                if (skb) {
                        qdisc_bstats_update(sch, skb);
+                       qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }
@@ -135,6 +137,7 @@ static unsigned int prio_drop(struct Qdisc *sch)
        for (prio = q->bands-1; prio >= 0; prio--) {
                qdisc = q->queues[prio];
                if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
+                       sch->qstats.backlog -= len;
                        sch->q.qlen--;
                        return len;
                }
@@ -151,6 +154,7 @@ prio_reset(struct Qdisc *sch)
 
        for (prio = 0; prio < q->bands; prio++)
                qdisc_reset(q->queues[prio]);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
 
index 8d2d8d953432e83d8523cc42923793eb852f599c..f18857febdad0adcae567beaebd2520ec9932ce6 100644 (file)
@@ -1235,8 +1235,10 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                         cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
                err = qfq_change_agg(sch, cl, cl->agg->class_weight,
                                     qdisc_pkt_len(skb));
-               if (err)
-                       return err;
+               if (err) {
+                       cl->qstats.drops++;
+                       return qdisc_drop(skb, sch);
+               }
        }
 
        err = qdisc_enqueue(skb, cl->qdisc);
index 8c0508c0e287742a8fbbcbf49134e76e9e52b807..91578bdd378c943e66137fd986c1df6f1674bd27 100644 (file)
@@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        ret = qdisc_enqueue(skb, child);
        if (likely(ret == NET_XMIT_SUCCESS)) {
+               qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
        } else if (net_xmit_drop_count(ret)) {
                q->stats.pdrop++;
@@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
        skb = child->dequeue(child);
        if (skb) {
                qdisc_bstats_update(sch, skb);
+               qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        } else {
                if (!red_is_idling(&q->vars))
@@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch)
        if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
                q->stats.other++;
                qdisc_qstats_drop(sch);
+               sch->qstats.backlog -= len;
                sch->q.qlen--;
                return len;
        }
@@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch)
        struct red_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset(q->qdisc);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
        red_restart(&q->vars);
 }
index 83b90b584fae4fcee779bdeb638df5c59f40f007..3161e491990b29254fdcb54ad3ff5d18dbf4d16f 100644 (file)
@@ -207,6 +207,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                return ret;
        }
 
+       qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
 }
@@ -217,6 +218,7 @@ static unsigned int tbf_drop(struct Qdisc *sch)
        unsigned int len = 0;
 
        if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
+               sch->qstats.backlog -= len;
                sch->q.qlen--;
                qdisc_qstats_drop(sch);
        }
@@ -263,6 +265,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
                        q->t_c = now;
                        q->tokens = toks;
                        q->ptokens = ptoks;
+                       qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        qdisc_unthrottled(sch);
                        qdisc_bstats_update(sch, skb);
@@ -294,6 +297,7 @@ static void tbf_reset(struct Qdisc *sch)
        struct tbf_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset(q->qdisc);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
        q->t_c = ktime_get_ns();
        q->tokens = q->buffer;
index 06b4df9faaa1696b1e21ead9ddacea85cf41f760..2808d550d2730ff8e36b6d8c072c65e1631064c4 100644 (file)
@@ -446,16 +446,27 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
        return ERR_PTR(err);
 }
 
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
                                        struct rpc_xprt *xprt)
 {
        struct rpc_clnt *clnt = NULL;
        struct rpc_xprt_switch *xps;
 
-       xps = xprt_switch_alloc(xprt, GFP_KERNEL);
-       if (xps == NULL)
-               return ERR_PTR(-ENOMEM);
-
+       if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
+               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               xps = args->bc_xprt->xpt_bc_xps;
+               xprt_switch_get(xps);
+       } else {
+               xps = xprt_switch_alloc(xprt, GFP_KERNEL);
+               if (xps == NULL) {
+                       xprt_put(xprt);
+                       return ERR_PTR(-ENOMEM);
+               }
+               if (xprt->bc_xprt) {
+                       xprt_switch_get(xps);
+                       xprt->bc_xprt->xpt_bc_xps = xps;
+               }
+       }
        clnt = rpc_new_client(args, xps, xprt, NULL);
        if (IS_ERR(clnt))
                return clnt;
@@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
 
        return clnt;
 }
-EXPORT_SYMBOL_GPL(rpc_create_xprt);
 
 /**
  * rpc_create - create an RPC client and transport with one call
@@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
        };
        char servername[48];
 
+       if (args->bc_xprt) {
+               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               xprt = args->bc_xprt->xpt_bc_xprt;
+               if (xprt) {
+                       xprt_get(xprt);
+                       return rpc_create_xprt(args, xprt);
+               }
+       }
+
        if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
                xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
        if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
index f5572e31d518f85b2afbbb9ec1b8e89d36448803..4f01f63102ee5d1ce936f54fa8feb3cedfc15607 100644 (file)
@@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref)
        /* See comment on corresponding get in xs_setup_bc_tcp(): */
        if (xprt->xpt_bc_xprt)
                xprt_put(xprt->xpt_bc_xprt);
+       if (xprt->xpt_bc_xps)
+               xprt_switch_put(xprt->xpt_bc_xps);
        xprt->xpt_ops->xpo_free(xprt);
        module_put(owner);
 }
index 2d3e0c42361e6190555eb3b0cd9465851c88fd7b..7e2b2fa189c340e7f0968aead6f878879e8a9a72 100644 (file)
@@ -3057,6 +3057,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
                return xprt;
 
        args->bc_xprt->xpt_bc_xprt = NULL;
+       args->bc_xprt->xpt_bc_xps = NULL;
        xprt_put(xprt);
        ret = ERR_PTR(-EINVAL);
 out_err:
index f795b1dd0ccdf3dc088e16e1e9ad3ae9cc978d16..3ad9fab1985f1cdca35c876f0f228d28b9b5db00 100644 (file)
@@ -604,7 +604,8 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
 
        link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
        link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
-       strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
+       nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
+                   TIPC_MAX_LINK_NAME);
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
                            &link_info, sizeof(link_info));
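nla_strlcpy() bounds a copy that strcpy() performed blindly on a netlink-supplied, possibly oversized name. A userspace analogue of the fix (the buffer size here is an assumption for illustration; the real bound is TIPC_MAX_LINK_NAME):

    #include <stdio.h>

    #define LINK_NAME_MAX 60  /* assumed size, for illustration only */

    static void copy_link_name(char dst[LINK_NAME_MAX], const char *src)
    {
            /* snprintf always NUL-terminates and never writes past dst,
             * truncating remote input instead of overflowing the buffer
             */
            snprintf(dst, LINK_NAME_MAX, "%s", src);
    }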
index 80aa6a3e681763232ba7f5c61663761c7d3ec0d3..735362c26c8e5a3329d3cc6517b330167b393db6 100644 (file)
@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
                    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
                struct dentry *dentry = unix_sk(s)->path.dentry;
 
-               if (dentry && d_backing_inode(dentry) == i) {
+               if (dentry && d_real_inode(dentry) == i) {
                        sock_hold(s);
                        goto found;
                }
@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
                err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
                if (err)
                        goto fail;
-               inode = d_backing_inode(path.dentry);
+               inode = d_real_inode(path.dentry);
                err = inode_permission(inode, MAY_WRITE);
                if (err)
                        goto put_fail;
@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                        goto out_up;
                }
                addr->hash = UNIX_HASH_SIZE;
-               hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+               hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
                spin_lock(&unix_table_lock);
                u->path = u_path;
                list = &unix_socket_table[hash];
index d25c82bc1bbe8e80ee0761b466e16088040c717d..ecca3896b9f7a2f18f1cdcfa2d38b2d857decf27 100644 (file)
@@ -363,8 +363,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
        WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
        WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
        WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
-       WARN_ON(ops->set_tx_power && !ops->get_tx_power);
-       WARN_ON(ops->set_antenna && !ops->get_antenna);
 
        alloc_size = sizeof(*rdev) + sizeof_priv;
 
index 6250b1cfcde58758bb480758d1c61217d37a7cd1..dbb2738e356ad83785caa748a0b84352a189bb8b 100644 (file)
@@ -958,8 +958,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
                        return private(dev, iwr, cmd, info, handler);
        }
        /* Old driver API : call driver ioctl handler */
-       if (dev->netdev_ops->ndo_do_ioctl)
-               return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+       if (dev->netdev_ops->ndo_do_ioctl) {
+#ifdef CONFIG_COMPAT
+               if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+                       int ret = 0;
+                       struct iwreq iwr_lcl;
+                       struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
+
+                       memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
+                       iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
+                       iwr_lcl.u.data.length = iwp_compat->length;
+                       iwr_lcl.u.data.flags = iwp_compat->flags;
+
+                       ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
+
+                       iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
+                       iwp_compat->length = iwr_lcl.u.data.length;
+                       iwp_compat->flags = iwr_lcl.u.data.flags;
+
+                       return ret;
+               } else
+#endif
+                       return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+       }
        return -EOPNOTSUPP;
 }
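The CONFIG_COMPAT branch exists because a 32-bit caller's struct iw_point stores its pointer field in 4 bytes: the kernel widens it before the driver sees the request and narrows it back afterwards. A structural sketch with stand-in types (compat_ptr()/ptr_to_compat() are the real converters):

    #include <stdint.h>

    struct iw_point_native { void    *pointer; uint16_t length, flags; };
    struct iw_point_compat { uint32_t pointer; uint16_t length, flags; };

    static void widen(struct iw_point_native *out,
                      const struct iw_point_compat *in)
    {
            out->pointer = (void *)(uintptr_t)in->pointer;  /* compat_ptr() */
            out->length  = in->length;
            out->flags   = in->flags;
    }

    static void narrow(struct iw_point_compat *out,
                       const struct iw_point_native *in)
    {
            out->pointer = (uint32_t)(uintptr_t)in->pointer; /* ptr_to_compat() */
            out->length  = in->length;
            out->flags   = in->flags;
    }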
 
index a9155077feefb957d38dc29b4823a9529a7ee0e9..fec75786f75bbb348d03143af7d2280015394643 100644 (file)
@@ -384,7 +384,7 @@ static void do_of_entry_multi(void *symval, struct module *mod)
        len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
                      (*type)[0] ? *type : "*");
 
-       if (compatible[0])
+       if ((*compatible)[0])
                sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
                        *compatible);
 
index bd5a272f28a66f689e3dc3bc0951d37e302ad301..346fbf201c22e37e67dc58b5684c868e79ad9af5 100644 (file)
@@ -597,7 +597,7 @@ int key_reject_and_link(struct key *key,
 
        mutex_unlock(&key_construction_mutex);
 
-       if (keyring)
+       if (keyring && link_ret == 0)
                __key_link_end(keyring, &key->index_key, edit);
 
        /* wake up anyone waiting for a key to be constructed */
index c0f8f613f1f1b5e954ffa82b760c82353dd17c80..172dacd925f5721c2726194ddb917aa612f4afc3 100644 (file)
@@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
 
 static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
 {
+       hrtimer_cancel(&dpcm->timer);
        tasklet_kill(&dpcm->tasklet);
 }
 
index 87041ddd29cbcbf2c63fca9ed90ffdbc0e5f249d..47a358fab13280acad02b319d012953eb0a57e7d 100644 (file)
@@ -444,7 +444,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
        err = reg_raw_write(codec, reg, val);
        if (err == -EAGAIN) {
                err = snd_hdac_power_up_pm(codec);
-               if (!err)
+               if (err >= 0)
                        err = reg_raw_write(codec, reg, val);
                snd_hdac_power_down_pm(codec);
        }
@@ -470,7 +470,7 @@ static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
        err = reg_raw_read(codec, reg, val, uncached);
        if (err == -EAGAIN) {
                err = snd_hdac_power_up_pm(codec);
-               if (!err)
+               if (err >= 0)
                        err = reg_raw_read(codec, reg, val, uncached);
                snd_hdac_power_down_pm(codec);
        }
index 9a0d1445ca5cf85c774a6f4e4be1d5a44770adad..94089fc71884a93e38ddbbefdad9a978d02430c6 100644 (file)
@@ -365,8 +365,11 @@ enum {
 
 #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
+#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
+                       IS_KBL(pci) || IS_KBL_LP(pci))
 
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
@@ -2181,6 +2184,12 @@ static const struct pci_device_id azx_ids[] = {
        /* Sunrise Point-LP */
        { PCI_DEVICE(0x8086, 0x9d70),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Kabylake */
+       { PCI_DEVICE(0x8086, 0xa171),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Kabylake-LP */
+       { PCI_DEVICE(0x8086, 0x9d71),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
index 17fd81736d3d67f6a6fb67ef1ad624a6d96d93a9..0621920f7617f91900591c46970f6cbd03216771 100644 (file)
@@ -115,20 +115,20 @@ static int substream_free_pages(struct azx *chip,
 /*
  * Register access ops. Tegra HDA register access is DWORD only.
  */
-static void hda_tegra_writel(u32 value, u32 *addr)
+static void hda_tegra_writel(u32 value, u32 __iomem *addr)
 {
        writel(value, addr);
 }
 
-static u32 hda_tegra_readl(u32 *addr)
+static u32 hda_tegra_readl(u32 __iomem *addr)
 {
        return readl(addr);
 }
 
-static void hda_tegra_writew(u16 value, u16 *addr)
+static void hda_tegra_writew(u16 value, u16 __iomem  *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
@@ -137,20 +137,20 @@ static void hda_tegra_writew(u16 value, u16 *addr)
        writel(v, dword_addr);
 }
 
-static u16 hda_tegra_readw(u16 *addr)
+static u16 hda_tegra_readw(u16 __iomem *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
        return (v >> shift) & 0xffff;
 }
 
-static void hda_tegra_writeb(u8 value, u8 *addr)
+static void hda_tegra_writeb(u8 value, u8 __iomem *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
@@ -159,10 +159,10 @@ static void hda_tegra_writeb(u8 value, u8 *addr)
        writel(v, dword_addr);
 }
 
-static u8 hda_tegra_readb(u8 *addr)
+static u8 hda_tegra_readb(u8 __iomem *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
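All the Tegra accessors share one trick, since the HDA block there decodes only 32-bit accesses: align the address down to the containing dword, then shift and mask the narrow lane. A plain-memory sketch of the 16-bit case (readl()/writel() and the __iomem annotations are elided):

    #include <stdint.h>

    static uint16_t read16_via_32(uintptr_t addr)
    {
            unsigned int shift = (addr & 0x3) << 3;  /* byte -> bit offset */
            volatile uint32_t *daddr =
                    (volatile uint32_t *)(addr & ~(uintptr_t)0x3);

            return (uint16_t)((*daddr >> shift) & 0xffff);
    }

    static void write16_via_32(uintptr_t addr, uint16_t value)
    {
            unsigned int shift = (addr & 0x3) << 3;
            volatile uint32_t *daddr =
                    (volatile uint32_t *)(addr & ~(uintptr_t)0x3);
            uint32_t v = *daddr;

            v &= ~((uint32_t)0xffff << shift);  /* clear the target lane */
            v |= (uint32_t)value << shift;      /* splice in the new bits */
            *daddr = v;
    }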
index d53c25e7a1c19473d98d001407c5321fc0578e8d..900bfbc3368c3ffc70f74a4b7b2ed41cbf550290 100644 (file)
@@ -346,6 +346,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0234:
        case 0x10ec0274:
        case 0x10ec0294:
+       case 0x10ec0700:
+       case 0x10ec0701:
+       case 0x10ec0703:
                alc_update_coef_idx(codec, 0x10, 1<<15, 0);
                break;
        case 0x10ec0662:
@@ -2655,6 +2658,7 @@ enum {
        ALC269_TYPE_ALC256,
        ALC269_TYPE_ALC225,
        ALC269_TYPE_ALC294,
+       ALC269_TYPE_ALC700,
 };
 
 /*
@@ -2686,6 +2690,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC256:
        case ALC269_TYPE_ALC225:
        case ALC269_TYPE_ALC294:
+       case ALC269_TYPE_ALC700:
                ssids = alc269_ssids;
                break;
        default:
@@ -3618,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
 static void alc_headset_mode_unplugged(struct hda_codec *codec)
 {
        static struct coef_fw coef0255[] = {
-               WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
                WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
                UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
                WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
                WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
                {}
        };
+       static struct coef_fw coef0255_1[] = {
+               WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
+               {}
+       };
+       static struct coef_fw coef0256[] = {
+               WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
+               {}
+       };
        static struct coef_fw coef0233[] = {
                WRITE_COEF(0x1b, 0x0c0b),
                WRITE_COEF(0x45, 0xc429),
@@ -3677,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
+               alc_process_coef_fw(codec, coef0255_1);
+               alc_process_coef_fw(codec, coef0255);
+               break;
        case 0x10ec0256:
+               alc_process_coef_fw(codec, coef0256);
                alc_process_coef_fw(codec, coef0255);
                break;
        case 0x10ec0233:
@@ -3896,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
                WRITE_COEFEX(0x57, 0x03, 0x8ea6),
                {}
        };
+       static struct coef_fw coef0256[] = {
+               WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
+               WRITE_COEF(0x1b, 0x0c6b),
+               WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+               {}
+       };
        static struct coef_fw coef0233[] = {
                WRITE_COEF(0x45, 0xd429),
                WRITE_COEF(0x1b, 0x0c2b),
@@ -3936,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
-       case 0x10ec0256:
                alc_process_coef_fw(codec, coef0255);
                break;
+       case 0x10ec0256:
+               alc_process_coef_fw(codec, coef0256);
+               break;
        case 0x10ec0233:
        case 0x10ec0283:
                alc_process_coef_fw(codec, coef0233);
@@ -3978,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
                WRITE_COEFEX(0x57, 0x03, 0x8ea6),
                {}
        };
+       static struct coef_fw coef0256[] = {
+               WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
+               WRITE_COEF(0x1b, 0x0c6b),
+               WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+               {}
+       };
        static struct coef_fw coef0233[] = {
                WRITE_COEF(0x45, 0xe429),
                WRITE_COEF(0x1b, 0x0c2b),
@@ -4018,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
-       case 0x10ec0256:
                alc_process_coef_fw(codec, coef0255);
                break;
+       case 0x10ec0256:
+               alc_process_coef_fw(codec, coef0256);
+               break;
        case 0x10ec0233:
        case 0x10ec0283:
                alc_process_coef_fw(codec, coef0233);
@@ -4266,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
 static void alc255_set_default_jack_type(struct hda_codec *codec)
 {
        /* Set to iphone type */
-       static struct coef_fw fw[] = {
+       static struct coef_fw alc255fw[] = {
                WRITE_COEF(0x1b, 0x880b),
                WRITE_COEF(0x45, 0xd089),
                WRITE_COEF(0x1b, 0x080b),
@@ -4274,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
                WRITE_COEF(0x1b, 0x0c0b),
                {}
        };
-       alc_process_coef_fw(codec, fw);
+       static struct coef_fw alc256fw[] = {
+               WRITE_COEF(0x1b, 0x884b),
+               WRITE_COEF(0x45, 0xd089),
+               WRITE_COEF(0x1b, 0x084b),
+               WRITE_COEF(0x46, 0x0004),
+               WRITE_COEF(0x1b, 0x0c4b),
+               {}
+       };
+       switch (codec->core.vendor_id) {
+       case 0x10ec0255:
+               alc_process_coef_fw(codec, alc255fw);
+               break;
+       case 0x10ec0256:
+               alc_process_coef_fw(codec, alc256fw);
+               break;
+       }
        msleep(30);
 }
 
@@ -5587,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
@@ -5602,6 +5650,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+       SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
+       SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5775,10 +5825,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60180},
                {0x14, 0x90170130},
                {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60180},
+               {0x14, 0x90170120},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60160},
                {0x14, 0x90170120},
                {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60170},
+               {0x14, 0x90170120},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
@@ -6053,6 +6111,14 @@ static int patch_alc269(struct hda_codec *codec)
        case 0x10ec0294:
                spec->codec_variant = ALC269_TYPE_ALC294;
                break;
+       case 0x10ec0700:
+       case 0x10ec0701:
+       case 0x10ec0703:
+               spec->codec_variant = ALC269_TYPE_ALC700;
+               spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
+               alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+               break;
+
        }
 
        if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -7008,6 +7074,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
+       HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
        HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
index 8e36e883e453e6bda2a770cd9caa515ff0c20fce..f27d115626db7d4cb250c41e788eb5f4b329d91c 100644 (file)
@@ -112,7 +112,7 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
                return ret;
 
        if (hcp->hcd.ops->audio_startup) {
-               ret = hcp->hcd.ops->audio_startup(dai->dev->parent);
+               ret = hcp->hcd.ops->audio_startup(dai->dev->parent, hcp->hcd.data);
                if (ret) {
                        mutex_lock(&hcp->current_stream_lock);
                        hcp->current_stream = NULL;
@@ -122,8 +122,8 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
        }
 
        if (hcp->hcd.ops->get_eld) {
-               ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->eld,
-                                           sizeof(hcp->eld));
+               ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->hcd.data,
+                                           hcp->eld, sizeof(hcp->eld));
 
                if (!ret) {
                        ret = snd_pcm_hw_constraint_eld(substream->runtime,
@@ -144,7 +144,7 @@ static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
 
        WARN_ON(hcp->current_stream != substream);
 
-       hcp->hcd.ops->audio_shutdown(dai->dev->parent);
+       hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
 
        mutex_lock(&hcp->current_stream_lock);
        hcp->current_stream = NULL;
@@ -195,8 +195,8 @@ static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
        hp.sample_rate = params_rate(params);
        hp.channels = params_channels(params);
 
-       return hcp->hcd.ops->hw_params(dai->dev->parent, &hcp->daifmt[dai->id],
-                                      &hp);
+       return hcp->hcd.ops->hw_params(dai->dev->parent, hcp->hcd.data,
+                                      &hcp->daifmt[dai->id], &hp);
 }
 
 static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
@@ -280,7 +280,8 @@ static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
        dev_dbg(dai->dev, "%s()\n", __func__);
 
        if (hcp->hcd.ops->digital_mute)
-               return hcp->hcd.ops->digital_mute(dai->dev->parent, mute);
+               return hcp->hcd.ops->digital_mute(dai->dev->parent,
+                                                 hcp->hcd.data, mute);
 
        return 0;
 }
index bbf69d248ec56ad187f68969367b15c282fbb7fe..9f53020c32697e0aeb52de6ab8fa8d24a0ac24fa 100644 (file)
@@ -204,6 +204,44 @@ static unsigned long long adjust_signedness(unsigned long long value_int, int si
        return (value_int & value_mask) | ~value_mask;
 }
 
+static int string_set_value(struct bt_ctf_field *field, const char *string)
+{
+       char *buffer = NULL;
+       size_t len = strlen(string), i, p;
+       int err;
+
+       for (i = p = 0; i < len; i++, p++) {
+               if (isprint(string[i])) {
+                       if (!buffer)
+                               continue;
+                       buffer[p] = string[i];
+               } else {
+                       char numstr[5];
+
+                       snprintf(numstr, sizeof(numstr), "\\x%02x",
+                                (unsigned int)(string[i]) & 0xff);
+
+                       if (!buffer) {
+                               buffer = zalloc(i + (len - i) * 4 + 2);
+                               if (!buffer) {
+                                       pr_err("failed to set unprintable string '%s'\n", string);
+                                       return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
+                               }
+                               if (i > 0)
+                                       strncpy(buffer, string, i);
+                       }
+                       strncat(buffer + p, numstr, 4);
+                       p += 3;
+               }
+       }
+
+       if (!buffer)
+               return bt_ctf_field_string_set_value(field, string);
+       err = bt_ctf_field_string_set_value(field, buffer);
+       free(buffer);
+       return err;
+}
+
 static int add_tracepoint_field_value(struct ctf_writer *cw,
                                      struct bt_ctf_event_class *event_class,
                                      struct bt_ctf_event *event,
@@ -270,8 +308,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
                }
 
                if (flags & FIELD_IS_STRING)
-                       ret = bt_ctf_field_string_set_value(field,
-                                       data + offset + i * len);
+                       ret = string_set_value(field, data + offset + i * len);
                else {
                        unsigned long long value_int;
 
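string_set_value() escapes each unprintable byte as \xNN, which is why the replacement buffer is sized i + (len - i) * 4 + 2: at most four output bytes per remaining input byte plus a terminator. A standalone version of the same escaping, simplified to always allocate:

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *escape_unprintable(const char *s)
    {
            size_t len = strlen(s), i, p = 0;
            char *buf = calloc(1, len * 4 + 1);  /* all-escaped upper bound */

            if (!buf)
                    return NULL;
            for (i = 0; i < len; i++) {
                    if (isprint((unsigned char)s[i])) {
                            buf[p++] = s[i];
                    } else {
                            /* "\xNN" is 4 chars; snprintf adds the NUL */
                            snprintf(buf + p, 5, "\\x%02x",
                                     (unsigned char)s[i]);
                            p += 4;
                    }
            }
            return buf;  /* caller frees */
    }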
index f6fcc68329499f255ddc1a5012dd36fdc04d8024..9b141f12329edc750de5eefd47c3c7f2cb46e503 100644 (file)
@@ -673,6 +673,8 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
        int err;
        union perf_event *event;
 
+       if (symbol_conf.kptr_restrict)
+               return -1;
        if (map == NULL)
                return -1;
 
index 20f9cb32b703cd10330fa80ed328a16126a59a04..54c4ff2b1cee298335a9cdecfd3d07a43f51d241 100644 (file)
@@ -1933,17 +1933,17 @@ int setup_intlist(struct intlist **list, const char *list_str,
 static bool symbol__read_kptr_restrict(void)
 {
        bool value = false;
+       FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
 
-       if (geteuid() != 0) {
-               FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
-               if (fp != NULL) {
-                       char line[8];
+       if (fp != NULL) {
+               char line[8];
 
-                       if (fgets(line, sizeof(line), fp) != NULL)
-                               value = atoi(line) != 0;
+               if (fgets(line, sizeof(line), fp) != NULL)
+                       value = (geteuid() != 0) ?
+                                       (atoi(line) != 0) :
+                                       (atoi(line) == 2);
 
-                       fclose(fp);
-               }
+               fclose(fp);
        }
 
        return value;
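The rewrite changes the policy, not just the structure: previously root never read kptr_restrict at all, so level 2 (which hides kernel pointers even from root) went undetected. The resulting decision, as a small sketch:

    #include <stdbool.h>

    static bool kernel_ptrs_hidden(int kptr_restrict, bool is_root)
    {
            if (is_root)
                    return kptr_restrict == 2;  /* level 2 hides even from root */
            return kptr_restrict != 0;          /* any nonzero level hides from users */
    }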
index c2b61c4fda1124e4a30789c751f640168a201097..0bf5085281f3770aa376057c27ec893f857f93c1 100644 (file)
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
     exit_unsupported
 fi
 
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
     echo "hist trigger is not supported"
     exit_unsupported
 fi
 
+reset_tracer
+do_reset
+
 echo "Test histogram with execname modifier"
 
 echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger
index b2902d42a53762acfd14772e31c72fc6c1a7da6a..a00184cd9c959d901f5a7008690acb61a267d3d9 100644 (file)
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
     exit_unsupported
 fi
 
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
     echo "hist trigger is not supported"
     exit_unsupported
 fi
 
+reset_tracer
+do_reset
+
 echo "Test histogram basic trigger"
 
 echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
index 03c4a46561fcf58c8c76d0d3f4386246dd9e9b7d..3478b00ead57bdbc5487cfdc69ac0e7ecf9fa6c4 100644 (file)
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
     exit_unsupported
 fi
 
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
     echo "hist trigger is not supported"
     exit_unsupported
 fi
 
+reset_tracer
+do_reset
+
 reset_trigger
 
 echo "Test histogram multiple triggers"
index 96ba386b1b7bb1c4f7c5ff7d9960f65a731d2426..4a8217448f20183efc5933bb98a73607f6f34476 100644 (file)
@@ -111,9 +111,9 @@ static void attach_ebpf(int fd, uint16_t mod)
        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insn_cnt = ARRAY_SIZE(prog);
-       attr.insns = (uint64_t)prog;
-       attr.license = (uint64_t)bpf_license;
-       attr.log_buf = (uint64_t)bpf_log_buf;
+       attr.insns = (unsigned long) &prog;
+       attr.license = (unsigned long) &bpf_license;
+       attr.log_buf = (unsigned long) &bpf_log_buf;
        attr.log_size = sizeof(bpf_log_buf);
        attr.log_level = 1;
        attr.kern_version = 0;
@@ -351,8 +351,8 @@ static void test_filter_no_reuseport(const struct test_params p)
        memset(&eprog, 0, sizeof(eprog));
        eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        eprog.insn_cnt = ARRAY_SIZE(ecode);
-       eprog.insns = (uint64_t)ecode;
-       eprog.license = (uint64_t)bpf_license;
+       eprog.insns = (unsigned long) &ecode;
+       eprog.license = (unsigned long) &bpf_license;
        eprog.kern_version = 0;
 
        memset(&cprog, 0, sizeof(cprog));
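The casts matter on 32-bit builds: bpf_attr's pointer-carrying fields are u64, and casting a pointer straight to uint64_t trips a pointer-to-integer-of-different-size warning, so the value is widened through unsigned long first. The usual portable helper, as a sketch:

    #include <stdint.h>

    static uint64_t ptr_to_u64(const void *p)
    {
            /* widen via uintptr_t so 32- and 64-bit builds both stay clean */
            return (uint64_t)(uintptr_t)p;
    }

    /* usage: attr.insns = ptr_to_u64(prog); */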
index 932ff577ffc0b01405c1a264bbfc36b2baa8c058..00c4f65d12daf995123cdaaf9ef3e53cbe2e18eb 100644 (file)
@@ -136,7 +136,7 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
        printf("No of huge pages allocated = %d\n",
               (atoi(nr_hugepages)));
 
-       if (write(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages))
+       if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
            != strlen(initial_nr_hugepages)) {
                perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
                goto close_fd;
index 6ba7455298338988f1e4454b0fec3c41bb61bc89..6173adae9f083670f8dca4b37f8c856ae46874d6 100644 (file)
@@ -1,6 +1,6 @@
 all:
 
-all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder
+all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder noring
 
 CFLAGS += -Wall
 CFLAGS += -pthread -O2 -ggdb
@@ -15,11 +15,13 @@ ring: ring.o main.o
 virtio_ring_0_9: virtio_ring_0_9.o main.o
 virtio_ring_poll: virtio_ring_poll.o main.o
 virtio_ring_inorder: virtio_ring_inorder.o main.o
+noring: noring.o main.o
 clean:
        -rm main.o
        -rm ring.o ring
        -rm virtio_ring_0_9.o virtio_ring_0_9
        -rm virtio_ring_poll.o virtio_ring_poll
        -rm virtio_ring_inorder.o virtio_ring_inorder
+       -rm noring.o noring
 
 .PHONY: all clean
index 34e94c46104f0433bf709c2889890bc7cb2c0050..d83707a336c9272720b2f42b2b2cee804df07de8 100644 (file)
@@ -1,2 +1,6 @@
 Partial implementation of various ring layouts, useful to tune virtio design.
 Uses shared memory heavily.
+
+Typical use:
+
+# sh run-on-all.sh perf stat -r 10 --log-fd 1 -- ./ring
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c
new file mode 100644 (file)
index 0000000..eda2f48
--- /dev/null
@@ -0,0 +1,69 @@
+#define _GNU_SOURCE
+#include "main.h"
+#include <assert.h>
+
+/* stub implementation: useful for measuring overhead */
+void alloc_ring(void)
+{
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+       return 0;
+}
+
+/*
+ * The skb_array API provides no way for the producer to find out whether a
+ * given buffer was consumed.  Our tests merely require that a successful
+ * get_buf implies that add_inbuf succeeded in the past and that add_inbuf
+ * will succeed in the future, so fake it accordingly.
+ */
+void *get_buf(unsigned *lenp, void **bufp)
+{
+       return "Buffer";
+}
+
+void poll_used(void)
+{
+}
+
+void disable_call()
+{
+       assert(0);
+}
+
+bool enable_call()
+{
+       assert(0);
+}
+
+void kick_available(void)
+{
+       assert(0);
+}
+
+/* host side */
+void disable_kick()
+{
+       assert(0);
+}
+
+bool enable_kick()
+{
+       assert(0);
+}
+
+void poll_avail(void)
+{
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+       return true;
+}
+
+void call_used(void)
+{
+       assert(0);
+}
index 52b0f71ffa8da0e3e25d46e5337d8e2aba0737b4..2e69ca812b4cf4b39d653cd774286587fcc87bc7 100755 (executable)
@@ -3,10 +3,10 @@
 #Use the last CPU for the host. Why not the first?
 #Many devices tend to use cpu0 by default, so
 #it tends to be busier.
-HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1)
+HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
 
 #run command on all cpus
-for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n);
+for cpu in $(seq 0 $HOST_AFFINITY)
 do
        #Don't run guest and host on same CPU
        #It actually works ok if using signalling
index 1889163f2f05823888f62bb233b2c5e9510f4cbb..7cf6e1769903976f400b94d3c0c3a8734002b51a 100644 (file)
@@ -492,7 +492,7 @@ static void slab_stats(struct slabinfo *s)
                        s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass;
 
        if (total) {
-               printf("\nSlab Deactivation             Ocurrences  %%\n");
+               printf("\nSlab Deactivation             Occurrences %%\n");
                printf("-------------------------------------------------\n");
                printf("Slab full                     %7lu  %3lu%%\n",
                        s->deactivate_full, (s->deactivate_full * 100) / total);
index 02e98f3131bda11433fd0081752bcb9be350afb5..48bd520fc702c9e0aa812df6b93d564872f42848 100644 (file)
@@ -2941,7 +2941,7 @@ static long kvm_vm_ioctl(struct file *filp,
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
-               if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+               if (routing.nr > KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
                        goto out;
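
The relaxed comparison is an off-by-one fix: routing.nr is a count of routing entries rather than an index, so a request with exactly KVM_MAX_IRQ_ROUTES entries fills the table without overflowing it and should be accepted. An illustrative boundary check under that reading (the constant's value here is a stand-in, not the real KVM_MAX_IRQ_ROUTES):

    #define MAX_ROUTES 4096  /* stand-in value for illustration */

    /* Counts 0..MAX_ROUTES inclusive fit in the table; only larger
     * requests need to be rejected (the ioctl above uses -EINVAL). */
    static int routing_count_valid(unsigned int nr)
    {
            return nr <= MAX_ROUTES;
    }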