Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
author     John W. Linville <linville@tuxdriver.com>
           Wed, 12 Jun 2013 14:57:04 +0000 (10:57 -0400)
committer  John W. Linville <linville@tuxdriver.com>
           Wed, 12 Jun 2013 14:57:04 +0000 (10:57 -0400)
1540 files changed:
Documentation/devicetree/bindings/mips/ralink.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/macb.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/devicetree/bindings/video/exynos_hdmi.txt [moved from Documentation/devicetree/bindings/drm/exynos/hdmi.txt with 100% similarity]
Documentation/devicetree/bindings/video/exynos_hdmiddc.txt [moved from Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt with 100% similarity]
Documentation/devicetree/bindings/video/exynos_hdmiphy.txt [moved from Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt with 100% similarity]
Documentation/devicetree/bindings/video/exynos_mixer.txt [moved from Documentation/devicetree/bindings/drm/exynos/mixer.txt with 100% similarity]
Documentation/devicetree/bindings/video/simple-framebuffer.txt [new file with mode: 0644]
Documentation/devicetree/usage-model.txt
Documentation/filesystems/xfs.txt
Documentation/kernel-parameters.txt
Documentation/kernel-per-CPU-kthreads.txt [new file with mode: 0644]
Documentation/power/devices.txt
Documentation/power/interface.txt
Documentation/power/notifiers.txt
Documentation/power/states.txt
Documentation/powerpc/transactional_memory.txt
Documentation/rapidio/rapidio.txt
Documentation/rapidio/sysfs.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/arc/Kconfig
arch/arc/boot/dts/abilis_tb100_dvk.dts
arch/arc/boot/dts/abilis_tb101_dvk.dts
arch/arc/boot/dts/abilis_tb10x.dtsi
arch/arc/include/asm/Kbuild
arch/arc/include/asm/cache.h
arch/arc/include/asm/cacheflush.h
arch/arc/include/asm/page.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/shmparam.h [new file with mode: 0644]
arch/arc/include/asm/tlb.h
arch/arc/mm/Makefile
arch/arc/mm/cache_arc700.c
arch/arc/mm/mmap.c [new file with mode: 0644]
arch/arc/mm/tlb.c
arch/arc/mm/tlbex.S
arch/arc/plat-tb10x/Kconfig
arch/arc/plat-tb10x/tb10x.c
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/armada-370-xp.dtsi
arch/arm/boot/dts/armada-370.dtsi
arch/arm/boot/dts/armada-xp-gp.dts
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
arch/arm/boot/dts/armada-xp.dtsi
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/at91sam9n12.dtsi
arch/arm/boot/dts/at91sam9x25ek.dts
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/sama5d3.dtsi
arch/arm/boot/dts/sama5d3xcm.dtsi
arch/arm/boot/dts/ste-nomadik-s8815.dts
arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
arch/arm/common/mcpm_platsmp.c
arch/arm/configs/exynos_defconfig
arch/arm/configs/omap1_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/tegra_defconfig
arch/arm/crypto/sha1-armv4-large.S
arch/arm/include/asm/cmpxchg.h
arch/arm/include/asm/tlb.h
arch/arm/include/debug/ux500.S
arch/arm/kernel/process.c
arch/arm/kernel/smp.c
arch/arm/kvm/arm.c
arch/arm/kvm/mmu.c
arch/arm/mach-at91/at91rm9200_time.c
arch/arm/mach-at91/at91sam9n12.c
arch/arm/mach-at91/include/mach/at91_pmc.h
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/common.c
arch/arm/mach-exynos/common.h
arch/arm/mach-exynos/include/mach/pm-core.h
arch/arm/mach-exynos/mach-universal_c210.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/headsmp.S
arch/arm/mach-imx/platsmp.c
arch/arm/mach-kirkwood/common.c
arch/arm/mach-kirkwood/ts219-setup.c
arch/arm/mach-mvebu/Kconfig
arch/arm/mach-mvebu/armada-370-xp.c
arch/arm/mach-omap1/dma.c
arch/arm/mach-omap2/cclock33xx_data.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_33xx_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/serial.c
arch/arm/mach-orion5x/common.c
arch/arm/mach-shmobile/board-marzen.c
arch/arm/mach-sunxi/Kconfig
arch/arm/mach-tegra/tegra2_emc.c
arch/arm/mach-ux500/Kconfig
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/mach-ux500/setup.h
arch/arm/mach-vt8500/vt8500.c
arch/arm/plat-orion/common.c
arch/arm/plat-orion/include/plat/common.h
arch/arm/plat-samsung/adc.c
arch/arm/plat-samsung/devs.c
arch/arm/vfp/entry.S
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/arm64ksyms.c
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/early_printk.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/setup.c
arch/arm64/kernel/sys32.S
arch/arm64/kernel/traps.c
arch/arm64/mm/cache.S
arch/arm64/mm/fault.c
arch/arm64/mm/proc.S
arch/avr32/Kconfig
arch/avr32/include/asm/Kbuild
arch/avr32/include/asm/numnodes.h [deleted file]
arch/avr32/include/asm/param.h [deleted file]
arch/avr32/include/uapi/asm/Kbuild
arch/avr32/include/uapi/asm/param.h [deleted file]
arch/avr32/kernel/module.c
arch/blackfin/Makefile
arch/blackfin/boot/Makefile
arch/blackfin/include/asm/atomic.h
arch/blackfin/include/asm/bfin_sdh.h
arch/blackfin/include/asm/bitops.h
arch/blackfin/include/asm/def_LPBlackfin.h
arch/blackfin/include/asm/mem_init.h
arch/blackfin/kernel/cplb-nompu/cplbinit.c
arch/blackfin/kernel/cplb-nompu/cplbmgr.c
arch/blackfin/kernel/cplbinfo.c
arch/blackfin/kernel/setup.c
arch/blackfin/mach-bf537/boards/stamp.c
arch/blackfin/mach-bf538/boards/ezkit.c
arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
arch/ia64/include/asm/tlb.h
arch/m68k/Kconfig.cpu
arch/m68k/Kconfig.machine
arch/m68k/Makefile
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/commproc.h
arch/m68k/include/asm/dbg.h [deleted file]
arch/m68k/include/asm/dma.h
arch/m68k/include/asm/futex.h [new file with mode: 0644]
arch/m68k/include/asm/m53xxacr.h
arch/m68k/include/asm/m53xxsim.h [moved from arch/m68k/include/asm/m532xsim.h with 99% similarity]
arch/m68k/include/asm/m54xxacr.h
arch/m68k/include/asm/mcfgpio.h
arch/m68k/include/asm/mcfsim.h
arch/m68k/include/asm/mcftimer.h
arch/m68k/kernel/head.S
arch/m68k/platform/coldfire/Makefile
arch/m68k/platform/coldfire/m53xx.c [moved from arch/m68k/platform/coldfire/m532x.c with 98% similarity]
arch/m68k/platform/coldfire/timers.c
arch/microblaze/configs/mmu_defconfig
arch/microblaze/include/asm/cacheflush.h
arch/microblaze/include/asm/futex.h
arch/microblaze/include/asm/io.h
arch/microblaze/include/asm/pci.h
arch/microblaze/include/asm/uaccess.h
arch/microblaze/kernel/cpu/cache.c
arch/microblaze/kernel/cpu/cpuinfo.c
arch/microblaze/kernel/head.S
arch/microblaze/kernel/intc.c
arch/microblaze/kernel/process.c
arch/microblaze/mm/init.c
arch/microblaze/pci/pci-common.c
arch/mips/Kbuild
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/alchemy/Kconfig
arch/mips/alchemy/Platform
arch/mips/alchemy/board-gpr.c
arch/mips/alchemy/common/time.c
arch/mips/ar7/memory.c
arch/mips/ath79/setup.c
arch/mips/bcm63xx/Kconfig
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/bcm63xx/clk.c
arch/mips/bcm63xx/cpu.c
arch/mips/bcm63xx/dev-flash.c
arch/mips/bcm63xx/dev-spi.c
arch/mips/bcm63xx/irq.c
arch/mips/bcm63xx/prom.c
arch/mips/bcm63xx/reset.c
arch/mips/bcm63xx/setup.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/cobalt/reset.c
arch/mips/configs/db1000_defconfig
arch/mips/configs/db1235_defconfig
arch/mips/configs/lemote2f_defconfig
arch/mips/configs/malta_defconfig
arch/mips/configs/malta_kvm_defconfig [new file with mode: 0644]
arch/mips/configs/malta_kvm_guest_defconfig [new file with mode: 0644]
arch/mips/configs/maltaaprp_defconfig [new file with mode: 0644]
arch/mips/configs/maltasmtc_defconfig [new file with mode: 0644]
arch/mips/configs/maltasmvp_defconfig [new file with mode: 0644]
arch/mips/configs/maltaup_defconfig [new file with mode: 0644]
arch/mips/configs/sead3_defconfig
arch/mips/configs/sead3micro_defconfig [new file with mode: 0644]
arch/mips/fw/lib/Makefile
arch/mips/fw/lib/cmdline.c [new file with mode: 0644]
arch/mips/include/asm/asm.h
arch/mips/include/asm/bootinfo.h
arch/mips/include/asm/branch.h
arch/mips/include/asm/clock.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/dma-coherence.h [new file with mode: 0644]
arch/mips/include/asm/dma-mapping.h
arch/mips/include/asm/fpu_emulator.h
arch/mips/include/asm/fw/fw.h [new file with mode: 0644]
arch/mips/include/asm/gic.h
arch/mips/include/asm/hazards.h
arch/mips/include/asm/idle.h [new file with mode: 0644]
arch/mips/include/asm/inst.h
arch/mips/include/asm/io.h
arch/mips/include/asm/irqflags.h
arch/mips/include/asm/kvm_host.h [new file with mode: 0644]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h [deleted file]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
arch/mips/include/asm/mach-bcm63xx/ioremap.h
arch/mips/include/asm/mach-generic/dma-coherence.h
arch/mips/include/asm/mach-generic/spaces.h
arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
arch/mips/include/asm/mach-ralink/mt7620.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt288x.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt305x.h
arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt3883.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
arch/mips/include/asm/mips-boards/generic.h
arch/mips/include/asm/mips-boards/prom.h [deleted file]
arch/mips/include/asm/mips_machine.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/netlogic/haldefs.h
arch/mips/include/asm/netlogic/mips-extns.h
arch/mips/include/asm/netlogic/xlp-hal/pic.h
arch/mips/include/asm/netlogic/xlp-hal/usb.h [deleted file]
arch/mips/include/asm/page.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/prom.h
arch/mips/include/asm/sn/sn_private.h
arch/mips/include/asm/sn/types.h
arch/mips/include/asm/spinlock.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/time.h
arch/mips/include/asm/uaccess.h
arch/mips/include/asm/uasm.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/kvm.h [new file with mode: 0644]
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/Makefile
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/branch.c
arch/mips/kernel/cevt-gic.c [new file with mode: 0644]
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/crash_dump.c
arch/mips/kernel/csrc-gic.c
arch/mips/kernel/genex.S
arch/mips/kernel/idle.c [new file with mode: 0644]
arch/mips/kernel/irq-gic.c
arch/mips/kernel/kprobes.c
arch/mips/kernel/linux32.c
arch/mips/kernel/mips_machine.c
arch/mips/kernel/proc.c
arch/mips/kernel/process.c
arch/mips/kernel/prom.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/signal.c
arch/mips/kernel/smp-mt.c
arch/mips/kernel/smp.c
arch/mips/kernel/smtc-asm.S
arch/mips/kernel/smtc.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/kvm/00README.txt [new file with mode: 0644]
arch/mips/kvm/Kconfig [new file with mode: 0644]
arch/mips/kvm/Makefile [new file with mode: 0644]
arch/mips/kvm/kvm_cb.c [new file with mode: 0644]
arch/mips/kvm/kvm_locore.S [new file with mode: 0644]
arch/mips/kvm/kvm_mips.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_comm.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_commpage.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_dyntrans.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_emul.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_int.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_int.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_opcode.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_stats.c [new file with mode: 0644]
arch/mips/kvm/kvm_tlb.c [new file with mode: 0644]
arch/mips/kvm/kvm_trap_emul.c [new file with mode: 0644]
arch/mips/kvm/trace.h [new file with mode: 0644]
arch/mips/lantiq/xway/gptu.c
arch/mips/lib/bitops.c
arch/mips/lib/memset.S
arch/mips/lib/mips-atomic.c
arch/mips/lib/strlen_user.S
arch/mips/lib/strncpy_user.S
arch/mips/lib/strnlen_user.S
arch/mips/loongson/common/reset.c
arch/mips/loongson1/common/reset.c
arch/mips/math-emu/cp1emu.c
arch/mips/math-emu/dsemul.c
arch/mips/mm/Makefile
arch/mips/mm/c-r4k.c
arch/mips/mm/cache.c
arch/mips/mm/dma-default.c
arch/mips/mm/page.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mm/uasm-micromips.c [new file with mode: 0644]
arch/mips/mm/uasm-mips.c [new file with mode: 0644]
arch/mips/mm/uasm.c
arch/mips/mti-malta/Makefile
arch/mips/mti-malta/Platform
arch/mips/mti-malta/malta-cmdline.c [deleted file]
arch/mips/mti-malta/malta-display.c
arch/mips/mti-malta/malta-init.c
arch/mips/mti-malta/malta-int.c
arch/mips/mti-malta/malta-memory.c
arch/mips/mti-malta/malta-setup.c
arch/mips/mti-malta/malta-time.c
arch/mips/mti-sead3/Makefile
arch/mips/mti-sead3/leds-sead3.c
arch/mips/mti-sead3/sead3-cmdline.c [deleted file]
arch/mips/mti-sead3/sead3-console.c
arch/mips/mti-sead3/sead3-display.c
arch/mips/mti-sead3/sead3-init.c
arch/mips/mti-sead3/sead3-int.c
arch/mips/mti-sead3/sead3-setup.c
arch/mips/mti-sead3/sead3-time.c
arch/mips/netlogic/Kconfig
arch/mips/netlogic/common/smp.c
arch/mips/netlogic/dts/Makefile
arch/mips/netlogic/dts/xlp_evp.dts
arch/mips/netlogic/dts/xlp_svp.dts [new file with mode: 0644]
arch/mips/netlogic/xlp/nlm_hal.c
arch/mips/netlogic/xlp/setup.c
arch/mips/netlogic/xlp/usb-init.c
arch/mips/netlogic/xlr/setup.c
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/pci-ar71xx.c
arch/mips/pci/pci-ar724x.c
arch/mips/pci/pci-bcm63xx.c
arch/mips/pmcs-msp71xx/msp_prom.c
arch/mips/pmcs-msp71xx/msp_setup.c
arch/mips/powertv/init.c
arch/mips/powertv/init.h
arch/mips/powertv/memory.c
arch/mips/powertv/powertv_setup.c
arch/mips/ralink/Kconfig
arch/mips/ralink/Makefile
arch/mips/ralink/Platform
arch/mips/ralink/common.h
arch/mips/ralink/dts/Makefile
arch/mips/ralink/dts/mt7620a.dtsi [new file with mode: 0644]
arch/mips/ralink/dts/mt7620a_eval.dts [new file with mode: 0644]
arch/mips/ralink/dts/rt2880.dtsi [new file with mode: 0644]
arch/mips/ralink/dts/rt2880_eval.dts [new file with mode: 0644]
arch/mips/ralink/dts/rt3050.dtsi
arch/mips/ralink/dts/rt3052_eval.dts
arch/mips/ralink/dts/rt3883.dtsi [new file with mode: 0644]
arch/mips/ralink/dts/rt3883_eval.dts [new file with mode: 0644]
arch/mips/ralink/early_printk.c
arch/mips/ralink/irq.c
arch/mips/ralink/mt7620.c [new file with mode: 0644]
arch/mips/ralink/of.c
arch/mips/ralink/rt288x.c [new file with mode: 0644]
arch/mips/ralink/rt305x.c
arch/mips/ralink/rt3883.c [new file with mode: 0644]
arch/mips/sgi-ip27/ip27-klnuma.c
arch/mips/sgi-ip27/ip27-memory.c
arch/mips/sgi-ip27/ip27-timer.c
arch/mips/txx9/generic/setup.c
arch/mips/vr41xx/common/pmu.c
arch/mips/wrppmc/reset.c
arch/mn10300/include/asm/pci.h
arch/mn10300/kernel/entry.S
arch/mn10300/unit-asb2305/pci.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/assembly.h
arch/parisc/include/asm/hardirq.h
arch/parisc/include/asm/mmzone.h
arch/parisc/include/asm/processor.h
arch/parisc/kernel/drivers.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/hardware.c
arch/parisc/kernel/irq.c
arch/parisc/kernel/pacache.S
arch/parisc/kernel/setup.c
arch/parisc/kernel/sys_parisc32.c
arch/parisc/kernel/traps.c
arch/parisc/kernel/unaligned.c
arch/parisc/mm/init.c
arch/powerpc/Kconfig.debug
arch/powerpc/configs/ps3_defconfig
arch/powerpc/include/asm/context_tracking.h [new file with mode: 0644]
arch/powerpc/include/asm/firmware.h
arch/powerpc/include/asm/hvcall.h
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/pgalloc-64.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/pte-hash64-64k.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/rtas.h
arch/powerpc/include/asm/signal.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/include/asm/tm.h
arch/powerpc/include/asm/udbg.h
arch/powerpc/include/uapi/asm/Kbuild
arch/powerpc/include/uapi/asm/tm.h [new file with mode: 0644]
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/cpu_setup_power.S
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/pci_dn.c
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/signal.h
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/sys_ppc32.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/udbg.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr_papr.c
arch/powerpc/kvm/book3s_xics.c
arch/powerpc/lib/copypage_power7.S
arch/powerpc/lib/copyuser_power7.S
arch/powerpc/mm/fault.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/init_64.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/powernv/Kconfig
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/powernv/pci.h
arch/powerpc/platforms/powernv/powernv.h
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/Kconfig
arch/powerpc/platforms/pseries/msi.c
arch/powerpc/platforms/pseries/suspend.c
arch/powerpc/platforms/wsp/ics.c
arch/powerpc/sysdev/Makefile
arch/powerpc/sysdev/ehv_pic.c
arch/powerpc/sysdev/mpic.c
arch/powerpc/sysdev/udbg_memcons.c [new file with mode: 0644]
arch/powerpc/sysdev/xics/ics-opal.c
arch/s390/Kconfig
arch/s390/appldata/appldata_base.c
arch/s390/include/asm/dma-mapping.h
arch/s390/include/asm/ftrace.h
arch/s390/include/asm/io.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pgtable.h
arch/s390/kernel/compat_wrapper.S
arch/s390/kernel/dis.c
arch/s390/kernel/ftrace.c
arch/s390/kernel/mcount.S
arch/s390/kernel/mcount64.S
arch/s390/kernel/smp.c
arch/s390/kernel/syscalls.S
arch/s390/mm/pgtable.c
arch/score/mm/init.c
arch/sparc/kernel/sys32.S
arch/sparc/kernel/systbls_64.S
arch/unicore32/kernel/sys.c
arch/x86/Kconfig
arch/x86/crypto/crc32-pclmul_asm.S
arch/x86/crypto/sha256-avx-asm.S
arch/x86/crypto/sha256-ssse3-asm.S
arch/x86/ia32/sys_ia32.c
arch/x86/include/asm/inst.h
arch/x86/include/asm/sys_ia32.h
arch/x86/include/asm/syscalls.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S
arch/x86/kernel/i387.c
arch/x86/kernel/microcode_intel_early.c
arch/x86/kernel/process.c
arch/x86/kernel/vm86_32.c
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/init.c
arch/x86/pci/common.c
arch/x86/pci/mrst.c
arch/x86/pci/xen.c
arch/x86/platform/efi/efi.c
arch/x86/syscalls/syscall_32.tbl
arch/x86/xen/enlighten.c
arch/x86/xen/smp.c
arch/x86/xen/smp.h
arch/x86/xen/spinlock.c
drivers/acpi/Makefile
drivers/acpi/ac.c
drivers/acpi/acpi_lpss.c
drivers/acpi/apei/cper.c
drivers/acpi/apei/ghes.c
drivers/acpi/csrt.c [deleted file]
drivers/acpi/device_pm.c
drivers/acpi/ec.c
drivers/acpi/internal.h
drivers/acpi/pci_root.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/acpi/video.c
drivers/acpi/video_detect.c
drivers/ata/acard-ahci.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ata_piix.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata-sff.c
drivers/ata/pata_ep93xx.c
drivers/ata/pdc_adma.c
drivers/ata/sata_promise.c
drivers/ata/sata_rcar.c
drivers/ata/sata_sil.c
drivers/ata/sata_sx4.c
drivers/ata/sata_via.c
drivers/base/bus.c
drivers/base/core.c
drivers/base/power/common.c
drivers/block/brd.c
drivers/block/rbd.c
drivers/block/xsysace.c
drivers/char/hw_random/mxc-rnga.c
drivers/char/hw_random/omap-rng.c
drivers/char/ipmi/ipmi_bt_sm.c
drivers/char/ipmi/ipmi_devintf.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/lp.c
drivers/char/random.c
drivers/char/ttyprintk.c
drivers/clk/clk-si5351.c
drivers/clk/clk-vt8500.c
drivers/clk/mxs/clk-imx28.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/tegra/clk-tegra20.c
drivers/clk/ux500/clk-sysctrl.c
drivers/clk/ux500/u8500_clk.c
drivers/clk/x86/clk-lpt.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/arm_big_little.h
drivers/cpufreq/arm_big_little_dt.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/crypto/caam/caamalg.c
drivers/crypto/nx/nx-aes-cbc.c
drivers/crypto/nx/nx-aes-ecb.c
drivers/crypto/nx/nx-aes-gcm.c
drivers/crypto/nx/nx-sha256.c
drivers/crypto/nx/nx-sha512.c
drivers/crypto/nx/nx.c
drivers/dma/acpi-dma.c
drivers/dma/tegra20-apb-dma.c
drivers/edac/amd64_edac_inj.c
drivers/firmware/efi/efivars.c
drivers/gpio/Kconfig
drivers/gpio/gpio-langwell.c
drivers/gpio/gpio-ml-ioh.c
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpio-mxs.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-pch.c
drivers/gpio/gpio-sch.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpio-viperboard.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_encoder_slave.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/qxl/Kconfig
drivers/gpu/drm/qxl/qxl_cmd.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_kms.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/r300_cmdbuf.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_family.h
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/host1x/drm/dc.c
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-steelseries.c
drivers/hv/channel_mgmt.c
drivers/hwmon/abituguru.c
drivers/hwmon/iio_hwmon.c
drivers/hwmon/nct6775.c
drivers/hwmon/tmp401.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-sirf.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core.c
drivers/idle/intel_idle.c
drivers/iio/adc/exynos_adc.c
drivers/iio/buffer_cb.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/dac/Kconfig
drivers/iio/frequency/adf4350.c
drivers/iio/inkern.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/infiniband/ulp/srpt/ib_srpt.h
drivers/input/mouse/synaptics.c
drivers/input/tablet/wacom_wac.c
drivers/input/tablet/wacom_wac.h
drivers/input/touchscreen/egalax_ts.c
drivers/isdn/capi/kcapi.c
drivers/leds/leds-gpio.c
drivers/leds/leds-ot200.c
drivers/lguest/page_tables.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-policy.h
drivers/md/dm-cache-target.c
drivers/md/dm-mpath.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-table.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin-metadata.h
drivers/md/dm-thin.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-space-map-metadata.c
drivers/md/persistent-data/dm-space-map.h
drivers/media/pci/zoran/zoran.h
drivers/media/pci/zoran/zoran_driver.c
drivers/media/platform/omap/omap_vout.c
drivers/memory/emif.c
drivers/mfd/Kconfig
drivers/mfd/ab8500-core.c
drivers/mfd/ab8500-debugfs.c
drivers/mfd/ab8500-gpadc.c
drivers/mfd/ab8500-sysctrl.c
drivers/mfd/abx500-core.c
drivers/mfd/cros_ec_spi.c
drivers/mfd/db8500-prcmu.c
drivers/mfd/intel_msic.c
drivers/mfd/si476x-cmd.c
drivers/misc/atmel-ssc.c
drivers/misc/dummy-irq.c
drivers/misc/mei/bus.c
drivers/misc/mei/main.c
drivers/misc/vmw_vmci/Kconfig
drivers/misc/vmw_vmci/vmci_queue_pair.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/mmci.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-pci.c
drivers/mtd/nand/lpc32xx_mlc.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_3ad.h
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/caif/Kconfig
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.h
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/calxeda/Kconfig
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_hw.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/icplus/ipg.h
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_resources.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvlan.c
drivers/net/ntb_netdev.c
drivers/net/phy/phy.c
drivers/net/team/team.c
drivers/net/team/team_mode_random.c
drivers/net/team/team_mode_roundrobin.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/rtl8150.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/ntb/ntb_hw.c
drivers/ntb/ntb_transport.c
drivers/of/base.c
drivers/parisc/lba_pci.c
drivers/parisc/superio.c
drivers/parport/Kconfig
drivers/parport/parport_gsc.c
drivers/parport/parport_gsc.h
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/aer/aerdrv_errprint.c
drivers/pcmcia/m8xx_pcmcia.c
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinctrl-abx500.c
drivers/pinctrl/pinctrl-coh901.c
drivers/pinctrl/pinctrl-exynos.c
drivers/pinctrl/pinctrl-exynos.h
drivers/pinctrl/pinctrl-exynos5440.c
drivers/pinctrl/pinctrl-lantiq.c
drivers/pinctrl/pinctrl-samsung.c
drivers/pinctrl/pinctrl-samsung.h
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-sunxi.c
drivers/pinctrl/pinctrl-xway.c
drivers/pinctrl/vt8500/pinctrl-wm8750.c
drivers/pinctrl/vt8500/pinctrl-wmt.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/dell-wmi-aio.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/hp_accel.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/pvpanic.c [new file with mode: 0644]
drivers/platform/x86/samsung-q10.c
drivers/platform/x86/sony-laptop.c
drivers/power/Kconfig
drivers/power/pm2301_charger.c
drivers/power/wm831x_backup.c
drivers/ptp/ptp_pch.c
drivers/pwm/pwm-imx.c
drivers/pwm/pwm-puv3.c
drivers/pwm/pwm-pxa.c
drivers/pwm/pwm-tegra.c
drivers/pwm/pwm-tiecap.c
drivers/pwm/pwm-tiehrpwm.c
drivers/pwm/pwm-tipwmss.c
drivers/pwm/pwm-vt8500.c
drivers/rapidio/Kconfig
drivers/rapidio/Makefile
drivers/rapidio/devices/tsi721.c
drivers/rapidio/rio-driver.c
drivers/rapidio/rio-scan.c
drivers/rapidio/rio-sysfs.c
drivers/rapidio/rio.c
drivers/rapidio/rio.h
drivers/regulator/core.c
drivers/regulator/dbx500-prcmu.c
drivers/regulator/palmas-regulator.c
drivers/rtc/Kconfig
drivers/rtc/rtc-max8998.c
drivers/rtc/rtc-nuc900.c
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-s3c.c
drivers/rtc/rtc-tegra.c
drivers/s390/block/dasd.c
drivers/s390/block/xpram.c
drivers/s390/cio/chp.c
drivers/s390/cio/chsc.h
drivers/scsi/Kconfig
drivers/scsi/aic94xx/aic94xx_dev.c
drivers/scsi/aic94xx/aic94xx_hwi.c
drivers/scsi/aic94xx/aic94xx_tmf.c
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_iscsi.h
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_els.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/bnx2fc/bnx2fc_tgt.c
drivers/scsi/csiostor/csio_lnode.h
drivers/scsi/csiostor/csio_rnode.h
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/fnic/fnic_fip.h [new file with mode: 0644]
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/vnic_dev.c
drivers/scsi/fnic/vnic_dev.h
drivers/scsi/fnic/vnic_devcmd.h
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/isci/remote_device.c
drivers/scsi/isci/remote_device.h
drivers/scsi/isci/request.c
drivers/scsi/isci/task.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/libsas/sas_port.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_logmsg.h
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/lpfc/lpfc_vport.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/mvsas/mv_sas.h
drivers/scsi/pm8001/Makefile
drivers/scsi/pm8001/pm8001_ctl.c
drivers/scsi/pm8001/pm8001_defs.h
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_hwi.h
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pm8001/pm8001_sas.h
drivers/scsi/pm8001/pm80xx_hwi.c [new file with mode: 0644]
drivers/scsi/pm8001/pm80xx_hwi.h [new file with mode: 0644]
drivers/scsi/qla2xxx/Kconfig
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla4xxx/ql4_iocb.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_pm.c
drivers/scsi/scsi_proc.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/sd_dif.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/Makefile
drivers/scsi/ufs/ufshcd-pltfrm.c [new file with mode: 0644]
drivers/scsi/ufs/ufshcd.c
drivers/spi/spi-atmel.c
drivers/spi/spi-davinci.c
drivers/spi/spi-tegra20-sflash.c
drivers/spi/spi.c
drivers/staging/Kconfig
drivers/staging/android/alarm-dev.c
drivers/staging/android/logger.c
drivers/staging/android/logger.h
drivers/staging/comedi/Kconfig
drivers/staging/comedi/comedi_buf.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/comedi/drivers/ni_labpc.c
drivers/staging/comedi/drivers/ni_labpc.h
drivers/staging/comedi/drivers/ni_mio_common.c
drivers/staging/dwc2/Kconfig
drivers/staging/dwc2/hcd.c
drivers/staging/dwc2/hcd_intr.c
drivers/staging/dwc2/platform.c
drivers/staging/gdm72xx/Kconfig
drivers/staging/iio/adc/mxs-lradc.c
drivers/staging/iio/light/tsl2x7x_core.c
drivers/staging/imx-drm/Kconfig
drivers/staging/imx-drm/imx-tve.c
drivers/staging/imx-drm/ipuv3-crtc.c
drivers/staging/media/solo6x10/Kconfig
drivers/staging/nvec/nvec.c
drivers/staging/nvec/nvec.h
drivers/staging/nvec/nvec_kbd.c
drivers/staging/nvec/nvec_power.c
drivers/staging/nvec/nvec_ps2.c
drivers/staging/sep/Kconfig
drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
drivers/staging/vt6656/hostap.c
drivers/staging/vt6656/iwctl.c
drivers/staging/zcache/ramster.h
drivers/staging/zcache/ramster/debug.c
drivers/staging/zcache/ramster/ramster-howto.txt [new file with mode: 0644]
drivers/staging/zcache/ramster/ramster.c
drivers/staging/zcache/zcache-main.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_parameters.h
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_internal.h
drivers/target/target_core_rd.c
drivers/target/target_core_rd.h
drivers/target/target_core_transport.c
drivers/thermal/armada_thermal.c
drivers/thermal/dove_thermal.c
drivers/thermal/exynos_thermal.c
drivers/tty/ehv_bytechan.c
drivers/tty/mxser.c
drivers/tty/n_tty.c
drivers/tty/rocket.c
drivers/tty/serial/68328serial.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/imx.c
drivers/tty/serial/mcf.c
drivers/tty/serial/mpc52xx_uart.c
drivers/tty/serial/nwpserial.c
drivers/tty/serial/omap-serial.c
drivers/tty/serial/samsung.c
drivers/tty/tty_audit.c
drivers/tty/vt/vt.c
drivers/tty/vt/vt_ioctl.c
drivers/uio/Kconfig
drivers/usb/atm/cxacru.c
drivers/usb/chipidea/Kconfig
drivers/usb/chipidea/ci13xxx_imx.c
drivers/usb/chipidea/core.c
drivers/usb/core/Kconfig
drivers/usb/core/devio.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/atmel_usba_udc.c
drivers/usb/gadget/bcm63xx_udc.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/dummy_hcd.c
drivers/usb/gadget/f_ecm.c
drivers/usb/gadget/f_subset.c
drivers/usb/gadget/f_uac2.c
drivers/usb/gadget/fusb300_udc.c
drivers/usb/gadget/imx_udc.c
drivers/usb/gadget/m66592-udc.c
drivers/usb/gadget/pxa25x_udc.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/s3c2410_udc.c
drivers/usb/gadget/zero.c
drivers/usb/host/Kconfig
drivers/usb/host/ehci-atmel.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-orion.c
drivers/usb/host/ehci-s5p.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci-spear.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/isp1760-if.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/ohci-exynos.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-nxp.c
drivers/usb/host/ohci-omap3.c
drivers/usb/host/ohci-pxa27x.c
drivers/usb/host/ohci-spear.c
drivers/usb/host/oxu210hp-hcd.c
drivers/usb/host/sl811-hcd.c
drivers/usb/host/uhci-hub.c
drivers/usb/host/uhci-platform.c
drivers/usb/host/uhci-q.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/musb_host.c
drivers/usb/musb/musb_host.h
drivers/usb/musb/omap2430.c
drivers/usb/phy/Kconfig
drivers/usb/phy/phy-ab8500-usb.c
drivers/usb/phy/phy-fsl-usb.c
drivers/usb/phy/phy-gpio-vbus-usb.c
drivers/usb/phy/phy-isp1301.c
drivers/usb/phy/phy-mv-u3d-usb.c
drivers/usb/phy/phy-mv-usb.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/phy/phy-nop.c
drivers/usb/phy/phy-samsung-usb2.c
drivers/usb/phy/phy-samsung-usb3.c
drivers/usb/serial/ark3116.c
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/cypress_m8.h
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/generic.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/iuu_phoenix.c
drivers/usb/serial/keyspan.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/visor.c
drivers/usb/serial/whiteheat.c
drivers/usb/serial/zte_ev.c
drivers/usb/storage/realtek_cr.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/vhost/vringh.c
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/atmel_lcdfb.c
drivers/video/au1100fb.c
drivers/video/console/Makefile
drivers/video/omap2/dss/core.c
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/omapfb/omapfb-main.c
drivers/video/omap2/vrfb.c
drivers/video/ps3fb.c
drivers/video/simplefb.c [new file with mode: 0644]
drivers/w1/masters/omap_hdq.c
drivers/watchdog/ath79_wdt.c
drivers/watchdog/davinci_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/xen/Kconfig
drivers/xen/balloon.c
drivers/xen/events.c
drivers/xen/privcmd.c
drivers/xen/tmem.c
drivers/xen/xen-pciback/pci_stub.c
drivers/xen/xen-selfballoon.c
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xenbus/xenbus_comms.h
drivers/xen/xenbus/xenbus_dev_backend.c
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xenbus/xenbus_probe.h
drivers/xen/xenbus/xenbus_probe_frontend.c
fs/aio.c
fs/befs/linuxvfs.c
fs/btrfs/backref.c
fs/btrfs/check-integrity.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-ref.h
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/free-space-cache.c
fs/btrfs/free-space-cache.h
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/raid56.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifsfs.c
fs/cifs/connect.c
fs/cifs/dns_resolve.c
fs/cifs/inode.c
fs/ecryptfs/crypto.c
fs/ecryptfs/ecryptfs_kernel.h
fs/efivarfs/file.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/extents_status.c
fs/ext4/extents_status.h
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/page-io.c
fs/fat/inode.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/inode.c
fs/gfs2/Kconfig
fs/gfs2/bmap.c
fs/gfs2/dir.c
fs/gfs2/file.c
fs/gfs2/inode.c
fs/gfs2/lops.c
fs/gfs2/quota.c
fs/gfs2/rgrp.c
fs/gfs2/super.c
fs/hfs/bnode.c
fs/hpfs/dir.c
fs/jfs/jfs_logmgr.c
fs/jfs/super.c
fs/namei.c
fs/nfs/callback_proc.c
fs/nfs/callback_xdr.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4session.c
fs/nfs/nfs4session.h
fs/nfs/nfs4state.c
fs/nfs/super.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4recover.c
fs/nilfs2/inode.c
fs/notify/fanotify/fanotify_user.c
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/pnode.c
fs/qnx6/dir.c
fs/reiserfs/dir.c
fs/reiserfs/inode.c
fs/reiserfs/xattr.c
fs/reiserfs/xattr_acl.c
fs/romfs/mmap-nommu.c
fs/xfs/xfs_acl.c
fs/xfs/xfs_acl.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_attr_remote.c
fs/xfs/xfs_attr_remote.h
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_da_btree.c
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_dir2_format.h
fs/xfs/xfs_dir2_leaf.c
fs/xfs/xfs_dir2_node.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_extfree_item.c
fs/xfs/xfs_fs.h
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quota.h
fs/xfs/xfs_super.c
fs/xfs/xfs_symlink.c
fs/xfs/xfs_vnodeops.c
include/acpi/acpi_bus.h
include/acpi/acpiosxf.h
include/acpi/processor.h
include/asm-generic/io.h
include/asm-generic/tlb.h
include/drm/drmP.h
include/drm/drm_fb_helper.h
include/drm/drm_os_linux.h
include/drm/drm_pciids.h
include/linux/acpi_dma.h
include/linux/aer.h
include/linux/audit.h
include/linux/brcmphy.h
include/linux/cgroup.h
include/linux/compat.h
include/linux/cpuidle.h
include/linux/device-mapper.h
include/linux/filter.h
include/linux/ftrace.h
include/linux/ftrace_event.h
include/linux/hid.h
include/linux/if_team.h
include/linux/journal-head.h
include/linux/kernel.h
include/linux/kref.h
include/linux/list.h
include/linux/mfd/abx500/ab8500.h
include/linux/mlx4/qp.h
include/linux/netdevice.h
include/linux/netfilter_ipv6.h
include/linux/of_platform.h
include/linux/pci-acpi.h
include/linux/pci_ids.h
include/linux/pinctrl/pinconf-generic.h
include/linux/platform_data/clk-lpss.h
include/linux/platform_data/serial-omap.h
include/linux/printk.h
include/linux/rculist_nulls.h
include/linux/rio.h
include/linux/rio_drv.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/socket.h
include/linux/spi/spi.h
include/linux/time.h
include/linux/tty.h
include/linux/uio.h
include/linux/usb/gadget.h
include/linux/usb/serial.h
include/linux/vt_kern.h
include/linux/wait.h
include/net/addrconf.h
include/net/netfilter/nf_log.h
include/net/netfilter/nfnetlink_log.h
include/net/sch_generic.h
include/net/sock.h
include/net/xfrm.h
include/scsi/libsas.h
include/scsi/osd_protocol.h
include/scsi/sas.h
include/scsi/sas_ata.h
include/scsi/scsi_device.h
include/scsi/scsi_transport_iscsi.h
include/scsi/scsi_transport_sas.h
include/sound/tlv.h
include/target/target_core_base.h
include/target/target_core_fabric.h
include/trace/events/ext4.h
include/uapi/linux/audit.h
include/uapi/linux/virtio_console.h
include/video/omapdss.h
include/xen/xenbus.h
ipc/sem.c
kernel/audit.c
kernel/audit.h
kernel/auditfilter.c
kernel/auditsc.c
kernel/cgroup.c
kernel/cpu/idle.c
kernel/events/core.c
kernel/kmod.c
kernel/module.c
kernel/params.c
kernel/range.c
kernel/rcutree_plugin.h
kernel/sys_ni.c
kernel/sysctl_binary.c
kernel/time/Kconfig
kernel/time/tick-broadcast.c
kernel/time/tick-sched.c
kernel/timer.c
kernel/trace/Kconfig
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_kprobe.c
kernel/workqueue.c
lib/Makefile
lib/iovec.c [new file with mode: 0644]
lib/klist.c
lib/mpi/longlong.h
mm/huge_memory.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/migrate.c
mm/mmu_notifier.c
mm/page_alloc.c
mm/pagewalk.c
net/802/mrp.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/main.c
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/bridge/netfilter/ebt_log.c
net/bridge/netfilter/ebt_ulog.c
net/ceph/osd_client.c
net/compat.c
net/core/dev_addr_lists.c
net/core/filter.c
net/core/iovec.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/netfilter.c
net/ipv6/proc.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/udp_impl.h
net/ipv6/udp_offload.c
net/ipv6/udplite.c
net/ipv6/xfrm6_policy.c
net/irda/irlap_frame.c
net/key/af_key.c
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_sh.c
net/netfilter/nf_log.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/xt_LOG.c
net/netfilter/xt_NFLOG.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TCPOPTSTRIP.c
net/netfilter/xt_addrtype.c
net/netlabel/netlabel_domainhash.c
net/netlink/af_netlink.c
net/sched/act_police.c
net/sched/sch_api.c
net/sched/sch_generic.c
net/sched/sch_htb.c
net/sched/sch_tbf.c
net/sctp/socket.c
net/socket.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_rpc_xdr.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/netns.h
net/sunrpc/rpc_pipe.c
net/sunrpc/sched.c
net/sunrpc/svcauth_unix.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
scripts/Makefile.lib
scripts/config
scripts/kconfig/lxdialog/menubox.c
scripts/kconfig/mconf.c
scripts/kconfig/menu.c
scripts/package/Makefile
security/selinux/xfrm.c
sound/aoa/fabrics/layout.c
sound/aoa/soundbus/i2sbus/core.c
sound/atmel/abdac.c
sound/atmel/ac97c.c
sound/mips/hal2.c
sound/mips/sgio2audio.c
sound/oss/Kconfig
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/ab8500-codec.h
sound/soc/codecs/cs42l52.c
sound/soc/codecs/cs42l52.h
sound/soc/codecs/da7213.c
sound/soc/codecs/max98090.c
sound/soc/codecs/wm0010.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8994.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/fsl/imx-ssi.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/soc-compress.c
sound/soc/soc-dapm.c
sound/usb/6fire/firmware.c
sound/usb/proc.c
tools/perf/scripts/python/net_dropmonitor.py
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/Makefile
tools/testing/selftests/soft-dirty/Makefile [deleted file]
tools/testing/selftests/soft-dirty/soft-dirty.c [deleted file]
virt/kvm/kvm_main.c

diff --git a/Documentation/devicetree/bindings/mips/ralink.txt b/Documentation/devicetree/bindings/mips/ralink.txt
new file mode 100644 (file)
index 0000000..b35a8d0
--- /dev/null
@@ -0,0 +1,17 @@
+Ralink MIPS SoC device tree bindings
+
+1. SoCs
+
+Each device tree must specify a compatible value for the Ralink SoC
+it uses in the compatible property of the root node. The compatible
+value must be one of the following values:
+
+  ralink,rt2880-soc
+  ralink,rt3050-soc
+  ralink,rt3052-soc
+  ralink,rt3350-soc
+  ralink,rt3352-soc
+  ralink,rt3883-soc
+  ralink,rt5350-soc
+  ralink,mt7620a-soc
+  ralink,mt7620n-soc
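
As a quick illustration of the compatible values listed above: on a running board (assuming procfs device-tree support is enabled) the root node's compatible property can be read back directly, showing the board string followed by one of the SoC values. The board string and output below are hypothetical.

    $ tr '\0' '\n' < /proc/device-tree/compatible    # print root-node compatibles, one per line
    ralink,rt3052-eval-board
    ralink,rt3052-soc
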
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 44afa0e5057d1580698fa81462925d87a9896856..4ff65047bb9a8c63e3e647331fe2f32e1551db56 100644 (file)
@@ -4,7 +4,7 @@ Required properties:
 - compatible: Should be "cdns,[<chip>-]{macb|gem}"
   Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs.
   Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
-  Use "cnds,pc302-gem" for Picochip picoXcell pc302 and later devices based on
+  Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4d1919bf23322ac2209eb3046bfce404c5ced483..6931c4348d240ed9f8bf6b21a0d75f9c520edf1d 100644 (file)
@@ -42,6 +42,7 @@ onnn  ON Semiconductor Corp.
 picochip       Picochip Ltd
 powervr        PowerVR (deprecated, use img)
 qcom   Qualcomm, Inc.
+ralink Mediatek/Ralink Technology Corp.
 ramtron        Ramtron International
 realtek Realtek Semiconductor Corp.
 renesas        Renesas Electronics Corporation
diff --git a/Documentation/devicetree/bindings/video/simple-framebuffer.txt b/Documentation/devicetree/bindings/video/simple-framebuffer.txt
new file mode 100644 (file)
index 0000000..3ea4605
--- /dev/null
@@ -0,0 +1,25 @@
+Simple Framebuffer
+
+A simple frame-buffer describes a raw memory region that may be rendered to,
+with the assumption that the display hardware has already been set up to scan
+out from that buffer.
+
+Required properties:
+- compatible: "simple-framebuffer"
+- reg: Should contain the location and size of the framebuffer memory.
+- width: The width of the framebuffer in pixels.
+- height: The height of the framebuffer in pixels.
+- stride: The number of bytes in each line of the framebuffer.
+- format: The format of the framebuffer surface. Valid values are:
+  - r5g6b5 (16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b).
+
+Example:
+
+       framebuffer {
+               compatible = "simple-framebuffer";
+               reg = <0x1d385000 (1600 * 1200 * 2)>;
+               width = <1600>;
+               height = <1200>;
+               stride = <(1600 * 2)>;
+               format = "r5g6b5";
+       };
diff --git a/Documentation/devicetree/usage-model.txt b/Documentation/devicetree/usage-model.txt
index ef9d06c9f8fde9341c3dc1926f83524fb7a47396..0efedaad5165d74a1ef8273fe1ff9491c096d97a 100644 (file)
@@ -191,9 +191,11 @@ Linux it will look something like this:
        };
 
 The bootargs property contains the kernel arguments, and the initrd-*
-properties define the address and size of an initrd blob.  The
-chosen node may also optionally contain an arbitrary number of
-additional properties for platform-specific configuration data.
+properties define the address and size of an initrd blob.  Note that
+initrd-end is the first address after the initrd image, so this doesn't
+match the usual semantic of struct resource.  The chosen node may also
+optionally contain an arbitrary number of additional properties for
+platform-specific configuration data.
 
 During early boot, the architecture setup code calls of_scan_flat_dt()
 several times with different helper callbacks to parse device tree
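
A small worked example of the initrd-end convention noted in the hunk above (addresses hypothetical): because linux,initrd-end is the first address after the image, the size is simply end minus start, with no inclusive "+ 1" as there would be for a struct resource.

    # chosen { linux,initrd-start = <0x81000000>; linux,initrd-end = <0x81800000>; };
    $ start=0x81000000; end=0x81800000
    $ echo $(( end - start ))
    8388608        # bytes; an inclusive end address would have required "+ 1"
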
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 3e4b3dd1e046a76b02c45f91c8779ae82e402d1f..83577f0232a0676e479bc6280ce898517a746ec8 100644 (file)
@@ -33,6 +33,9 @@ When mounting an XFS filesystem, the following options are accepted.
        removing extended attributes) the on-disk superblock feature
        bit field will be updated to reflect this format being in use.
 
+       CRC enabled filesystems always use the attr2 format, and so
+       will reject the noattr2 mount option if it is set.
+
   barrier
        Enables the use of block layer write barriers for writes into
        the journal and unwritten extent conversion.  This allows for
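
To illustrate the noattr2 note above (device and mount point hypothetical): on a CRC-enabled (v5) filesystem the attr2 format is always in effect, so requesting noattr2 at mount time is refused.

    $ mkfs.xfs -m crc=1 /dev/sdb1        # make a CRC-enabled filesystem
    $ mount -o noattr2 /dev/sdb1 /mnt    # rejected: CRC filesystems always use attr2
    $ mount /dev/sdb1 /mnt               # fine; attr2 is implied
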
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c3bfacb92910dc8d044bec88122c4a85328ed8d5..6e3b18a8afc6a8749d3d91fd559e2bd5c62cadfc 100644 (file)
@@ -3005,6 +3005,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Force threading of all interrupt handlers except those
                        marked explicitly IRQF_NO_THREAD.
 
+       tmem            [KNL,XEN]
+                       Enable the Transcendent memory driver if built-in.
+
+       tmem.cleancache=0|1 [KNL, XEN]
+                       Default is on (1). Disable the usage of the cleancache
+                       API to send anonymous pages to the hypervisor.
+
+       tmem.frontswap=0|1 [KNL, XEN]
+                       Default is on (1). Disable the usage of the frontswap
+                       API to send swap pages to the hypervisor. If disabled
+                       the selfballooning and selfshrinking are force disabled.
+
+       tmem.selfballooning=0|1 [KNL, XEN]
+                       Default is on (1). Disable the driving of swap pages
+                       to the hypervisor.
+
+       tmem.selfshrinking=0|1 [KNL, XEN]
+                       Default is on (1). Partial swapoff that immediately
+                       transfers pages from Xen hypervisor back to the
+                       kernel based on different criteria.
+
        topology=       [S390]
                        Format: {off | on}
                        Specify if the kernel should make use of the cpu
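
For reference, a hypothetical Xen guest command line combining the tmem parameters documented above: the driver is enabled, frontswap is turned off (which also force-disables selfballooning and selfshrinking), and cleancache is left at its default of on.

    $ cat /proc/cmdline
    root=/dev/xvda1 ro console=hvc0 tmem tmem.frontswap=0
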
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
new file mode 100644 (file)
index 0000000..cbf7ae4
--- /dev/null
@@ -0,0 +1,202 @@
+REDUCING OS JITTER DUE TO PER-CPU KTHREADS
+
+This document lists per-CPU kthreads in the Linux kernel and presents
+options to control their OS jitter.  Note that non-per-CPU kthreads are
+not listed here.  To reduce OS jitter from non-per-CPU kthreads, bind
+them to a "housekeeping" CPU dedicated to such work.
+
+
+REFERENCES
+
+o      Documentation/IRQ-affinity.txt:  Binding interrupts to sets of CPUs.
+
+o      Documentation/cgroups:  Using cgroups to bind tasks to sets of CPUs.
+
+o      man taskset:  Using the taskset command to bind tasks to sets
+       of CPUs.
+
+o      man sched_setaffinity:  Using the sched_setaffinity() system
+       call to bind tasks to sets of CPUs.
+
+o      /sys/devices/system/cpu/cpuN/online:  Control CPU N's hotplug state,
+       writing "0" to offline and "1" to online.
+
+o      In order to locate kernel-generated OS jitter on CPU N:
+
+               cd /sys/kernel/debug/tracing
+               echo 1 > max_graph_depth # Increase the "1" for more detail
+               echo function_graph > current_tracer
+               # run workload
+               cat per_cpu/cpuN/trace
+
+
+KTHREADS
+
+Name: ehca_comp/%u
+Purpose: Periodically process Infiniband-related work.
+To reduce its OS jitter, do any of the following:
+1.     Don't use eHCA Infiniband hardware, instead choosing hardware
+       that does not require per-CPU kthreads.  This will prevent these
+       kthreads from being created in the first place.  (This will
+       work for most people, as this hardware, though important, is
+       relatively old and is produced in relatively low unit volumes.)
+2.     Do all eHCA-Infiniband-related work on other CPUs, including
+       interrupts.
+3.     Rework the eHCA driver so that its per-CPU kthreads are
+       provisioned only on selected CPUs.
+
+
+Name: irq/%d-%s
+Purpose: Handle threaded interrupts.
+To reduce its OS jitter, do the following:
+1.     Use irq affinity to force the irq threads to execute on
+       some other CPU.
+
+Name: kcmtpd_ctr_%d
+Purpose: Handle Bluetooth work.
+To reduce its OS jitter, do one of the following:
+1.     Don't use Bluetooth, in which case these kthreads won't be
+       created in the first place.
+2.     Use irq affinity to force Bluetooth-related interrupts to
+       occur on some other CPU and furthermore initiate all
+       Bluetooth activity on some other CPU.
+
+Name: ksoftirqd/%u
+Purpose: Execute softirq handlers when threaded or when under heavy load.
+To reduce its OS jitter, each softirq vector must be handled
+separately as follows:
+TIMER_SOFTIRQ:  Do all of the following:
+1.     To the extent possible, keep the CPU out of the kernel when it
+       is non-idle, for example, by avoiding system calls and by forcing
+       both kernel threads and interrupts to execute elsewhere.
+2.     Build with CONFIG_HOTPLUG_CPU=y.  After boot completes, force
+       the CPU offline, then bring it back online.  This forces
+       recurring timers to migrate elsewhere.  If you are concerned
+       with multiple CPUs, force them all offline before bringing the
+       first one back online.  Once you have onlined the CPUs in question,
+       do not offline any other CPUs, because doing so could force the
+       timer back onto one of the CPUs in question.
+NET_TX_SOFTIRQ and NET_RX_SOFTIRQ:  Do all of the following:
+1.     Force networking interrupts onto other CPUs.
+2.     Initiate any network I/O on other CPUs.
+3.     Once your application has started, prevent CPU-hotplug operations
+       from being initiated from tasks that might run on the CPU to
+       be de-jittered.  (It is OK to force this CPU offline and then
+       bring it back online before you start your application.)
+BLOCK_SOFTIRQ:  Do all of the following:
+1.     Force block-device interrupts onto some other CPU.
+2.     Initiate any block I/O on other CPUs.
+3.     Once your application has started, prevent CPU-hotplug operations
+       from being initiated from tasks that might run on the CPU to
+       be de-jittered.  (It is OK to force this CPU offline and then
+       bring it back online before you start your application.)
+BLOCK_IOPOLL_SOFTIRQ:  Do all of the following:
+1.     Force block-device interrupts onto some other CPU.
+2.     Initiate any block I/O and block-I/O polling on other CPUs.
+3.     Once your application has started, prevent CPU-hotplug operations
+       from being initiated from tasks that might run on the CPU to
+       be de-jittered.  (It is OK to force this CPU offline and then
+       bring it back online before you start your application.)
+TASKLET_SOFTIRQ: Do one or more of the following:
+1.     Avoid use of drivers that use tasklets.  (Such drivers will contain
+       calls to things like tasklet_schedule().)
+2.     Convert all drivers that you must use from tasklets to workqueues.
+3.     Force interrupts for drivers using tasklets onto other CPUs,
+       and also do I/O involving these drivers on other CPUs.
+SCHED_SOFTIRQ: Do all of the following:
+1.     Avoid sending scheduler IPIs to the CPU to be de-jittered,
+       for example, ensure that at most one runnable kthread is present
+       on that CPU.  If a thread that expects to run on the de-jittered
+       CPU awakens, the scheduler will send an IPI that can result in
+       a subsequent SCHED_SOFTIRQ.
+2.     Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
+       CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
+       to be de-jittered is marked as an adaptive-ticks CPU using the
+       "nohz_full=" boot parameter.  This reduces the number of
+       scheduler-clock interrupts that the de-jittered CPU receives,
+       minimizing its chances of being selected to do the load balancing
+       work that runs in SCHED_SOFTIRQ context.
+3.     To the extent possible, keep the CPU out of the kernel when it
+       is non-idle, for example, by avoiding system calls and by
+       forcing both kernel threads and interrupts to execute elsewhere.
+       This further reduces the number of scheduler-clock interrupts
+       received by the de-jittered CPU.
+HRTIMER_SOFTIRQ:  Do all of the following:
+1.     To the extent possible, keep the CPU out of the kernel when it
+       is non-idle.  For example, avoid system calls and force both
+       kernel threads and interrupts to execute elsewhere.
+2.     Build with CONFIG_HOTPLUG_CPU=y.  Once boot completes, force the
+       CPU offline, then bring it back online.  This forces recurring
+       timers to migrate elsewhere.  If you are concerned with multiple
+       CPUs, force them all offline before bringing the first one
+       back online.  Once you have onlined the CPUs in question, do not
+       offline any other CPUs, because doing so could force the timer
+       back onto one of the CPUs in question.
+RCU_SOFTIRQ:  Do at least one of the following:
+1.     Offload callbacks and keep the CPU in either dyntick-idle or
+       adaptive-ticks state by doing all of the following:
+       a.      Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
+               CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
+               to be de-jittered is marked as an adaptive-ticks CPU using
+               the "nohz_full=" boot parameter.  Bind the rcuo kthreads
+               to housekeeping CPUs, which can tolerate OS jitter.
+       b.      To the extent possible, keep the CPU out of the kernel
+               when it is non-idle, for example, by avoiding system
+               calls and by forcing both kernel threads and interrupts
+               to execute elsewhere.
+2.     Enable RCU to do its processing remotely via dyntick-idle by
+       doing all of the following:
+       a.      Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y.
+       b.      Ensure that the CPU goes idle frequently, allowing other
+               CPUs to detect that it has passed through an RCU quiescent
+               state.  If the kernel is built with CONFIG_NO_HZ_FULL=y,
+               userspace execution also allows other CPUs to detect that
+               the CPU in question has passed through a quiescent state.
+       c.      To the extent possible, keep the CPU out of the kernel
+               when it is non-idle, for example, by avoiding system
+               calls and by forcing both kernel threads and interrupts
+               to execute elsewhere.
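+
+As an illustrative sketch only (CPU 3, IRQ 42, and the affinity mask are
+placeholder values, not taken from this document), the offline/online cycle
+and the interrupt redirection recommended for several of the vectors above
+might look like:
+
+               echo 0 > /sys/devices/system/cpu/cpu3/online   # force CPU 3 offline
+               echo 1 > /sys/devices/system/cpu/cpu3/online   # bring it back online
+               echo 7 > /proc/irq/42/smp_affinity             # mask 0x7 excludes CPU 3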
+
+Name: rcuc/%u
+Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
+To reduce its OS jitter, do at least one of the following:
+1.     Build the kernel with CONFIG_PREEMPT=n.  This prevents these
+       kthreads from being created in the first place, and also obviates
+       the need for RCU priority boosting.  This approach is feasible
+       for workloads that do not require high degrees of responsiveness.
+2.     Build the kernel with CONFIG_RCU_BOOST=n.  This prevents these
+       kthreads from being created in the first place.  This approach
+       is feasible only if your workload never requires RCU priority
+       boosting, for example, if you ensure frequent idle time on all
+       CPUs that might execute within the kernel.
+3.     Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y,
+       which offloads all RCU callbacks to kthreads that can be moved
+       off of CPUs susceptible to OS jitter.  This approach prevents the
+       rcuc/%u kthreads from having any work to do, so that they are
+       never awakened.
+4.     Ensure that the CPU never enters the kernel, and, in particular,
+       avoid initiating any CPU hotplug operations on this CPU.  This is
+       another way of preventing any callbacks from being queued on the
+       CPU, again preventing the rcuc/%u kthreads from having any work
+       to do.
+
+Name: rcuob/%d, rcuop/%d, and rcuos/%d
+Purpose: Offload RCU callbacks from the corresponding CPU.
+To reduce its OS jitter, do at least one of the following:
+1.     Use affinity, cgroups, or other mechanism to force these kthreads
+       to execute on some other CPU.
+2.     Build with CONFIG_RCU_NOCB_CPU=n, which will prevent these
+       kthreads from being created in the first place.  However, please
+       note that this will not eliminate OS jitter, but will instead
+       shift it to RCU_SOFTIRQ.
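+
+As an illustration of option 1 above (the housekeeping CPU number and the
+kthread name pattern are assumptions), the rcuo kthreads might be bound to
+CPU 0 with something like:
+
+               for pid in $(pgrep '^rcuo'); do taskset -cp 0 $pid; done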
+
+Name: watchdog/%u
+Purpose: Detect software lockups on each CPU.
+To reduce its OS jitter, do at least one of the following:
+1.     Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these
+       kthreads from being created in the first place.
+2.     Echo a zero to /proc/sys/kernel/watchdog to disable the
+       watchdog timer.
+3.     Echo a large number to /proc/sys/kernel/watchdog_thresh in
+       order to reduce the frequency of OS jitter due to the watchdog
+       timer down to a level that is acceptable for your workload.
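+
+As an illustrative example (the threshold value is a placeholder), options
+2 and 3 above correspond to commands such as:
+
+               echo 0 > /proc/sys/kernel/watchdog          # disable entirely
+               echo 60 > /proc/sys/kernel/watchdog_thresh  # or just slow it down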
index 504dfe4d52eba3541cfa6330d52595d37265a0b7..a66c9821b5cefa5056f2fb202a9acb00a207f8af 100644 (file)
@@ -268,7 +268,7 @@ situations.
 System Power Management Phases
 ------------------------------
 Suspending or resuming the system is done in several phases.  Different phases
-are used for standby or memory sleep states ("suspend-to-RAM") and the
+are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the
 hibernation state ("suspend-to-disk").  Each phase involves executing callbacks
 for every device before the next phase begins.  Not all busses or classes
 support all these callbacks and not all drivers use all the callbacks.  The
@@ -309,7 +309,8 @@ execute the corresponding method from dev->driver->pm instead if there is one.
 
 Entering System Suspend
 -----------------------
-When the system goes into the standby or memory sleep state, the phases are:
+When the system goes into the freeze, standby or memory sleep state,
+the phases are:
 
                prepare, suspend, suspend_late, suspend_noirq.
 
@@ -368,7 +369,7 @@ the devices that were suspended.
 
 Leaving System Suspend
 ----------------------
-When resuming from standby or memory sleep, the phases are:
+When resuming from freeze, standby or memory sleep, the phases are:
 
                resume_noirq, resume_early, resume, complete.
 
@@ -433,8 +434,8 @@ the system log.
 
 Entering Hibernation
 --------------------
-Hibernating the system is more complicated than putting it into the standby or
-memory sleep state, because it involves creating and saving a system image.
+Hibernating the system is more complicated than putting it into the other
+sleep states, because it involves creating and saving a system image.
 Therefore there are more phases for hibernation, with a different set of
 callbacks.  These phases always run after tasks have been frozen and memory has
 been freed.
@@ -485,8 +486,8 @@ image forms an atomic snapshot of the system state.
 
 At this point the system image is saved, and the devices then need to be
 prepared for the upcoming system shutdown.  This is much like suspending them
-before putting the system into the standby or memory sleep state, and the phases
-are similar.
+before putting the system into the freeze, standby or memory sleep state,
+and the phases are similar.
 
     9. The prepare phase is discussed above.
 
index c537834af00566e0fee8058a9643803d2c2b3954..f1f0f59a7c47d594a9753207d713d0834a0e99b1 100644 (file)
@@ -7,8 +7,8 @@ running. The interface exists in /sys/power/ directory (assuming sysfs
 is mounted at /sys). 
 
 /sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'standby'
-(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
+returns what states are supported, which is hard-coded to 'freeze',
+'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
 (Suspend-to-Disk). 
 
 Writing to this file one of those strings causes the system to
index c2a4a346c0d98597d3375c5a9f160c012fded709..a81fa254303de0c57ef6e7741dd76ffb0bcfdf02 100644 (file)
@@ -15,8 +15,10 @@ A suspend/hibernation notifier may be used for this purpose.
 The subsystems or drivers having such needs can register suspend notifiers that
 will be called upon the following events by the PM core:
 
-PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will
-                       be frozen immediately.
+PM_HIBERNATION_PREPARE The system is going to hibernate, tasks will be frozen
+                       immediately. This is different from PM_SUSPEND_PREPARE
+                       below because here we do additional work between notifiers
+                       and drivers freezing.
 
 PM_POST_HIBERNATION    The system memory state has been restored from a
                        hibernation image or an error occurred during
index 4416b28630df8d62cbba43e2fedb0cee42d6b3b1..442d43df9b251111df2d677b5e8cc3ef4488c831 100644 (file)
@@ -2,12 +2,26 @@
 System Power Management States
 
 
-The kernel supports three power management states generically, though
-each is dependent on platform support code to implement the low-level
-details for each state. This file describes each state, what they are
+The kernel supports four power management states generically.  One of
+them is purely generic (software only), while the other three depend on
+platform support code to implement the low-level details for each state.
+This file describes each state, what they are
 commonly called, what ACPI state they map to, and what string to write
 to /sys/power/state to enter that state
 
+State:         Freeze / Low-Power Idle
+ACPI State:    S0
+String:                "freeze"
+
+This state is a generic, pure software, light-weight, low-power state.
+It allows more energy to be saved relative to idle by freezing user
+space and putting all I/O devices into low-power states (possibly
+lower-power than available at run time), such that the processors can
+spend more time in their idle states.
+This state can be used for platforms without Standby/Suspend-to-RAM
+support, or it can be used in addition to Suspend-to-RAM (memory sleep)
+to provide reduced resume latency.
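+
+Assuming sysfs is mounted at /sys, the state is entered by writing the
+string above to /sys/power/state, for example:
+
+       echo freeze > /sys/power/state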
+
 
 State:         Standby / Power-On Suspend
 ACPI State:    S1
@@ -22,9 +36,6 @@ We try to put devices in a low-power state equivalent to D1, which
 also offers low power savings, but low resume latency. Not all devices
 support D1, and those that don't are left on. 
 
-A transition from Standby to the On state should take about 1-2
-seconds. 
-
 
 State:         Suspend-to-RAM
 ACPI State:    S3
@@ -42,9 +53,6 @@ transition back to the On state.
 For at least ACPI, STR requires some minimal boot-strapping code to
 resume the system from STR. This may be true on other platforms. 
 
-A transition from Suspend-to-RAM to the On state should take about
-3-5 seconds. 
-
 
 State:         Suspend-to-disk
 ACPI State:    S4
@@ -74,7 +82,3 @@ low-power state (like ACPI S4), or it may simply power down. Powering
 down offers greater savings, and allows this mechanism to work on any
 system. However, entering a real low-power state allows the user to
 trigger wake up events (e.g. pressing a key or opening a laptop lid).
-
-A transition from Suspend-to-Disk to the On state should take about 30
-seconds, though it's typically a bit more with the current
-implementation. 
index c907be41d60f9bbae7e93eddfc4d80b46dcc2386..dc23e58ae2641a0ae876d2a1840530ae7e4ec59d 100644 (file)
@@ -147,6 +147,25 @@ Example signal handler:
       fix_the_problem(ucp->dar);
     }
 
+When in an active transaction that takes a signal, we need to be careful with
+the stack.  It's possible that the stack has moved back up after the tbegin.
+The obvious case here is when the tbegin is called inside a function that
+returns before a tend.  In this case, the stack is part of the checkpointed
+transactional memory state.  If we write over this non-transactionally or in
+suspend, we are in trouble because if we get a TM abort, the program counter and
+stack pointer will be back at the tbegin but our in-memory stack won't be valid
+anymore.
+
+To avoid this, when taking a signal in an active transaction, we need to use
+the stack pointer from the checkpointed state, rather than the speculated
+state.  This ensures that the signal context (written while TM is suspended) will
+be written below the stack required for the rollback.  The transaction is aborted
+because of the treclaim, so any memory written between the tbegin and the
+signal will be rolled back anyway.
+
+For signals taken in non-TM or suspended mode, we use the
+normal/non-checkpointed stack pointer.
+
 
 Failure cause codes used by kernel
 ==================================
@@ -155,14 +174,18 @@ These are defined in <asm/reg.h>, and distinguish different reasons why the
 kernel aborted a transaction:
 
  TM_CAUSE_RESCHED       Thread was rescheduled.
+ TM_CAUSE_TLBI          Software TLB invalidate.
  TM_CAUSE_FAC_UNAV      FP/VEC/VSX unavailable trap.
  TM_CAUSE_SYSCALL       Currently unused; future syscalls that must abort
                         transactions for consistency will use this.
  TM_CAUSE_SIGNAL        Signal delivered.
  TM_CAUSE_MISC          Currently unused.
+ TM_CAUSE_ALIGNMENT     Alignment fault.
+ TM_CAUSE_EMULATE       Emulation that touched memory.
 
-These can be checked by the user program's abort handler as TEXASR[0:7].
-
+These can be checked by the user program's abort handler as TEXASR[0:7].  If
+bit 7 is set, it indicates that the error is considered persistent.  For example,
+a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED will not.
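+
+As a minimal, illustrative sketch (assuming a compiler that provides the HTM
+builtins, e.g. GCC with -mhtm), an abort handler might test the persistent
+bit of the failure code roughly like this:
+
+    unsigned long texasr = __builtin_get_texasr();
+    unsigned char cause  = texasr >> 56;        /* TEXASR[0:7], the failure code */
+
+    if (cause & 1) {                            /* bit 7 set => persistent */
+            /* do not simply retry; fall back to a non-transactional path */
+    }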
 
 GDB
 ===
index c75694b35d08b7f6f70290a1b715658f4d6c7156..a9c16c979da215d35d774e8e739848fe535acdd1 100644 (file)
@@ -79,20 +79,63 @@ master port that is used to communicate with devices within the network.
 In order to initialize the RapidIO subsystem, a platform must initialize and
 register at least one master port within the RapidIO network. To register mport
 within the subsystem controller driver initialization code calls function
-rio_register_mport() for each available master port. After all active master
-ports are registered with a RapidIO subsystem, the rio_init_mports() routine
-is called to perform enumeration and discovery.
+rio_register_mport() for each available master port.
 
-In the current PowerPC-based implementation a subsys_initcall() is specified to
-perform controller initialization and mport registration. At the end it directly
-calls rio_init_mports() to execute RapidIO enumeration and discovery.
+The RapidIO subsystem uses subsys_initcall() or device_initcall() to perform
+controller initialization (depending on the controller device type).
+
+After all active master ports are registered with a RapidIO subsystem,
+an enumeration and/or discovery routine may be called automatically or
+by user-space command.
 
 4. Enumeration and Discovery
 ----------------------------
 
-When rio_init_mports() is called it scans a list of registered master ports and
-calls an enumeration or discovery routine depending on the configured role of a
-master port: host or agent.
+4.1 Overview
+------------
+
+RapidIO subsystem configuration options allow users to specify enumeration and
+discovery methods as statically linked components or loadable modules.
+An enumeration/discovery method implementation and its available input parameters
+define how any given method can be attached to available RapidIO mports:
+either to all available mports at once or individually to a specified mport device.
+
+Depending on the selected enumeration/discovery build configuration, there are
+several methods to initiate an enumeration and/or discovery process:
+
+  (a) A statically linked enumeration and discovery process can be started
+  automatically during kernel initialization using the corresponding module
+  parameters. This was the original method used since the introduction of the
+  RapidIO subsystem. This method now relies on an enumerator module parameter,
+  which is 'rio-scan.scan' for the existing basic enumeration/discovery method.
+  When automatic start of enumeration/discovery is used, the user has to ensure
+  that all discovering endpoints are started before the enumerating endpoint
+  and are waiting for enumeration to be completed.
+  The configuration option CONFIG_RAPIDIO_DISC_TIMEOUT defines the time that a
+  discovering endpoint waits for enumeration to be completed. If the specified
+  timeout expires, the discovery process is terminated without obtaining RapidIO
+  network information. NOTE: a timed-out discovery process may be restarted later
+  using a user-space command, as described below, provided the given endpoint
+  was enumerated successfully.
+
+  (b) A statically linked enumeration and discovery process can be started by
+  a command from user space. This initiation method provides more flexibility
+  for system startup compared to option (a) above. After all participating
+  endpoints have been successfully booted, an enumeration process shall be
+  started first by issuing a user-space command; after enumeration is
+  completed, a discovery process can be started on all remaining endpoints.
+
+  (c) A modular enumeration and discovery process can be started by a command
+  from user space. After an enumeration/discovery module is loaded, a network
+  scan process can be started by issuing a user-space command.
+  Similar to option (b) above, an enumerator has to be started first.
+
+  (d) A modular enumeration and discovery process can be started by a module
+  initialization routine. In this case an enumerating module shall be loaded
+  first.
+
+When a network scan process is started, it calls an enumeration or discovery
+routine depending on the configured role of a master port: host or agent.
 
 Enumeration is performed by a master port if it is configured as a host port by
 assigning a host device ID greater than or equal to zero. A host device ID is
@@ -104,8 +147,58 @@ for it.
 The enumeration and discovery routines use RapidIO maintenance transactions
 to access the configuration space of devices.
 
-The enumeration process is implemented according to the enumeration algorithm
-outlined in the RapidIO Interconnect Specification: Annex I [1].
+4.2 Automatic Start of Enumeration and Discovery
+------------------------------------------------
+
+The automatic enumeration/discovery start method is applicable only to the
+built-in enumeration/discovery RapidIO configuration. To enable automatic
+start of enumeration/discovery using the existing basic enumerator method,
+use the boot command line parameter "rio-scan.scan=1".
+
+This configuration requires synchronized start of all RapidIO endpoints that
+form a network which will be enumerated/discovered. Discovering endpoints have
+to be started before an enumeration starts to ensure that all RapidIO
+controllers have been initialized and are ready to be discovered. The
+configuration parameter CONFIG_RAPIDIO_DISC_TIMEOUT defines the time (in
+seconds) that a discovering endpoint will wait for enumeration to be completed.
+
+When automatic enumeration/discovery start is selected, the basic method's
+initialization routine calls rio_init_mports() to perform enumeration or
+discovery for all known mport devices.
+
+Depending on RapidIO network size and configuration this automatic
+enumeration/discovery start method may be difficult to use due to the
+requirement for synchronized start of all endpoints.
+
+4.3 User-space Start of Enumeration and Discovery
+-------------------------------------------------
+
+User-space start of enumeration and discovery can be used with built-in and
+modular build configurations. For user-space controlled start, the RapidIO
+subsystem creates the write-only sysfs attribute file '/sys/bus/rapidio/scan'.
+To initiate an enumeration or discovery process on a specific mport device, a
+user needs to write the mport_ID (not the RapidIO destination ID) into that
+file. The mport_ID is a sequential number (0 ... RIO_MAX_MPORTS) assigned
+during mport device registration. For example, for a machine with a single
+RapidIO controller, the mport_ID for that controller will always be 0.
+
+To initiate RapidIO enumeration/discovery on all available mports, a user may
+write '-1' (or RIO_MPORT_ANY) into the scan attribute file.
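+
+For example (illustrative only), to start a scan on the first registered mport:
+
+  echo 0 > /sys/bus/rapidio/scan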
+
+4.4 Basic Enumeration Method
+----------------------------
+
+This is the original enumeration/discovery method, available since the
+first release of the RapidIO subsystem code. The enumeration process is
+implemented according to the enumeration algorithm outlined in the RapidIO
+Interconnect Specification: Annex I [1].
+
+This method can be configured as statically linked or as a loadable module.
+The method's single parameter "scan" allows triggering the enumeration/discovery
+process from the module initialization routine.
+
+This enumeration/discovery method can be started only once and does not support
+unloading if it is built as a module.
 
 The enumeration process traverses the network using a recursive depth-first
 algorithm. When a new device is found, the enumerator takes ownership of that
@@ -160,6 +253,19 @@ time period. If this wait time period expires before enumeration is completed,
 an agent skips RapidIO discovery and continues with remaining kernel
 initialization.
 
+4.5 Adding New Enumeration/Discovery Method
+-------------------------------------------
+
+The RapidIO subsystem code organization allows addition of new
+enumeration/discovery methods as new configuration options without significant
+impact to the core RapidIO code.
+
+A new enumeration/discovery method has to be attached to one or more mport
+devices before an enumeration/discovery process can be started. Normally,
+the method's module initialization routine calls rio_register_scan() to attach
+an enumerator to a specified mport device (or devices). The basic enumerator
+implementation demonstrates this process.
+
 5. References
 -------------
 
index 97f71ce575d65c08652788c95746d47d975e02d0..19878179da4c78657967868fc36ce2abe35aae94 100644 (file)
@@ -88,3 +88,20 @@ that exports additional attributes.
 
 IDT_GEN2:
  errlog - reads contents of device error log until it is empty.
+
+
+5. RapidIO Bus Attributes
+-------------------------
+
+RapidIO bus subdirectory /sys/bus/rapidio implements the following bus-specific
+attribute:
+
+  scan - allows triggering the enumeration/discovery process from user space.
+        This is a write-only attribute. To initiate an enumeration or discovery
+        process on a specific mport device, a user needs to write the mport_ID
+        (not the RapidIO destination ID) into this file. The mport_ID is a
+        sequential number (0 ... RIO_MAX_MPORTS) assigned to the mport device.
+        For example, for a machine with a single RapidIO controller, the
+        mport_ID for that controller will always be 0.
+        To initiate RapidIO enumeration/discovery on all available mports,
+        a user must write '-1' (or RIO_MPORT_ANY) into this attribute file.
index 3d7782b9f90d80d171c97e1d15371a9e2ec51ddc..f35a259a6564a4bc31a7dabbafd8f3f64e9c6a03 100644 (file)
@@ -3322,11 +3322,12 @@ F:      drivers/net/wan/dlci.c
 F:     drivers/net/wan/sdla.c
 
 FRAMEBUFFER LAYER
-M:     Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+M:     Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
+M:     Tomi Valkeinen <tomi.valkeinen@ti.com>
 L:     linux-fbdev@vger.kernel.org
 W:     http://linux-fbdev.sourceforge.net/
 Q:     http://patchwork.kernel.org/project/linux-fbdev/list/
-T:     git git://github.com/schandinat/linux-2.6.git fbdev-next
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/plagnioj/linux-fbdev.git
 S:     Maintained
 F:     Documentation/fb/
 F:     Documentation/devicetree/bindings/fb/
@@ -3865,9 +3866,16 @@ M:       K. Y. Srinivasan <kys@microsoft.com>
 M:     Haiyang Zhang <haiyangz@microsoft.com>
 L:     devel@linuxdriverproject.org
 S:     Maintained
-F:     drivers/hv/
+F:     arch/x86/include/asm/mshyperv.h
+F:     arch/x86/include/uapi/asm/hyperv.h
+F:     arch/x86/kernel/cpu/mshyperv.c
 F:     drivers/hid/hid-hyperv.c
+F:     drivers/hv/
 F:     drivers/net/hyperv/
+F:     drivers/scsi/storvsc_drv.c
+F:     drivers/video/hyperv_fb.c
+F:     include/linux/hyperv.h
+F:     tools/hv/
 
 I2C OVER PARALLEL PORT
 M:     Jean Delvare <khali@linux-fr.org>
@@ -4641,12 +4649,13 @@ F:      include/linux/sunrpc/
 F:     include/uapi/linux/sunrpc/
 
 KERNEL VIRTUAL MACHINE (KVM)
-M:     Marcelo Tosatti <mtosatti@redhat.com>
 M:     Gleb Natapov <gleb@redhat.com>
+M:     Paolo Bonzini <pbonzini@redhat.com>
 L:     kvm@vger.kernel.org
-W:     http://kvm.qumranet.com
+W:     http://linux-kvm.org
 S:     Supported
-F:     Documentation/*/kvm.txt
+F:     Documentation/*/kvm*.txt
+F:     Documentation/virtual/kvm/
 F:     arch/*/kvm/
 F:     arch/*/include/asm/kvm*
 F:     include/linux/kvm*
@@ -4976,6 +4985,13 @@ S:       Maintained
 F:     Documentation/hwmon/lm90
 F:     drivers/hwmon/lm90.c
 
+LM95234 HARDWARE MONITOR DRIVER
+M:     Guenter Roeck <linux@roeck-us.net>
+L:     lm-sensors@lm-sensors.org
+S:     Maintained
+F:     Documentation/hwmon/lm95234
+F:     drivers/hwmon/lm95234.c
+
 LME2510 MEDIA DRIVER
 M:     Malcolm Priestley <tvboxspy@gmail.com>
 L:     linux-media@vger.kernel.org
@@ -5509,18 +5525,18 @@ F:      Documentation/networking/s2io.txt
 F:     Documentation/networking/vxge.txt
 F:     drivers/net/ethernet/neterion/
 
-NETFILTER/IPTABLES/IPCHAINS
-P:     Harald Welte
-P:     Jozsef Kadlecsik
+NETFILTER/IPTABLES
 M:     Pablo Neira Ayuso <pablo@netfilter.org>
 M:     Patrick McHardy <kaber@trash.net>
+M:     Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 L:     netfilter-devel@vger.kernel.org
 L:     netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-T:     git git://1984.lsi.us.es/nf
-T:     git git://1984.lsi.us.es/nf-next
+Q:     http://patchwork.ozlabs.org/project/netfilter-devel/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
 S:     Supported
 F:     include/linux/netfilter*
 F:     include/linux/netfilter/
@@ -6069,9 +6085,18 @@ L:       linux-parisc@vger.kernel.org
 W:     http://www.parisc-linux.org/
 Q:     http://patchwork.kernel.org/project/linux-parisc/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
 S:     Maintained
 F:     arch/parisc/
+F:     Documentation/parisc/
 F:     drivers/parisc/
+F:     drivers/char/agp/parisc-agp.c
+F:     drivers/input/serio/gscps2.c
+F:     drivers/parport/parport_gsc.*
+F:     drivers/tty/serial/8250/8250_gsc.c
+F:     drivers/video/sti*
+F:     drivers/video/console/sti*
+F:     drivers/video/logo/logo_parisc*
 
 PC87360 HARDWARE MONITORING DRIVER
 M:     Jim Cromie <jim.cromie@gmail.com>
@@ -7854,7 +7879,7 @@ L:        linux-scsi@vger.kernel.org
 L:     target-devel@vger.kernel.org
 L:     http://groups.google.com/group/linux-iscsi-target-dev
 W:     http://www.linux-iscsi.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 S:     Supported
 F:     drivers/target/
 F:     include/target/
@@ -8182,6 +8207,13 @@ F:       drivers/mmc/host/sh_mobile_sdhi.c
 F:     include/linux/mmc/tmio.h
 F:     include/linux/mmc/sh_mobile_sdhi.h
 
+TMP401 HARDWARE MONITOR DRIVER
+M:     Guenter Roeck <linux@roeck-us.net>
+L:     lm-sensors@lm-sensors.org
+S:     Maintained
+F:     Documentation/hwmon/tmp401
+F:     drivers/hwmon/tmp401.c
+
 TMPFS (SHMEM FILESYSTEM)
 M:     Hugh Dickins <hughd@google.com>
 L:     linux-mm@kvack.org
index a3a834b11a97ef7d9236a56e7b5e7ca78e9799b8..49aa84b9d2fb8702d055dd09c321f7f560ea10a8 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 9
+PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc4
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
index dd0e8eb8042f746cc7072c0bf67d1fac8c0d4a7e..a4429bcd609ec8a3761e064d200d0646c3cae5f1 100644 (file)
@@ -213,6 +213,9 @@ config USE_GENERIC_SMP_HELPERS
 config GENERIC_SMP_IDLE_THREAD
        bool
 
+config GENERIC_IDLE_POLL_SETUP
+       bool
+
 # Select if arch init_task initializer is different to init/init_task.c
 config ARCH_INIT_TASK
        bool
index 491ae7923b10dd7ee8e156b53ee5bdb16edc1c30..5917099470eaf74d259051b4f7451683cc52901a 100644 (file)
@@ -182,6 +182,10 @@ config ARC_CACHE_PAGES
          Note that Global I/D ENABLE + Per Page DISABLE works but corollary
          Global DISABLE + Per Page ENABLE won't work
 
+config ARC_CACHE_VIPT_ALIASING
+       bool "Support VIPT Aliasing D$"
+       default n
+
 endif  #ARC_CACHE
 
 config ARC_HAS_ICCM
index c0fd3623c39387d5f2fbdc1da3552f08ff13e4da..0fa0d4abe79557cf8338c92ddf2a58a3e9d9d61c 100644 (file)
@@ -37,7 +37,7 @@ memory {
 
        soc100 {
                uart@FF100000 {
-                       pinctrl-names = "abilis,simple-default";
+                       pinctrl-names = "default";
                        pinctrl-0 = <&pctl_uart0>;
                };
                ethernet@FE100000 {
index 6f8c381f62685102891817da925d6b6b8aed9827..a4d80ce283aec1e7df6ca447fadc2a31b72f8b2c 100644 (file)
@@ -37,7 +37,7 @@ memory {
 
        soc100 {
                uart@FF100000 {
-                       pinctrl-names = "abilis,simple-default";
+                       pinctrl-names = "default";
                        pinctrl-0 = <&pctl_uart0>;
                };
                ethernet@FE100000 {
index a6139fc5aaa3e2f6faefbc7fdd5e369611697145..b97e3051ba4bfcd43db2bd66c00df1815a87090b 100644 (file)
@@ -88,8 +88,7 @@ tb10x_ictl: pic@fe002000 {
                };
 
                uart@FF100000 {
-                       compatible = "snps,dw-apb-uart",
-                                       "abilis,simple-pinctrl";
+                       compatible = "snps,dw-apb-uart";
                        reg = <0xFF100000 0x100>;
                        clock-frequency = <166666666>;
                        interrupts = <25 1>;
@@ -184,8 +183,7 @@ spi1: spi@0xFE011000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        cell-index = <1>;
-                       compatible = "abilis,tb100-spi",
-                                       "abilis,simple-pinctrl";
+                       compatible = "abilis,tb100-spi";
                        num-cs = <2>;
                        reg = <0xFE011000 0x20>;
                        interrupt-parent = <&tb10x_ictl>;
index 48af742f8b5a7f5d5078447a639730263b70771a..d8dd660898b9b214927c903e1198e36026513984 100644 (file)
@@ -32,7 +32,6 @@ generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
 generic-y += shmbuf.h
-generic-y += shmparam.h
 generic-y += siginfo.h
 generic-y += socket.h
 generic-y += sockios.h
index 6632273861fd205ce2bf25ed2d48b1e26f8f78ed..d5555fe4742a3f696c7c8ef72d6eae757b51469e 100644 (file)
@@ -55,9 +55,6 @@
        : "r"(data), "r"(ptr));         \
 })
 
-/* used to give SHMLBA a value to avoid Cache Aliasing */
-extern unsigned int ARC_shmlba;
-
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
 /*
index ee1f6eae82d2f6afa9a85b78a19d06602e94f862..ef62682e8d9567670fc435be9bc5ddfddb5fa06b 100644 (file)
@@ -19,6 +19,7 @@
 #define _ASM_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#include <asm/shmparam.h>
 
 /*
  * Semantically we need this because icache doesn't snoop dcache/dma.
@@ -33,7 +34,9 @@ void flush_cache_all(void);
 void flush_icache_range(unsigned long start, unsigned long end);
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
 void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void __flush_dcache_page(unsigned long paddr);
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+#define __flush_dcache_page(p, v)      \
+               ___flush_dcache_page((unsigned long)p, (unsigned long)v)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
@@ -50,18 +53,57 @@ void dma_cache_wback(unsigned long start, unsigned long sz);
 #define flush_cache_vmap(start, end)           flush_cache_all()
 #define flush_cache_vunmap(start, end)         flush_cache_all()
 
-/*
- * VM callbacks when entire/range of user-space V-P mappings are
- * torn-down/get-invalidated
- *
- * Currently we don't support D$ aliasing configs for our VIPT caches
- * NOPS for VIPT Cache with non-aliasing D$ configurations only
- */
-#define flush_cache_dup_mm(mm)                 /* called on fork */
+#define flush_cache_dup_mm(mm)                 /* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define flush_cache_mm(mm)                     /* called on munmap/exit */
 #define flush_cache_range(mm, u_vstart, u_vend)
 #define flush_cache_page(vma, u_vaddr, pfn)    /* PF handling/COW-break */
 
+#else  /* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+       unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+       unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+       struct page *page, unsigned long u_vaddr);
+
+#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+       return 1;
+#else
+       return 0;
+#endif
+}
+
+#define CACHE_COLOR(addr)      (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
+
+/*
+ * checks if two addresses (after page aligning) index into the same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2)                         \
+({                                                                     \
+       cache_is_vipt_aliasing() ?                                      \
+               (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0;         \
+})
+
 #define copy_to_user_page(vma, page, vaddr, dst, src, len)             \
 do {                                                                   \
        memcpy(dst, src, len);                                          \
index bdf54610455134d1af10a2adad2952f1928b788c..ab84bf131fe135a2070c4a22bc5c177020787759 100644 (file)
 #define get_user_page(vaddr)           __get_free_page(GFP_KERNEL)
 #define free_user_page(page, addr)     free_page(addr)
 
-/* TBD: for now don't worry about VIPT D$ aliasing */
 #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
 #define copy_page(to, from)            memcpy((to), (from), PAGE_SIZE)
 
-#define clear_user_page(addr, vaddr, pg)       clear_page(addr)
-#define copy_user_page(vto, vfrom, vaddr, pg)  copy_page(vto, vfrom)
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+                       unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
 
 #undef STRICT_MM_TYPECHECKS
 
index b7e36684c091d1f236d678007e998c5783b964bf..95b1522212a73fce42cd1a7a5c18546c61232346 100644 (file)
@@ -57,9 +57,9 @@
 
 #define _PAGE_ACCESSED      (1<<1)     /* Page is accessed (S) */
 #define _PAGE_CACHEABLE     (1<<2)     /* Page is cached (H) */
-#define _PAGE_EXECUTE       (1<<3)     /* Page has user execute perm (H) */
-#define _PAGE_WRITE         (1<<4)     /* Page has user write perm (H) */
-#define _PAGE_READ          (1<<5)     /* Page has user read perm (H) */
+#define _PAGE_U_EXECUTE     (1<<3)     /* Page has user execute perm (H) */
+#define _PAGE_U_WRITE       (1<<4)     /* Page has user write perm (H) */
+#define _PAGE_U_READ        (1<<5)     /* Page has user read perm (H) */
 #define _PAGE_K_EXECUTE     (1<<6)     /* Page has kernel execute perm (H) */
 #define _PAGE_K_WRITE       (1<<7)     /* Page has kernel write perm (H) */
 #define _PAGE_K_READ        (1<<8)     /* Page has kernel perm (H) */
@@ -72,9 +72,9 @@
 
 /* PD1 */
 #define _PAGE_CACHEABLE     (1<<0)     /* Page is cached (H) */
-#define _PAGE_EXECUTE       (1<<1)     /* Page has user execute perm (H) */
-#define _PAGE_WRITE         (1<<2)     /* Page has user write perm (H) */
-#define _PAGE_READ          (1<<3)     /* Page has user read perm (H) */
+#define _PAGE_U_EXECUTE     (1<<1)     /* Page has user execute perm (H) */
+#define _PAGE_U_WRITE       (1<<2)     /* Page has user write perm (H) */
+#define _PAGE_U_READ        (1<<3)     /* Page has user read perm (H) */
 #define _PAGE_K_EXECUTE     (1<<4)     /* Page has kernel execute perm (H) */
 #define _PAGE_K_WRITE       (1<<5)     /* Page has kernel write perm (H) */
 #define _PAGE_K_READ        (1<<6)     /* Page has kernel perm (H) */
@@ -93,7 +93,8 @@
 #endif
 
 /* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
+                       _PAGE_GLOBAL | _PAGE_PRESENT)
 
 #ifdef CONFIG_ARC_CACHE_PAGES
 #define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
  * -by default cached, unless config otherwise
  * -present in memory
  */
-#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
+
+#define _PAGE_READ     (_PAGE_U_READ    | _PAGE_K_READ)
+#define _PAGE_WRITE    (_PAGE_U_WRITE   | _PAGE_K_WRITE)
+#define _PAGE_EXECUTE  (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
 
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
  * kernel vaddr space - visible in all addr spaces, but kernel mode only
  * Thus Global, all-kernel-access, no-user-access, cached
  */
-#define PAGE_KERNEL          __pgprot(___DEF | _PAGE_GLOBAL)
+#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
 
 /* ioremap */
-#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
-                                                    _PAGE_GLOBAL)
+#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
@@ -395,6 +399,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 #include <asm-generic/pgtable.h>
 
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
 /*
  * No page table caches to initialise
  */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644 (file)
index 0000000..fffeecc
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_SHMPARAM_H
+#define __ARC_ASM_SHMPARAM_H
+
+/* Handle up to 2 cache bins */
+#define        SHMLBA  (2 * PAGE_SIZE)
+
+/* Enforce SHMLBA in shmat */
+#define __ARCH_FORCE_SHMLBA
+
+#endif
index fe91719866a57fed68e5ddb53228e7fec61279b1..cb0c708ca6654cd38d0d73f40e6f43135e34c01e 100644 (file)
@@ -16,7 +16,7 @@
 /* Masks for actual TLB "PD"s */
 #define PTE_BITS_IN_PD0        (_PAGE_GLOBAL | _PAGE_PRESENT)
 #define PTE_BITS_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE | \
-                        _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+                        _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
                         _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
 
 #ifndef __ASSEMBLY__
@@ -30,13 +30,20 @@ do {                                                \
 /*
  * This pair is called at time of munmap/exit to flush cache and TLB entries
  * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
- *    as we don't support aliasing configs in our VIPT D$.
+ * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
  * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
  *
  * Note, read http://lkml.org/lkml/2004/1/15/6
  */
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
 #define tlb_start_vma(tlb, vma)
+#else
+#define tlb_start_vma(tlb, vma)                                                \
+do {                                                                   \
+       if (!tlb->fullmm)                                               \
+               flush_cache_range(vma, vma->vm_start, vma->vm_end);     \
+} while(0)
+#endif
 
 #define tlb_end_vma(tlb, vma)                                          \
 do {                                                                   \
index 168dc146a8f6e33f6bf0e5bb83ca6cf9da1511af..ac95cc239c1e47d3d2f5a133b1d66936c6af0b91 100644 (file)
@@ -7,4 +7,4 @@
 #
 
 obj-y  := extable.o ioremap.o dma.o fault.o init.o
-obj-y  += tlb.o tlbex.o cache_arc700.o
+obj-y  += tlb.o tlbex.o cache_arc700.o mmap.o
index c854cf95f70666669dcff0eec6006bbbe788bab0..aedce1905441cffb1958f00af149b0a939740371 100644 (file)
@@ -68,6 +68,7 @@
 #include <linux/mmu_context.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/pagemap.h>
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 #include <asm/setup.h>
@@ -138,6 +139,7 @@ void __cpuinit arc_cache_init(void)
        struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
        struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
        int way_pg_ratio = way_pg_ratio;
+       int dcache_does_alias;
        char str[256];
 
        printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
@@ -184,9 +186,13 @@ void __cpuinit arc_cache_init(void)
                panic("Cache H/W doesn't match kernel Config");
        }
 
+       dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
+
        /* check for D-Cache aliasing */
-       if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
-               panic("D$ aliasing not handled right now\n");
+       if (dcache_does_alias && !cache_is_vipt_aliasing())
+               panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+       else if (!dcache_does_alias && cache_is_vipt_aliasing())
+               panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
 #endif
 
        /* Set the default Invalidate Mode to "simpy discard dirty lines"
@@ -269,47 +275,57 @@ static inline void __dc_entire_op(const int cacheop)
  * Per Line Operation on D-Cache
  * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
  * It's sole purpose is to help gcc generate ZOL
+ * (aliasing VIPT dcache flushing needs both vaddr and paddr)
  */
-static inline void __dc_line_loop(unsigned long start, unsigned long sz,
-                                         int aux_reg)
+static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
+                                 unsigned long sz, const int aux_reg)
 {
-       int num_lines, slack;
+       int num_lines;
 
        /* Ensure we properly floor/ceil the non-line aligned/sized requests
-        * and have @start - aligned to cache line and integral @num_lines.
+        * and have @paddr - aligned to cache line and integral @num_lines.
         * This however can be avoided for page sized since:
-        *  -@start will be cache-line aligned already (being page aligned)
+        *  -@paddr will be cache-line aligned already (being page aligned)
         *  -@sz will be integral multiple of line size (being page sized).
         */
        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-               slack = start & ~DCACHE_LINE_MASK;
-               sz += slack;
-               start -= slack;
+               sz += paddr & ~DCACHE_LINE_MASK;
+               paddr &= DCACHE_LINE_MASK;
+               vaddr &= DCACHE_LINE_MASK;
        }
 
        num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
 
+#if (CONFIG_ARC_MMU_VER <= 2)
+       paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+#endif
+
        while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
                /*
                 * Just as for I$, in MMU v3, D$ ops also require
                 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
-                * But we pass phy addr for both. This works since Linux
-                * doesn't support aliasing configs for D$, yet.
-                * Thus paddr is enough to provide both tag and index.
                 */
-               write_aux_reg(ARC_REG_DC_PTAG, start);
+               write_aux_reg(ARC_REG_DC_PTAG, paddr);
+
+               write_aux_reg(aux_reg, vaddr);
+               vaddr += ARC_DCACHE_LINE_LEN;
+#else
+               /* paddr contains stuffed vaddrs bits */
+               write_aux_reg(aux_reg, paddr);
 #endif
-               write_aux_reg(aux_reg, start);
-               start += ARC_DCACHE_LINE_LEN;
+               paddr += ARC_DCACHE_LINE_LEN;
        }
 }
 
+/* For kernel mappings cache operation: index is same as paddr */
+#define __dc_line_op_k(p, sz, op)      __dc_line_op(p, p, sz, op)
+
 /*
  * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
  */
-static inline void __dc_line_op(unsigned long start, unsigned long sz,
-                                       const int cacheop)
+static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+                               unsigned long sz, const int cacheop)
 {
        unsigned long flags, tmp = tmp;
        int aux;
@@ -332,7 +348,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
        else
                aux = ARC_REG_DC_FLDL;
 
-       __dc_line_loop(start, sz, aux);
+       __dc_line_loop(paddr, vaddr, sz, aux);
 
        if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
                wait_for_flush();
@@ -347,7 +363,8 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 #else
 
 #define __dc_entire_op(cacheop)
-#define __dc_line_op(start, sz, cacheop)
+#define __dc_line_op(paddr, vaddr, sz, cacheop)
+#define __dc_line_op_k(paddr, sz, cacheop)
 
 #endif /* CONFIG_ARC_HAS_DCACHE */
 
@@ -399,49 +416,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz)
 {
        unsigned long flags;
-       int num_lines, slack;
-       unsigned int addr;
+       int num_lines;
 
        /*
         * Ensure we properly floor/ceil the non-line aligned/sized requests:
         * However page sized flushes can be compile time optimised.
-        *  -@phy_start will be cache-line aligned already (being page aligned)
+        *  -@paddr will be cache-line aligned already (being page aligned)
         *  -@sz will be integral multiple of line size (being page sized).
         */
        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-               slack = phy_start & ~ICACHE_LINE_MASK;
-               sz += slack;
-               phy_start -= slack;
+               sz += paddr & ~ICACHE_LINE_MASK;
+               paddr &= ICACHE_LINE_MASK;
+               vaddr &= ICACHE_LINE_MASK;
        }
 
        num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
 
-#if (CONFIG_ARC_MMU_VER > 2)
-       vaddr &= ~ICACHE_LINE_MASK;
-       addr = phy_start;
-#else
+#if (CONFIG_ARC_MMU_VER <= 2)
        /* bits 17:13 of vaddr go as bits 4:0 of paddr */
-       addr = phy_start | ((vaddr >> 13) & 0x1F);
+       paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 #endif
 
        local_irq_save(flags);
        while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
                /* tag comes from phy addr */
-               write_aux_reg(ARC_REG_IC_PTAG, addr);
+               write_aux_reg(ARC_REG_IC_PTAG, paddr);
 
                /* index bits come from vaddr */
                write_aux_reg(ARC_REG_IC_IVIL, vaddr);
                vaddr += ARC_ICACHE_LINE_LEN;
 #else
                /* paddr contains stuffed vaddrs bits */
-               write_aux_reg(ARC_REG_IC_IVIL, addr);
+               write_aux_reg(ARC_REG_IC_IVIL, paddr);
 #endif
-               addr += ARC_ICACHE_LINE_LEN;
+               paddr += ARC_ICACHE_LINE_LEN;
        }
        local_irq_restore(flags);
 }
@@ -457,29 +470,66 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
  * Exported APIs
  */
 
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
+ *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ *  -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
+ */
 void flush_dcache_page(struct page *page)
 {
-       /* Make a note that dcache is not yet flushed for this page */
-       set_bit(PG_arch_1, &page->flags);
+       struct address_space *mapping;
+
+       if (!cache_is_vipt_aliasing()) {
+               set_bit(PG_arch_1, &page->flags);
+               return;
+       }
+
+       /* don't handle anon pages here */
+       mapping = page_mapping(page);
+       if (!mapping)
+               return;
+
+       /*
+        * pagecache page, file not yet mapped to userspace
+        * Make a note that K-mapping is dirty
+        */
+       if (!mapping_mapped(mapping)) {
+               set_bit(PG_arch_1, &page->flags);
+       } else if (page_mapped(page)) {
+
+               /* kernel reading from page with U-mapping */
+               void *paddr = page_address(page);
+               unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+
+               if (addr_not_cache_congruent(paddr, vaddr))
+                       __flush_dcache_page(paddr, vaddr);
+       }
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
 
 void dma_cache_wback_inv(unsigned long start, unsigned long sz)
 {
-       __dc_line_op(start, sz, OP_FLUSH_N_INV);
+       __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
 void dma_cache_inv(unsigned long start, unsigned long sz)
 {
-       __dc_line_op(start, sz, OP_INV);
+       __dc_line_op_k(start, sz, OP_INV);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
 void dma_cache_wback(unsigned long start, unsigned long sz)
 {
-       __dc_line_op(start, sz, OP_FLUSH);
+       __dc_line_op_k(start, sz, OP_FLUSH);
 }
 EXPORT_SYMBOL(dma_cache_wback);
 
@@ -560,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 
        local_irq_save(flags);
        __ic_line_inv_vaddr(paddr, vaddr, len);
-       __dc_line_op(paddr, len, OP_FLUSH);
+       __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
        local_irq_restore(flags);
 }
 
@@ -570,9 +620,13 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
        __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
 }
 
-void __flush_dcache_page(unsigned long paddr)
+/*
+ * wrapper to clear out kernel or userspace mappings of a page
+ * For kernel mappings @vaddr == @paddr
+ */
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
 {
-       __dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV);
+       __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
 }
 
 void flush_icache_all(void)
@@ -601,6 +655,88 @@ noinline void flush_cache_all(void)
 
 }
 
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+       flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+                     unsigned long pfn)
+{
+       unsigned int paddr = pfn << PAGE_SHIFT;
+
+       __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+                      unsigned long end)
+{
+       flush_cache_all();
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+                    unsigned long u_vaddr)
+{
+       /* TBD: do we really need to clear the kernel mapping */
+       __flush_dcache_page(page_address(page), u_vaddr);
+       __flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
+void copy_user_highpage(struct page *to, struct page *from,
+       unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+       void *kfrom = page_address(from);
+       void *kto = page_address(to);
+       int clean_src_k_mappings = 0;
+
+       /*
+        * If SRC page was already mapped in userspace AND its U-mapping is
+        * not congruent with K-mapping, sync former to physical page so that
+        * K-mapping in memcpy below, sees the right data
+        *
+        * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+        * equally valid for SRC page as well
+        */
+       if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+               __flush_dcache_page(kfrom, u_vaddr);
+               clean_src_k_mappings = 1;
+       }
+
+       copy_page(kto, kfrom);
+
+       /*
+        * Mark the DST page K-mapping as dirty, to be finalized later by
+        * update_mmu_cache(). The finalization could be done here as well
+        * (given that both vaddr and paddr are available), but
+        * update_mmu_cache() already has code to do that for other
+        * non-copied user pages (e.g. read faults which wire in the
+        * pagecache page directly).
+        */
+       set_bit(PG_arch_1, &to->flags);
+
+       /*
+        * If SRC was already user-mapped and non-congruent to the kernel
+        * mapping, sync the kernel mapping back to the physical page.
+        */
+       if (clean_src_k_mappings) {
+               __flush_dcache_page(kfrom, kfrom);
+       } else {
+               set_bit(PG_arch_1, &from->flags);
+       }
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+       clear_page(to);
+       set_bit(PG_arch_1, &page->flags);
+}
+
+
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
  * Needed for JITs which generate code on the fly
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
new file mode 100644 (file)
index 0000000..2e06d56
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * ARC700 mmap
+ *
+ * (started from arm version - for VIPT alias handling)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/cacheflush.h>
+
+#define COLOUR_ALIGN(addr, pgoff)                      \
+       ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +      \
+        (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+/*
+ * Ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches: we need to
+ * ensure that a specific page of an object is always
+ * mapped at an address that is a multiple of
+ * SHMLBA bytes.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int do_align = 0;
+       int aliasing = cache_is_vipt_aliasing();
+       struct vm_unmapped_area_info info;
+
+       /*
+        * We only need to do colour alignment if D cache aliases.
+        */
+       if (aliasing)
+               do_align = filp || (flags & MAP_SHARED);
+
+       /*
+        * We enforce the MAP_FIXED case.
+        */
+       if (flags & MAP_FIXED) {
+               if (aliasing && flags & MAP_SHARED &&
+                   (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+                       return -EINVAL;
+               return addr;
+       }
+
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (addr) {
+               if (do_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+               else
+                       addr = PAGE_ALIGN(addr);
+
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                   (!vma || addr + len <= vma->vm_start))
+                       return addr;
+       }
+
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = mm->mmap_base;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
+       return vm_unmapped_area(&info);
+}
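
Editorial worked example of the COLOUR_ALIGN() arithmetic above (not part of the patch; SHMLBA = 0x4000 and 4 KiB pages are assumed values for illustration):

/*
 *   addr  = 0x20001234, pgoff = 3, SHMLBA = 0x4000 (4 colours * 4 KiB)
 *   step1 = (addr + SHMLBA-1) & ~(SHMLBA-1)    = 0x20004000   (round up)
 *   step2 = (pgoff << PAGE_SHIFT) & (SHMLBA-1) = 0x3000       (wanted colour)
 *   result = step1 + step2                     = 0x20007000
 *
 * result - (pgoff << PAGE_SHIFT) is SHMLBA-aligned, so every page of the
 * object gets the same D-cache colour in all shared mappings and a VIPT
 * aliasing cache cannot hold two stale copies of it.
 */
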
index 003d69ac6ffa63935a7bedf1089b61e238ff7a12..fe1c5a073afe4cf996d28b6486c6b296fbd94344 100644 (file)
@@ -421,25 +421,41 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 /*
  * Called at the end of pagefault, for a userspace mapped page
  *  -pre-install the corresponding TLB entry into MMU
- *  -Finalize the delayed D-cache flush (wback+inv kernel mapping)
+ *  -Finalize the delayed D-cache flush of the kernel mapping of the page,
+ *     deferred by flush_dcache_page(), copy_user_page()
+ *
+ * Note that the flush (when done) involves both WBACK - so the physical page
+ * is in sync - and INV - so no non-congruent aliases remain
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
                      pte_t *ptep)
 {
        unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+       unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
 
        create_tlb(vma, vaddr, ptep);
 
-       /* icache doesn't snoop dcache, thus needs to be made coherent here */
-       if (vma->vm_flags & VM_EXEC) {
+       /*
+        * Exec page : Independent of aliasing/page-color considerations,
+        *             since icache doesn't snoop dcache on ARC, any dirty
+        *             K-mapping of a code page needs to be wback+inv so that
+        *             icache fetch by userspace sees code correctly.
+        * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+        *             so userspace sees the right data.
+        *  (Avoids the flush for Non-exec + congruent mapping case)
+        */
+       if ((vma->vm_flags & VM_EXEC) ||
+            addr_not_cache_congruent(paddr, vaddr)) {
                struct page *page = pfn_to_page(pte_pfn(*ptep));
 
-               /* if page was dcache dirty, flush now */
                int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
                if (dirty) {
-                       unsigned long paddr =  pte_val(*ptep) & PAGE_MASK;
-                       __flush_dcache_page(paddr);
-                       __inv_icache_page(paddr, vaddr);
+                       /* wback + inv dcache lines */
+                       __flush_dcache_page(paddr, paddr);
+
+                       /* invalidate any existing icache lines */
+                       if (vma->vm_flags & VM_EXEC)
+                               __inv_icache_page(paddr, vaddr);
                }
        }
 }
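
Editorial summary (not part of the patch) of how this hunk and the earlier cache_arc700.c hunks fit together as one deferred-flush protocol:

/*
 *   flush_dcache_page(), copy_user_highpage(), clear_user_page()
 *       -> only mark the kernel mapping dirty: set_bit(PG_arch_1, ...)
 *
 *   update_mmu_cache()   (end of the page fault, final U-mapping now known)
 *       -> if (VM_EXEC || K/U mappings not congruent)
 *              if (test_and_clear_bit(PG_arch_1, ...))
 *                  __flush_dcache_page(paddr, paddr);    wback+inv K-mapping
 *                  if (VM_EXEC)
 *                      __inv_icache_page(paddr, vaddr);  I-cache refetch
 *
 * Deferring the flush to the point where both paddr and the user vaddr are
 * known avoids flushing pages whose mappings turn out to be congruent anyway.
 */
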
index 9df765dc7c3abb56755cac6591aa5765ca155587..3357d26ffe54267a8ba9d26e596019b187e23bdd 100644 (file)
@@ -277,7 +277,7 @@ ARC_ENTRY EV_TLBMissI
        ;----------------------------------------------------------------
        ; VERIFY_PTE: Check if PTE permissions approp for executing code
        cmp_s   r2, VMALLOC_START
-       mov.lo  r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
+       mov.lo  r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE)
        mov.hs  r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
 
        and     r3, r0, r2  ; Mask out NON Flag bits from PTE
@@ -320,9 +320,9 @@ ARC_ENTRY EV_TLBMissD
        mov_s   r2, 0
        lr      r3, [ecr]
        btst_s  r3, ECR_C_BIT_DTLB_LD_MISS      ; Read Access
-       or.nz   r2, r2, _PAGE_READ              ; chk for Read flag in PTE
+       or.nz   r2, r2, _PAGE_U_READ            ; chk for Read flag in PTE
        btst_s  r3, ECR_C_BIT_DTLB_ST_MISS      ; Write Access
-       or.nz   r2, r2, _PAGE_WRITE             ; chk for Write flag in PTE
+       or.nz   r2, r2, _PAGE_U_WRITE           ; chk for Write flag in PTE
        ; Above laddering takes care of XCHG access
        ;   which is both Read and Write
 
index 4e121272c4e5418b86f1db7845713f904f428593..1d3452100f1ff448cd4ed493e7ddf85fde0d9f35 100644 (file)
@@ -27,10 +27,3 @@ menuconfig ARC_PLAT_TB10X
          Abilis Systems. TB10x is based on the ARC700 CPU architecture.
          Say Y if you are building a kernel for one of the SOCs in this
          series (e.g. TB100 or TB101). If in doubt say N.
-
-if ARC_PLAT_TB10X
-
-config GENERIC_GPIO
-       def_bool y
-
-endif
index d3567691c7e1259174a9b6db4cf65593c2fb7536..06cb309294608a6753652049e7c4f3df6d8f8ada 100644 (file)
@@ -34,31 +34,6 @@ static void __init tb10x_platform_init(void)
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static void __init tb10x_platform_late_init(void)
-{
-       struct device_node *dn;
-
-       /*
-        * Pinctrl documentation recommends setting up the iomux here for
-        * all modules which don't require control over the pins themselves.
-        * Modules which need this kind of assistance are compatible with
-        * "abilis,simple-pinctrl", i.e. we can easily iterate over them.
-        * TODO: Does this recommended method work cleanly with pins required
-        * by modules?
-        */
-       for_each_compatible_node(dn, NULL, "abilis,simple-pinctrl") {
-               struct platform_device *pd = of_find_device_by_node(dn);
-               struct pinctrl *pctl;
-
-               pctl = pinctrl_get_select(&pd->dev, "abilis,simple-default");
-               if (IS_ERR(pctl)) {
-                       int ret = PTR_ERR(pctl);
-                       dev_err(&pd->dev, "Could not set up pinctrl: %d\n",
-                               ret);
-               }
-       }
-}
-
 static const char *tb10x_compat[] __initdata = {
        "abilis,arc-tb10x",
        NULL,
@@ -67,5 +42,4 @@ static const char *tb10x_compat[] __initdata = {
 MACHINE_START(TB10x, "tb10x")
        .dt_compat      = tb10x_compat,
        .init_machine   = tb10x_platform_init,
-       .init_late      = tb10x_platform_late_init,
 MACHINE_END
index d423d58f938dc40fb5b3c01445b9184572631b10..49d993cee51232874a81814fe39ca99e09b88bad 100644 (file)
@@ -38,6 +38,7 @@ config ARM
        select HAVE_GENERIC_HARDIRQS
        select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
        select HAVE_IDE if PCI || ISA || PCMCIA
+       select HAVE_IRQ_TIME_ACCOUNTING
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZMA
        select HAVE_KERNEL_LZO
@@ -488,7 +489,7 @@ config ARCH_IXP4XX
 config ARCH_DOVE
        bool "Marvell Dove"
        select ARCH_REQUIRE_GPIOLIB
-       select CPU_V7
+       select CPU_PJ4
        select GENERIC_CLOCKEVENTS
        select MIGHT_HAVE_PCI
        select PINCTRL
index 47374085befdf6d4ad1cc49c0714e52453ec510f..1ba358ba16b871aec3b366cab9b4e4066048e69c 100644 (file)
@@ -309,7 +309,7 @@ define archhelp
   echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
   echo  '* xipImage      - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)'
   echo  '  uImage        - U-Boot wrapped zImage'
-  echo  '  bootpImage    - Combined zImage and initial RAM disk' 
+  echo  '  bootpImage    - Combined zImage and initial RAM disk'
   echo  '                  (supply initrd image via make variable INITRD=<path>)'
   echo  '* dtbs          - Build device tree blobs for enabled boards'
   echo  '  install       - Install uncompressed kernel'
index b9f7121e6ecf02c561e5b1b10308659aad23fb28..f0895c581a89be8668a99db10e6873ae94be0cef 100644 (file)
@@ -177,7 +177,9 @@ dtb-$(CONFIG_ARCH_SPEAR3XX)+= spear300-evb.dtb \
        spear320-evb.dtb \
        spear320-hmi.dtb
 dtb-$(CONFIG_ARCH_SPEAR6XX)+= spear600-evb.dtb
-dtb-$(CONFIG_ARCH_SUNXI) += sun4i-a10-cubieboard.dtb \
+dtb-$(CONFIG_ARCH_SUNXI) += \
+       sun4i-a10-cubieboard.dtb \
+       sun4i-a10-mini-xplus.dtb \
        sun4i-a10-hackberry.dtb \
        sun5i-a13-olinuxino.dtb
 dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
index 272bbc65fab05b809b9df0a57497c14d37eac302..550eb772c30e4c47c2f7f0896469a033dee3e23b 100644 (file)
@@ -33,7 +33,8 @@ soc {
                #size-cells = <1>;
                compatible = "simple-bus";
                interrupt-parent = <&mpic>;
-               ranges = <0 0 0xd0000000 0x100000>;
+               ranges = <0          0 0xd0000000 0x0100000 /* internal registers */
+                         0xe0000000 0 0xe0000000 0x8100000 /* PCIe */>;
 
                internal-regs {
                        compatible = "simple-bus";
index b2c1b5af9749cd0f0b358d32dfbb159c7deab280..aee2b1866ce2ede35fbd58a6bbea1e29ac6b16ea 100644 (file)
@@ -29,7 +29,8 @@ aliases {
        };
 
        soc {
-               ranges = <0 0xd0000000 0x100000>;
+               ranges = <0          0xd0000000 0x0100000 /* internal registers */
+                         0xe0000000 0xe0000000 0x8100000 /* PCIe */>;
                internal-regs {
                        system-controller@18200 {
                                compatible = "marvell,armada-370-xp-system-controller";
@@ -38,12 +39,12 @@ system-controller@18200 {
 
                        L2: l2-cache {
                                compatible = "marvell,aurora-outer-cache";
-                               reg = <0xd0008000 0x1000>;
+                               reg = <0x08000 0x1000>;
                                cache-id-part = <0x100>;
                                wt-override;
                        };
 
-                       mpic: interrupt-controller@20000 {
+                       interrupt-controller@20000 {
                                reg = <0x20a00 0x1d0>, <0x21870 0x58>;
                        };
 
index 26ad06fc147ed78f9446bb5119a1e6e001867dba..3ee63d128e27cf27b3c8f6c5211bcf05b6dadc9e 100644 (file)
@@ -39,6 +39,9 @@ memory {
        };
 
        soc {
+               ranges = <0          0 0xd0000000 0x100000
+                         0xf0000000 0 0xf0000000 0x1000000>;
+
                internal-regs {
                        serial@12000 {
                                clock-frequency = <250000000>;
index f14d36c4615984399b71c36e920ed84be96fcb74..46b785064dd869917afa981103136459a5826922 100644 (file)
@@ -27,6 +27,9 @@ memory {
        };
 
        soc {
+               ranges = <0          0 0xd0000000 0x100000
+                         0xf0000000 0 0xf0000000 0x8000000>;
+
                internal-regs {
                        serial@12000 {
                                clock-frequency = <250000000>;
index bacab11c10dc8151eb7f2b6eb8cf0d2905a52f0e..5b902f9a3af29a84fd0ee83000c8d0f2ffa280ea 100644 (file)
@@ -31,7 +31,7 @@ L2: l2-cache {
                                wt-override;
                        };
 
-                       mpic: interrupt-controller@20000 {
+                       interrupt-controller@20000 {
                              reg = <0x20a00 0x2d0>, <0x21070 0x58>;
                        };
 
index 70b5ccbac234a63d12228e161e2620d4e2d8e40a..84c4bef2d7268760a6d927bd8ed2fdf7d547ea59 100644 (file)
@@ -264,7 +264,7 @@ pinctrl_macb_rmii_mii_alt: macb_rmii_mii-1 {
                                                atmel,pins =
                                                        <0 10 0x2 0x0   /* PA10 periph B */
                                                         0 11 0x2 0x0   /* PA11 periph B */
-                                                        0 24 0x2 0x0   /* PA24 periph B */
+                                                        0 22 0x2 0x0   /* PA22 periph B */
                                                         0 25 0x2 0x0   /* PA25 periph B */
                                                         0 26 0x2 0x0   /* PA26 periph B */
                                                         0 27 0x2 0x0   /* PA27 periph B */
index 3de8e6dfbcb150baa7251d1803990724d93e4fe2..8d25f889928eccd3c7a3440dd9522e4f1d34e37c 100644 (file)
@@ -57,6 +57,7 @@ aic: interrupt-controller@fffff000 {
                                compatible = "atmel,at91rm9200-aic";
                                interrupt-controller;
                                reg = <0xfffff000 0x200>;
+                               atmel,external-irqs = <31>;
                        };
 
                        ramc0: ramc@ffffe800 {
index 3b40d11d65e70c0daabdeddafe27180cacd12825..315250b4995e74e4452af0d0e46bb847790fbaf5 100644 (file)
@@ -11,7 +11,7 @@
 /include/ "at91sam9x5ek.dtsi"
 
 / {
-       model = "Atmel AT91SAM9G25-EK";
+       model = "Atmel AT91SAM9X25-EK";
        compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
 
        ahb {
index 98dfc3ea5c0bee94a80a08da8c5d994d84125977..0673524238a61f706c7c17a096ef3319fb095287 100644 (file)
@@ -497,6 +497,21 @@ usb@12120000 {
                clock-names = "usbhost";
        };
 
+       usbphy@12130000 {
+               compatible = "samsung,exynos5250-usb2phy";
+               reg = <0x12130000 0x100>;
+               clocks = <&clock 1>, <&clock 285>;
+               clock-names = "ext_xtal", "usbhost";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               usbphy-sys {
+                       reg = <0x10040704 0x8>,
+                             <0x10050230 0x4>;
+               };
+       };
+
        amba {
                #address-cells = <1>;
                #size-cells = <1>;
index 82a404da1c0d9531b632e265943632e7ea48d468..99ba6e14ebf3f5880660e3073aebda1fdce3e53b 100644 (file)
@@ -516,7 +516,7 @@ gpmc: gpmc@6e000000 {
                usb_otg_hs: usb_otg_hs@480ab000 {
                        compatible = "ti,omap3-musb";
                        reg = <0x480ab000 0x1000>;
-                       interrupts = <0 92 0x4>, <0 93 0x4>;
+                       interrupts = <92>, <93>;
                        interrupt-names = "mc", "dma";
                        ti,hwmods = "usb_otg_hs";
                        multipoint = <1>;
index 2e643ea51cceba014b713843a81a1f11a4fd88be..5000e0d428496d8105f6157f4ba54b4603796657 100644 (file)
@@ -75,11 +75,6 @@ spi0: spi@f0004000 {
                                compatible = "atmel,at91sam9x5-spi";
                                reg = <0xf0004000 0x100>;
                                interrupts = <24 4 3>;
-                               cs-gpios = <&pioD 13 0
-                                           &pioD 14 0 /* conflicts with SCK0 and CANRX0 */
-                                           &pioD 15 0 /* conflicts with CTS0 and CANTX0 */
-                                           &pioD 16 0 /* conflicts with RTS0 and PWMFI3 */
-                                          >;
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_spi0>;
                                status = "disabled";
@@ -156,7 +151,7 @@ usart1: serial@f0020000 {
                        };
 
                        macb0: ethernet@f0028000 {
-                               compatible = "cnds,pc302-gem", "cdns,gem";
+                               compatible = "cdns,pc302-gem", "cdns,gem";
                                reg = <0xf0028000 0x100>;
                                interrupts = <34 4 3>;
                                pinctrl-names = "default";
@@ -203,11 +198,6 @@ spi1: spi@f8008000 {
                                compatible = "atmel,at91sam9x5-spi";
                                reg = <0xf8008000 0x100>;
                                interrupts = <25 4 3>;
-                               cs-gpios = <&pioC 25 0
-                                           &pioC 26 0 /* conflitcs with TWD1 and ISI_D11 */
-                                           &pioC 27 0 /* conflitcs with TWCK1 and ISI_D10 */
-                                           &pioC 28 0 /* conflitcs with PWMFI0 and ISI_D9 */
-                                          >;
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_spi1>;
                                status = "disabled";
index 1f8ed404626cdb1edba6825a49c90d7a435d81aa..b336e7787cb3ea35ac312e83a66bc948a988e977 100644 (file)
@@ -32,6 +32,10 @@ main_clock: clock@0 {
 
        ahb {
                apb {
+                       spi0: spi@f0004000 {
+                               cs-gpios = <&pioD 13 0>, <0>, <0>, <0>;
+                       };
+
                        macb0: ethernet@f0028000 {
                                phy-mode = "rgmii";
                        };
index b28fbf3408e3b29c265db2f7448649819fe85df1..6f82d9368948856e8123b7193bbfc8230986f96f 100644 (file)
@@ -14,13 +14,19 @@ chosen {
                bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
        };
 
+       /* This is where the interrupt is routed on the S8815 board */
+       external-bus@34000000 {
+               ethernet@300 {
+                       interrupt-parent = <&gpio3>;
+                       interrupts = <8 0x1>;
+               };
+       };
+
        /* Custom board node with GPIO pins to active etc */
        usb-s8815 {
                /* The S8815 is using this very GPIO pin for the SMSC91x IRQs */
                ethernet-gpio {
-                       gpios = <&gpio3 19 0x1>;
-                       interrupts = <19 0x1>;
-                       interrupt-parent = <&gpio3>;
+                       gpios = <&gpio3 8 0x1>;
                };
                /* This will bias the MMC/SD card detect line */
                mmcsd-gpio {
index 4a7c35d6726aaa1290e4ee42b6ac47f517fc159a..078ed7f618d7910cc6030fdf9b288b3b124da065 100644 (file)
@@ -22,8 +22,8 @@ chosen {
                bootargs = "earlyprintk console=ttyS0,115200";
        };
 
-       soc {
-               uart0: uart@01c28000 {
+       soc@01c20000 {
+               uart0: serial@01c28000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&uart0_pins_a>;
                        status = "okay";
index 52b88d81b7bbaf435d7a9d7d4c85a9e5fe59f07b..3caed0db698614f45652476595ed54b42de54ffc 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 
-#include <linux/irqchip/arm-gic.h>
-
 #include <asm/mcpm.h>
 #include <asm/smp.h>
 #include <asm/smp_plat.h>
@@ -49,7 +47,6 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i
 static void __cpuinit mcpm_secondary_init(unsigned int cpu)
 {
        mcpm_cpu_powered_up();
-       gic_secondary_init(0);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
index e40b435d204e85f12d41cfc2bd5b43f84f99354e..227abf9cc6018fe2a53ed6146cff8a2d69ee0f97 100644 (file)
@@ -1,4 +1,4 @@
-CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
@@ -7,17 +7,18 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
-CONFIG_EFI_PARTITION=y
 CONFIG_ARCH_EXYNOS=y
-CONFIG_S3C_LOWLEVEL_UART_PORT=1
+CONFIG_S3C_LOWLEVEL_UART_PORT=3
 CONFIG_S3C24XX_PWM=y
 CONFIG_ARCH_EXYNOS5=y
 CONFIG_MACH_EXYNOS4_DT=y
-CONFIG_MACH_EXYNOS5_DT=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
@@ -30,35 +31,58 @@ CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_RFKILL_REGULATOR=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=m
 CONFIG_NETDEVICES=y
 CONFIG_SMSC911X=y
 CONFIG_USB_USBNET=y
 CONFIG_USB_NET_SMSC75XX=y
 CONFIG_USB_NET_SMSC95XX=y
 CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
+CONFIG_KEYBOARD_CROS_EC=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_CYAPA=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_HW_RANDOM=y
+CONFIG_TCG_TPM=y
+CONFIG_TCG_TIS_I2C_INFINEON=y
 CONFIG_I2C=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_ARB_GPIO_CHALLENGE=y
+CONFIG_I2C_S3C2410=y
+CONFIG_DEBUG_GPIO=y
 # CONFIG_HWMON is not set
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_MAX77686=y
+CONFIG_MFD_MAX8997=y
+CONFIG_MFD_SEC_CORE=y
 CONFIG_MFD_TPS65090=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_MAX8997=y
+CONFIG_REGULATOR_MAX77686=y
+CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_TPS65090=y
 CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_SIMPLE=y
 CONFIG_EXYNOS_VIDEO=y
 CONFIG_EXYNOS_MIPI_DSI=y
 CONFIG_EXYNOS_DP=y
@@ -67,6 +91,20 @@ CONFIG_FONTS=y
 CONFIG_FONT_7x14=y
 CONFIG_LOGO=y
 CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_S5P=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_PHY=y
+CONFIG_SAMSUNG_USB2PHY=y
+CONFIG_SAMSUNG_USB3PHY=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_S3C=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_COMMON_CLK_MAX77686=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS=y
@@ -79,6 +117,7 @@ CONFIG_ROMFS_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
@@ -87,6 +126,5 @@ CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_EARLY_PRINTK=y
+CONFIG_CRYPTO_SHA256=y
 CONFIG_CRC_CCITT=y
index 7e0ebb64a7f9dafe5c73b9201c39e344c309ef93..9940f7b4e438c258d125cc6567ac1b7a9b2fc429 100644 (file)
@@ -199,7 +199,6 @@ CONFIG_USB_PHY=y
 CONFIG_USB_DEBUG=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y
index c1ef64bc5abd65781da53de52d4cfd3e392e29cc..abbe31937c65228e53dc2c97b4b2d4c14451ec38 100644 (file)
@@ -20,6 +20,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_MULTI_V6=y
 CONFIG_ARCH_OMAP2PLUS=y
 CONFIG_OMAP_RESET_CLOCKS=y
 CONFIG_OMAP_MUX_DEBUG=y
@@ -204,7 +205,6 @@ CONFIG_USB=y
 CONFIG_USB_DEBUG=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_DEVICEFS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=y
 CONFIG_USB_WDM=y
 CONFIG_USB_STORAGE=y
index a5f0485133cf9d9fc608a742e158b629405e1c7d..f7ba316164d4e9e4572f396249e70ad65e9163a9 100644 (file)
@@ -153,6 +153,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_USB_SUPPORT=y
 CONFIG_USB_VIDEO_CLASS=m
 CONFIG_DRM=y
+CONFIG_TEGRA_HOST1X=y
 CONFIG_DRM_TEGRA=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 # CONFIG_LCD_CLASS_DEVICE is not set
@@ -202,7 +203,7 @@ CONFIG_TEGRA20_APB_DMA=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y
 CONFIG_SENSORS_ISL29028=y
-CONFIG_SENSORS_AK8975=y
+CONFIG_AK8975=y
 CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
index 92c6eed7aac9cded01cd9bd259deba3bd6fda782..99207c45ec10f56891cd40d53b8a5fe524337f0f 100644 (file)
@@ -195,6 +195,7 @@ ENTRY(sha1_block_data_order)
        add     r3,r3,r10                       @ E+=F_00_19(B,C,D)
        cmp     r14,sp
        bne     .L_00_15                @ [((11+4)*5+2)*3]
+       sub     sp,sp,#25*4
 #if __ARM_ARCH__<7
        ldrb    r10,[r1,#2]
        ldrb    r9,[r1,#3]
@@ -290,7 +291,6 @@ ENTRY(sha1_block_data_order)
        add     r3,r3,r10                       @ E+=F_00_19(B,C,D)
 
        ldr     r8,.LK_20_39            @ [+15+16*4]
-       sub     sp,sp,#25*4
        cmn     sp,#0                   @ [+3], clear carry to denote 20_39
 .L_20_39_or_60_79:
        ldr     r9,[r14,#15*4]
index 7eb18c1d8d6cbce723b9f62c77746dad81aa11ec..4f009c10540dff2a2e7efd08b0671c2369547b90 100644 (file)
@@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
        ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),       \
                                                atomic64_t,             \
                                                counter),               \
-                                             (unsigned long)(o),       \
-                                             (unsigned long)(n)))
+                                             (unsigned long long)(o),  \
+                                             (unsigned long long)(n)))
 
 #define cmpxchg64_local(ptr, o, n)                                     \
        ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),        \
                                                local64_t,              \
                                                a),                     \
-                                            (unsigned long)(o),        \
-                                            (unsigned long)(n)))
+                                            (unsigned long long)(o),   \
+                                            (unsigned long long)(n)))
 
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
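
Editorial illustration (hypothetical values, not part of the patch) of why the widened casts above matter on 32-bit ARM, where unsigned long is 32 bits:

/*
 *   u64 old = 0x100000001ULL;
 *   (unsigned long)old       == 0x00000001    - high word silently dropped
 *   (unsigned long long)old  == 0x100000001   - full 64-bit value preserved
 *
 * With the truncating cast, cmpxchg64()/cmpxchg64_local() could hand a
 * corrupted "expected"/"new" value to atomic64_cmpxchg()/local64_cmpxchg();
 * casting to unsigned long long keeps all 64 bits intact.
 */
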
 
index 99a19512ee26e2e5d99135d21f10d8b99e606226..bdf2b8458ec1d3bb0366ff6ce8aa6623e234aa02 100644 (file)
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb)     0
-#else
-#define tlb_fast_mode(tlb)     1
-#endif
-
 #define MMU_GATHER_BUNDLE      8
 
 /*
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
        tlb_flush(tlb);
-       if (!tlb_fast_mode(tlb)) {
-               free_pages_and_swap_cache(tlb->pages, tlb->nr);
-               tlb->nr = 0;
-               if (tlb->pages == tlb->local)
-                       __tlb_alloc_page(tlb);
-       }
+       free_pages_and_swap_cache(tlb->pages, tlb->nr);
+       tlb->nr = 0;
+       if (tlb->pages == tlb->local)
+               __tlb_alloc_page(tlb);
 }
 
 static inline void
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-       if (tlb_fast_mode(tlb)) {
-               free_page_and_swap_cache(page);
-               return 1; /* avoid calling tlb_flush_mmu */
-       }
-
        tlb->pages[tlb->nr++] = page;
        VM_BUG_ON(tlb->nr > tlb->max);
        return tlb->max - tlb->nr;
index 2848857f5b62f91f7babe3ae928d932e252f723c..fbd24beeb1fad70886ea044387c873a65ef455e2 100644 (file)
@@ -24,9 +24,9 @@
 #define U8500_UART0_PHYS_BASE  (0x80120000)
 #define U8500_UART1_PHYS_BASE  (0x80121000)
 #define U8500_UART2_PHYS_BASE  (0x80007000)
-#define U8500_UART0_VIRT_BASE  (0xa8120000)
-#define U8500_UART1_VIRT_BASE  (0xa8121000)
-#define U8500_UART2_VIRT_BASE  (0xa8007000)
+#define U8500_UART0_VIRT_BASE  (0xf8120000)
+#define U8500_UART1_VIRT_BASE  (0xf8121000)
+#define U8500_UART2_VIRT_BASE  (0xf8007000)
 #define __UX500_PHYS_UART(n)   U8500_UART##n##_PHYS_BASE
 #define __UX500_VIRT_UART(n)   U8500_UART##n##_VIRT_BASE
 #endif
index f219703168366f8142321b93750e0027f1e6c6b2..282de4826abb640bd310ce8cf6099dec297803ce 100644 (file)
@@ -411,7 +411,6 @@ static struct vm_area_struct gate_vma = {
        .vm_start       = 0xffff0000,
        .vm_end         = 0xffff0000 + PAGE_SIZE,
        .vm_flags       = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
-       .vm_mm          = &init_mm,
 };
 
 static int __init gate_vma_init(void)
index 47ab90563bf48b3febdef5fd4085cd2071d551f9..550d63cef68e4be6ada87a3c7942ccd767061c6d 100644 (file)
@@ -251,7 +251,7 @@ void __ref cpu_die(void)
         * this returns, power and/or clocks can be removed at any point
         * from this CPU and its cache by platform_cpu_kill().
         */
-       RCU_NONIDLE(complete(&cpu_died));
+       complete(&cpu_died);
 
        /*
         * Ensure that the cache lines associated with that completion are
index 37d216d814cdd62d12040666e70192b0a33fe350..ef1703b9587b0264c5b7ea8ebb246376dfe76326 100644 (file)
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
        wait_event_interruptible(*wq, !vcpu->arch.pause);
 }
 
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.target >= 0;
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu:      The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        int ret;
        sigset_t sigsaved;
 
-       /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
-       if (unlikely(vcpu->arch.target < 0))
+       if (unlikely(!kvm_vcpu_initialized(vcpu)))
                return -ENOEXEC;
 
        ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
+
+               if (unlikely(!kvm_vcpu_initialized(vcpu)))
+                       return -ENOEXEC;
+
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        return -EFAULT;
                if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_reg_list reg_list;
                unsigned n;
 
+               if (unlikely(!kvm_vcpu_initialized(vcpu)))
+                       return -ENOEXEC;
+
                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
                        return -EFAULT;
                n = reg_list.n;
index 965706578f13bdb0ff062f9df2502608de7a3ae9..84ba67b982c0d32546dae5092ec84ac1d620756b 100644 (file)
@@ -43,7 +43,14 @@ static phys_addr_t hyp_idmap_vector;
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-       kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+       /*
+        * This function also gets called when dealing with HYP page
+        * tables. As HYP doesn't have an associated struct kvm (and
+        * the HYP page tables are fairly static), we don't do
+        * anything there.
+        */
+       if (kvm)
+               kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -78,18 +85,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
        return p;
 }
 
-static void clear_pud_entry(pud_t *pud)
+static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
        pmd_t *pmd_table = pmd_offset(pud, 0);
        pud_clear(pud);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
 }
 
-static void clear_pmd_entry(pmd_t *pmd)
+static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        pmd_clear(pmd);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
 }
@@ -100,11 +109,12 @@ static bool pmd_empty(pmd_t *pmd)
        return page_count(pmd_page) == 1;
 }
 
-static void clear_pte_entry(pte_t *pte)
+static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 {
        if (pte_present(*pte)) {
                kvm_set_pte(pte, __pte(0));
                put_page(virt_to_page(pte));
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
        }
 }
 
@@ -114,7 +124,8 @@ static bool pte_empty(pte_t *pte)
        return page_count(pte_page) == 1;
 }
 
-static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+                       unsigned long long start, u64 size)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -138,15 +149,15 @@ static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
                }
 
                pte = pte_offset_kernel(pmd, addr);
-               clear_pte_entry(pte);
+               clear_pte_entry(kvm, pte, addr);
                range = PAGE_SIZE;
 
                /* If we emptied the pte, walk back up the ladder */
                if (pte_empty(pte)) {
-                       clear_pmd_entry(pmd);
+                       clear_pmd_entry(kvm, pmd, addr);
                        range = PMD_SIZE;
                        if (pmd_empty(pmd)) {
-                               clear_pud_entry(pud);
+                               clear_pud_entry(kvm, pud, addr);
                                range = PUD_SIZE;
                        }
                }
@@ -165,14 +176,14 @@ void free_boot_hyp_pgd(void)
        mutex_lock(&kvm_hyp_pgd_mutex);
 
        if (boot_hyp_pgd) {
-               unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-               unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+               unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+               unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
                kfree(boot_hyp_pgd);
                boot_hyp_pgd = NULL;
        }
 
        if (hyp_pgd)
-               unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+               unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
        kfree(init_bounce_page);
        init_bounce_page = NULL;
@@ -200,9 +211,10 @@ void free_hyp_pgds(void)
 
        if (hyp_pgd) {
                for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-                       unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+                       unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
                for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-                       unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+                       unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+
                kfree(hyp_pgd);
                hyp_pgd = NULL;
        }
@@ -393,7 +405,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
  */
 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 {
-       unmap_range(kvm->arch.pgd, start, size);
+       unmap_range(kvm, kvm->arch.pgd, start, size);
 }
 
 /**
@@ -675,7 +687,6 @@ static void handle_hva_to_gpa(struct kvm *kvm,
 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
-       kvm_tlb_flush_vmid_ipa(kvm, gpa);
 }
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
index 2acdff4c1dfea8c72c7106dff630a4870d6e70a9..180b3024bec3ab36cc2d7cdb62e92d7b2298d297 100644 (file)
@@ -174,6 +174,7 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
 static struct clock_event_device clkevt = {
        .name           = "at91_tick",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+       .shift          = 32,
        .rating         = 150,
        .set_next_event = clkevt32k_next_event,
        .set_mode       = clkevt32k_mode,
@@ -264,9 +265,11 @@ void __init at91rm9200_timer_init(void)
        at91_st_write(AT91_ST_RTMR, 1);
 
        /* Setup timer clockevent, with minimum of two ticks (important!!) */
+       clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
+       clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
+       clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
        clkevt.cpumask = cpumask_of(0);
-       clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
-                                       2, AT91_ST_ALMV);
+       clockevents_register_device(&clkevt);
 
        /* register clocksource */
        clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
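
Editorial back-of-the-envelope check (not part of the patch) of the open-coded mult/shift setup above, assuming AT91_SLOW_CLOCK = 32768 Hz:

/*
 *   mult   = div_sc(32768, NSEC_PER_SEC, 32)
 *          = (32768 << 32) / 1e9              ~= 140737
 *   cycles = (delta_ns * mult) >> 32
 *          = (1e9 * 140737) >> 32             ~= 32768 ticks per second
 *
 * max_delta_ns is then bounded by AT91_ST_ALMV timer ticks and min_delta_ns
 * by two ticks, roughly what clockevents_config_and_register() would have
 * derived from the same parameters.
 */
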
index 13cdbcd48f51eca105add53d8a674a2ea0e5fa45..c7d670d118025eaf71a4dd145d0d3a33d300a415 100644 (file)
@@ -223,13 +223,7 @@ static void __init at91sam9n12_map_io(void)
        at91_init_sram(0, AT91SAM9N12_SRAM_BASE, AT91SAM9N12_SRAM_SIZE);
 }
 
-void __init at91sam9n12_initialize(void)
-{
-       at91_extern_irq = (1 << AT91SAM9N12_ID_IRQ0);
-}
-
 AT91_SOC_START(at91sam9n12)
        .map_io = at91sam9n12_map_io,
        .register_clocks = at91sam9n12_register_clocks,
-       .init = at91sam9n12_initialize,
 AT91_SOC_END
index 31df12029c4e9a5b32695874e95fb9e4e4c75589..2bd7f51b0b8204d92c4bde747d197ad9dc21fa36 100644 (file)
@@ -179,9 +179,9 @@ extern void __iomem *at91_pmc_base;
 #define                AT91_PMC_PCR_CMD        (0x1  <<  12)           /* Command (read=0, write=1) */
 #define                AT91_PMC_PCR_DIV(n)     ((n)  <<  16)           /* Divisor Value */
 #define                        AT91_PMC_PCR_DIV0       0x0                     /* Peripheral clock is MCK */
-#define                        AT91_PMC_PCR_DIV2       0x2                     /* Peripheral clock is MCK/2 */
-#define                        AT91_PMC_PCR_DIV4       0x4                     /* Peripheral clock is MCK/4 */
-#define                        AT91_PMC_PCR_DIV8       0x8                     /* Peripheral clock is MCK/8 */
+#define                        AT91_PMC_PCR_DIV2       0x1                     /* Peripheral clock is MCK/2 */
+#define                        AT91_PMC_PCR_DIV4       0x2                     /* Peripheral clock is MCK/4 */
+#define                        AT91_PMC_PCR_DIV8       0x3                     /* Peripheral clock is MCK/8 */
 #define                AT91_PMC_PCR_EN         (0x1  <<  28)           /* Enable */
 
 #endif
index d19edff0ea6e07bbddefd7dd1d7a94d0cc171ff1..ff18fc2ea46f092bc68786065bcdd985da325c7e 100644 (file)
@@ -250,6 +250,7 @@ config MACH_ARMLEX4210
 config MACH_UNIVERSAL_C210
        bool "Mobile UNIVERSAL_C210 Board"
        select CLKSRC_MMIO
+       select CLKSRC_SAMSUNG_PWM
        select CPU_EXYNOS4210
        select EXYNOS4_SETUP_FIMC
        select EXYNOS4_SETUP_FIMD0
@@ -281,7 +282,6 @@ config MACH_UNIVERSAL_C210
        select S5P_DEV_TV
        select S5P_GPIO_INT
        select S5P_SETUP_MIPIPHY
-       select SAMSUNG_HRT
        help
          Machine support for Samsung Mobile Universal S5PC210 Reference
          Board.
@@ -410,6 +410,7 @@ config MACH_EXYNOS4_DT
        depends on ARCH_EXYNOS4
        select ARM_AMBA
        select CLKSRC_OF
+       select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
        select CPU_EXYNOS4210
        select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
        select PINCTRL
index 745e304ad0ded17ab6b2f9d9253592739afbc1df..027c9e7f0d137acf36c0502168137ee32f81e9a7 100644 (file)
  */
 
 #include <linux/kernel.h>
+#include <linux/bitops.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
 #include <linux/io.h>
 #include <linux/device.h>
 #include <linux/gpio.h>
+#include <clocksource/samsung_pwm.h>
 #include <linux/sched.h>
 #include <linux/serial_core.h>
 #include <linux/of.h>
@@ -302,6 +304,13 @@ static struct map_desc exynos5440_iodesc0[] __initdata = {
        },
 };
 
+static struct samsung_pwm_variant exynos4_pwm_variant = {
+       .bits           = 32,
+       .div_base       = 0,
+       .has_tint_cstat = true,
+       .tclk_mask      = 0,
+};
+
 void exynos4_restart(char mode, const char *cmd)
 {
        __raw_writel(0x1, S5P_SWRESET);
@@ -317,9 +326,16 @@ void exynos5_restart(char mode, const char *cmd)
                val = 0x1;
                addr = EXYNOS_SWRESET;
        } else if (of_machine_is_compatible("samsung,exynos5440")) {
+               u32 status;
                np = of_find_compatible_node(NULL, NULL, "samsung,exynos5440-clock");
+
+               addr = of_iomap(np, 0) + 0xbc;
+               status = __raw_readl(addr);
+
                addr = of_iomap(np, 0) + 0xcc;
-               val = (0xfff << 20) | (0x1 << 16);
+               val = __raw_readl(addr);
+
+               val = (val & 0xffff0000) | (status & 0xffff);
        } else {
                pr_err("%s: cannot support non-DT\n", __func__);
                return;
@@ -442,8 +458,20 @@ static void __init exynos5440_map_io(void)
        iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0));
 }
 
+void __init exynos_set_timer_source(u8 channels)
+{
+       exynos4_pwm_variant.output_mask = BIT(SAMSUNG_PWM_NUM) - 1;
+       exynos4_pwm_variant.output_mask &= ~channels;
+}
+
 void __init exynos_init_time(void)
 {
+       unsigned int timer_irqs[SAMSUNG_PWM_NUM] = {
+               EXYNOS4_IRQ_TIMER0_VIC, EXYNOS4_IRQ_TIMER1_VIC,
+               EXYNOS4_IRQ_TIMER2_VIC, EXYNOS4_IRQ_TIMER3_VIC,
+               EXYNOS4_IRQ_TIMER4_VIC,
+       };
+
        if (of_have_populated_dt()) {
 #ifdef CONFIG_OF
                of_clk_init(NULL);
@@ -455,7 +483,14 @@ void __init exynos_init_time(void)
                exynos4_clk_init(NULL, !soc_is_exynos4210(), S5P_VA_CMU, readl(S5P_VA_CHIPID + 8) & 1);
                exynos4_clk_register_fixed_ext(xxti_f, xusbxti_f);
 #endif
-               mct_init(S5P_VA_SYSTIMER, EXYNOS4_IRQ_MCT_G0, EXYNOS4_IRQ_MCT_L0, EXYNOS4_IRQ_MCT_L1);
+#ifdef CONFIG_CLKSRC_SAMSUNG_PWM
+               if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
+                       samsung_pwm_clocksource_init(S3C_VA_TIMER,
+                                       timer_irqs, &exynos4_pwm_variant);
+               else
+#endif
+                       mct_init(S5P_VA_SYSTIMER, EXYNOS4_IRQ_MCT_G0,
+                                       EXYNOS4_IRQ_MCT_L0, EXYNOS4_IRQ_MCT_L1);
        }
 }
 
index 60dd35cc01a608102ab629a748ea1d19ca8fc2a6..11fc1e29819bb58b2fc4271b78a8ba3e3a8b9776 100644 (file)
@@ -32,6 +32,8 @@ void exynos4_clk_register_fixed_ext(unsigned long, unsigned long);
 
 void exynos_firmware_init(void);
 
+void exynos_set_timer_source(u8 channels);
+
 #ifdef CONFIG_PM_GENERIC_DOMAINS
 int exynos_pm_late_initcall(void);
 #else
index 7dbbfec13ea5b786c57f0ae989ca4fde6ad54f1a..296090e7f423cc72952ccfefd6a4a60be25a044f 100644 (file)
 #ifndef __ASM_ARCH_PM_CORE_H
 #define __ASM_ARCH_PM_CORE_H __FILE__
 
+#include <linux/of.h>
 #include <mach/regs-pmu.h>
 
+#ifdef CONFIG_PINCTRL_EXYNOS
+extern u32 exynos_get_eint_wake_mask(void);
+#else
+static inline u32 exynos_get_eint_wake_mask(void) { return 0xffffffff; }
+#endif
+
 static inline void s3c_pm_debug_init_uart(void)
 {
        /* nothing here yet */
@@ -27,7 +34,12 @@ static inline void s3c_pm_debug_init_uart(void)
 
 static inline void s3c_pm_arch_prepare_irqs(void)
 {
-       __raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK);
+       u32 eintmask = s3c_irqwake_eintmask;
+
+       if (of_have_populated_dt())
+               eintmask = exynos_get_eint_wake_mask();
+
+       __raw_writel(eintmask, S5P_EINT_WAKEUP_MASK);
        __raw_writel(s3c_irqwake_intmask & ~(1 << 31), S5P_WAKEUP_MASK);
 }
 
index 327d50d4681d7e97dc7efa720ce39bccc11ead7d..74ddb2b55614234b69ee26d69ff51f5ce8cb3540 100644 (file)
@@ -41,7 +41,6 @@
 #include <plat/mfc.h>
 #include <plat/sdhci.h>
 #include <plat/fimc-core.h>
-#include <plat/samsung-time.h>
 #include <plat/camport.h>
 
 #include <mach/map.h>
@@ -1094,7 +1093,7 @@ static void __init universal_map_io(void)
 {
        exynos_init_io(NULL, 0);
        s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
-       samsung_set_timer_source(SAMSUNG_PWM2, SAMSUNG_PWM4);
+       exynos_set_timer_source(BIT(2) | BIT(4));
        xxti_f = 0;
        xusbxti_f = 24000000;
 }
@@ -1154,7 +1153,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
        .map_io         = universal_map_io,
        .init_machine   = universal_machine_init,
        .init_late      = exynos_init_late,
-       .init_time      = samsung_timer_init,
+       .init_time      = exynos_init_time,
        .reserve        = &universal_reserve,
        .restart        = exynos4_restart,
 MACHINE_END
index 151259003086e8d1f0db34015742ab307b3f986a..dda9a2bd3acbb34da147542d3080770292c8771c 100644 (file)
@@ -177,7 +177,8 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
 static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
 static const char *pll1_sw_sels[]      = { "pll1_sys", "step", };
 static const char *periph_pre_sels[]   = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
-static const char *periph_clk2_sels[]  = { "pll3_usb_otg", "osc", };
+static const char *periph_clk2_sels[]  = { "pll3_usb_otg", "osc", "osc", "dummy", };
+static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", };
 static const char *periph_sels[]       = { "periph_pre", "periph_clk2", };
 static const char *periph2_sels[]      = { "periph2_pre", "periph2_clk2", };
 static const char *axi_sels[]          = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
@@ -185,7 +186,7 @@ static const char *audio_sels[]     = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd
 static const char *gpu_axi_sels[]      = { "axi", "ahb", };
 static const char *gpu2d_core_sels[]   = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
 static const char *gpu3d_core_sels[]   = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
-static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
+static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
 static const char *ipu_sels[]          = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
 static const char *ldb_di_sels[]       = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
 static const char *ipu_di_pre_sels[]   = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
@@ -369,8 +370,8 @@ int __init mx6q_clocks_init(void)
        clk[pll1_sw]          = imx_clk_mux("pll1_sw",          base + 0xc,  2,  1, pll1_sw_sels,      ARRAY_SIZE(pll1_sw_sels));
        clk[periph_pre]       = imx_clk_mux("periph_pre",       base + 0x18, 18, 2, periph_pre_sels,   ARRAY_SIZE(periph_pre_sels));
        clk[periph2_pre]      = imx_clk_mux("periph2_pre",      base + 0x18, 21, 2, periph_pre_sels,   ARRAY_SIZE(periph_pre_sels));
-       clk[periph_clk2_sel]  = imx_clk_mux("periph_clk2_sel",  base + 0x18, 12, 1, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
-       clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
+       clk[periph_clk2_sel]  = imx_clk_mux("periph_clk2_sel",  base + 0x18, 12, 2, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
+       clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
        clk[axi_sel]          = imx_clk_mux("axi_sel",          base + 0x14, 6,  2, axi_sels,          ARRAY_SIZE(axi_sels));
        clk[esai_sel]         = imx_clk_mux("esai_sel",         base + 0x20, 19, 2, audio_sels,        ARRAY_SIZE(audio_sels));
        clk[asrc_sel]         = imx_clk_mux("asrc_sel",         base + 0x30, 7,  2, audio_sels,        ARRAY_SIZE(audio_sels));
@@ -498,7 +499,7 @@ int __init mx6q_clocks_init(void)
        clk[ldb_di1]      = imx_clk_gate2("ldb_di1",       "ldb_di1_podf",      base + 0x74, 14);
        clk[ipu2_di1]     = imx_clk_gate2("ipu2_di1",      "ipu2_di1_sel",      base + 0x74, 10);
        clk[hsi_tx]       = imx_clk_gate2("hsi_tx",        "hsi_tx_podf",       base + 0x74, 16);
-       clk[mlb]          = imx_clk_gate2("mlb",           "pll8_mlb",          base + 0x74, 18);
+       clk[mlb]          = imx_clk_gate2("mlb",           "axi",               base + 0x74, 18);
        clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi",  "mmdc_ch0_axi_podf", base + 0x74, 20);
        clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi",  "mmdc_ch1_axi_podf", base + 0x74, 22);
        clk[ocram]        = imx_clk_gate2("ocram",         "ahb",               base + 0x74, 28);
index 67b9c48dcafe7dc1ee281c49ece9de88338297f4..627f16f0e9d1d393d48527e50f25c5e22c81987d 100644 (file)
        .section ".text.head", "ax"
 
 #ifdef CONFIG_SMP
+diag_reg_offset:
+       .word   g_diag_reg - .
+
+       .macro  set_diag_reg
+       adr     r0, diag_reg_offset
+       ldr     r1, [r0]
+       add     r1, r1, r0              @ r1 = physical &g_diag_reg
+       ldr     r0, [r1]
+       mcr     p15, 0, r0, c15, c0, 1  @ write diagnostic register
+       .endm
+
 ENTRY(v7_secondary_startup)
        bl      v7_invalidate_l1
+       set_diag_reg
        b       secondary_startup
 ENDPROC(v7_secondary_startup)
 #endif
index 4a69305db65e395366187a84cde322e37fc99e94..c6e1ab5448822c8aaed044413f726b0dc8c47a39 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/init.h>
 #include <linux/smp.h>
+#include <asm/cacheflush.h>
 #include <asm/page.h>
 #include <asm/smp_scu.h>
 #include <asm/mach/map.h>
@@ -21,6 +22,7 @@
 
 #define SCU_STANDBY_ENABLE     (1 << 5)
 
+u32 g_diag_reg;
 static void __iomem *scu_base;
 
 static struct map_desc scu_io_desc __initdata = {
@@ -80,6 +82,18 @@ void imx_smp_prepare(void)
 static void __init imx_smp_prepare_cpus(unsigned int max_cpus)
 {
        imx_smp_prepare();
+
+       /*
+        * The diagnostic register holds the errata bits.  The bootloader
+        * usually does not bring up secondary cores, so any errata bits it
+        * sets are applied to the boot cpu only.  On an SMP configuration
+        * they must be applied to every single core.  Read the register
+        * from the boot cpu here; it will be replicated into the secondary
+        * cores when booting them.
+        */
+       asm("mrc p15, 0, %0, c15, c0, 1" : "=r" (g_diag_reg) : : "cc");
+       __cpuc_flush_dcache_area(&g_diag_reg, sizeof(g_diag_reg));
+       outer_clean_range(__pa(&g_diag_reg), __pa(&g_diag_reg + 1));
 }
 
 struct smp_operations  imx_smp_ops __initdata = {
index c2cae69e6d2bb9343798a7a3398ce174850d59cf..f389228975637cd85d3f9f9444ae30e0f34537cc 100644 (file)
@@ -528,12 +528,6 @@ void __init kirkwood_init_early(void)
 {
        orion_time_set_base(TIMER_VIRT_BASE);
 
-       /*
-        * Some Kirkwood devices allocate their coherent buffers from atomic
-        * context. Increase size of atomic coherent pool to make sure such
-        * the allocations won't fail.
-        */
-       init_dma_coherent_pool_size(SZ_1M);
        mvebu_mbus_init("marvell,kirkwood-mbus",
                        BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
                        DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ);
index 283abff902287357a27da54427a390432317bca2..e1267d6b468f1f6d0d0a0e7efa40391bff077183 100644 (file)
@@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void)
 static int __init ts219_pci_init(void)
 {
        if (machine_is_ts219())
-               kirkwood_pcie_init(KW_PCIE0);
+               kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);
 
        return 0;
 }
index e11acbb0a46d4316c82f23cb333744cbab41d89f..80a8bcacd9d539c0a684e3beaa927b07dd720566 100644 (file)
@@ -15,6 +15,7 @@ config ARCH_MVEBU
        select MVEBU_CLK_GATING
        select MVEBU_MBUS
        select ZONE_DMA if ARM_LPAE
+       select ARCH_REQUIRE_GPIOLIB
 
 if ARCH_MVEBU
 
index 42a4cb3087e23ab04ea2e7e22971c1f38aeb9aa2..1c48890bb72b2c7ad1301cdc9f8bad007362bf57 100644 (file)
@@ -53,13 +53,6 @@ void __init armada_370_xp_init_early(void)
 {
        char *mbus_soc_name;
 
-       /*
-        * Some Armada 370/XP devices allocate their coherent buffers
-        * from atomic context. Increase size of atomic coherent pool
-        * to make sure such the allocations won't fail.
-        */
-       init_dma_coherent_pool_size(SZ_1M);
-
        /*
         * This initialization will be replaced by a DT-based
         * initialization once the mvebu-mbus driver gains DT support.
index 68ab858e27b754bea93264889ab9b020cf67d698..a94b3a718d1a771dda53f5e06542fb6a6989cee8 100644 (file)
@@ -345,6 +345,7 @@ static int __init omap1_system_dma_init(void)
                dev_err(&pdev->dev,
                        "%s: Memory allocation failed for d->chan!\n",
                        __func__);
+               ret = -ENOMEM;
                goto exit_release_d;
        }
 
index 6ebc7803bc3e37b48ff9c1b394a39e2a3dff9a28..af3544ce4f0273b8eb8b43c22295a5ae6ced76a3 100644 (file)
@@ -454,9 +454,29 @@ DEFINE_CLK_GATE(cefuse_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
  */
 DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732);
 
-DEFINE_CLK_GATE(clkdiv32k_ick, "clkdiv32k_ck", &clkdiv32k_ck, 0x0,
-               AM33XX_CM_PER_CLKDIV32K_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
-               0x0, NULL);
+static struct clk clkdiv32k_ick;
+
+static const char *clkdiv32k_ick_parent_names[] = {
+       "clkdiv32k_ck",
+};
+
+static const struct clk_ops clkdiv32k_ick_ops = {
+       .enable         = &omap2_dflt_clk_enable,
+       .disable        = &omap2_dflt_clk_disable,
+       .is_enabled     = &omap2_dflt_clk_is_enabled,
+       .init           = &omap2_init_clk_clkdm,
+};
+
+static struct clk_hw_omap clkdiv32k_ick_hw = {
+       .hw     = {
+               .clk    = &clkdiv32k_ick,
+       },
+       .enable_reg     = AM33XX_CM_PER_CLKDIV32K_CLKCTRL,
+       .enable_bit     = AM33XX_MODULEMODE_SWCTRL_SHIFT,
+       .clkdm_name     = "clk_24mhz_clkdm",
+};
+
+DEFINE_STRUCT_CLK(clkdiv32k_ick, clkdiv32k_ick_parent_names, clkdiv32k_ick_ops);
 
 /* "usbotg_fck" is an additional clock and not really a modulemode */
 DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0,
index d25a95fe99216c582f97b0a0aee860c3573eaf68..7341eff63f56df8f58d5b72d8db9a71feb86112d 100644 (file)
@@ -1356,13 +1356,27 @@ static void _enable_sysc(struct omap_hwmod *oh)
 
        clkdm = _get_clkdm(oh);
        if (sf & SYSC_HAS_SIDLEMODE) {
+               if (oh->flags & HWMOD_SWSUP_SIDLE ||
+                   oh->flags & HWMOD_SWSUP_SIDLE_ACT) {
+                       idlemode = HWMOD_IDLEMODE_NO;
+               } else {
+                       if (sf & SYSC_HAS_ENAWAKEUP)
+                               _enable_wakeup(oh, &v);
+                       if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+                               idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+                       else
+                               idlemode = HWMOD_IDLEMODE_SMART;
+               }
+
+               /*
+                * Special handling for some IPs, such as the 32k sync
+                * timer: force them to idle.
+                */
                clkdm_act = (clkdm && clkdm->flags & CLKDM_ACTIVE_WITH_MPU);
                if (clkdm_act && !(oh->class->sysc->idlemodes &
                                   (SIDLE_SMART | SIDLE_SMART_WKUP)))
                        idlemode = HWMOD_IDLEMODE_FORCE;
-               else
-                       idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-                               HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
@@ -1391,10 +1405,6 @@ static void _enable_sysc(struct omap_hwmod *oh)
            (sf & SYSC_HAS_CLOCKACTIVITY))
                _set_clockactivity(oh, oh->class->sysc->clockact, &v);
 
-       /* If slave is in SMARTIDLE, also enable wakeup */
-       if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
-               _enable_wakeup(oh, &v);
-
        _write_sysconfig(v, oh);
 
        /*
@@ -1430,13 +1440,16 @@ static void _idle_sysc(struct omap_hwmod *oh)
        sf = oh->class->sysc->sysc_flags;
 
        if (sf & SYSC_HAS_SIDLEMODE) {
-               /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
-               if (oh->flags & HWMOD_SWSUP_SIDLE ||
-                   !(oh->class->sysc->idlemodes &
-                     (SIDLE_SMART | SIDLE_SMART_WKUP)))
+               if (oh->flags & HWMOD_SWSUP_SIDLE) {
                        idlemode = HWMOD_IDLEMODE_FORCE;
-               else
-                       idlemode = HWMOD_IDLEMODE_SMART;
+               } else {
+                       if (sf & SYSC_HAS_ENAWAKEUP)
+                               _enable_wakeup(oh, &v);
+                       if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+                               idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+                       else
+                               idlemode = HWMOD_IDLEMODE_SMART;
+               }
                _set_slave_idlemode(oh, idlemode, &v);
        }
 
@@ -1455,10 +1468,6 @@ static void _idle_sysc(struct omap_hwmod *oh)
                _set_master_standbymode(oh, idlemode, &v);
        }
 
-       /* If slave is in SMARTIDLE, also enable wakeup */
-       if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
-               _enable_wakeup(oh, &v);
-
        _write_sysconfig(v, oh);
 }
 
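Condensed for readability, the slave-idle policy introduced by the _enable_sysc()/_idle_sysc() hunks above amounts to roughly the following sketch (illustrative only, not the literal kernel code; it omits the ENAWAKEUP handling and the force-idle override for clockdomains that stay active with the MPU):

/* Sketch: SIDLEMODE chosen when enabling vs. idling a hwmod. */
static u8 sidlemode_on_enable(struct omap_hwmod *oh)
{
	if (oh->flags & (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_SIDLE_ACT))
		return HWMOD_IDLEMODE_NO;
	return (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) ?
		HWMOD_IDLEMODE_SMART_WKUP : HWMOD_IDLEMODE_SMART;
}

static u8 sidlemode_on_idle(struct omap_hwmod *oh)
{
	if (oh->flags & HWMOD_SWSUP_SIDLE)
		return HWMOD_IDLEMODE_FORCE;
	return (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) ?
		HWMOD_IDLEMODE_SMART_WKUP : HWMOD_IDLEMODE_SMART;
}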
@@ -2065,7 +2074,7 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh)
  * do so is present in the hwmod data, then call it and pass along the
  * return value; otherwise, return 0.
  */
-static int __init _enable_preprogram(struct omap_hwmod *oh)
+static int _enable_preprogram(struct omap_hwmod *oh)
 {
        if (!oh->class->enable_preprogram)
                return 0;
@@ -2245,42 +2254,6 @@ static int _idle(struct omap_hwmod *oh)
        return 0;
 }
 
-/**
- * omap_hwmod_set_ocp_autoidle - set the hwmod's OCP autoidle bit
- * @oh: struct omap_hwmod *
- * @autoidle: desired AUTOIDLE bitfield value (0 or 1)
- *
- * Sets the IP block's OCP autoidle bit in hardware, and updates our
- * local copy. Intended to be used by drivers that require
- * direct manipulation of the AUTOIDLE bits.
- * Returns -EINVAL if @oh is null or is not in the ENABLED state, or passes
- * along the return value from _set_module_autoidle().
- *
- * Any users of this function should be scrutinized carefully.
- */
-int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle)
-{
-       u32 v;
-       int retval = 0;
-       unsigned long flags;
-
-       if (!oh || oh->_state != _HWMOD_STATE_ENABLED)
-               return -EINVAL;
-
-       spin_lock_irqsave(&oh->_lock, flags);
-
-       v = oh->_sysc_cache;
-
-       retval = _set_module_autoidle(oh, autoidle, &v);
-
-       if (!retval)
-               _write_sysconfig(v, oh);
-
-       spin_unlock_irqrestore(&oh->_lock, flags);
-
-       return retval;
-}
-
 /**
  * _shutdown - shutdown an omap_hwmod
  * @oh: struct omap_hwmod *
@@ -3179,38 +3152,6 @@ int omap_hwmod_softreset(struct omap_hwmod *oh)
        return ret;
 }
 
-/**
- * omap_hwmod_set_slave_idlemode - set the hwmod's OCP slave idlemode
- * @oh: struct omap_hwmod *
- * @idlemode: SIDLEMODE field bits (shifted to bit 0)
- *
- * Sets the IP block's OCP slave idlemode in hardware, and updates our
- * local copy.  Intended to be used by drivers that have some erratum
- * that requires direct manipulation of the SIDLEMODE bits.  Returns
- * -EINVAL if @oh is null, or passes along the return value from
- * _set_slave_idlemode().
- *
- * XXX Does this function have any current users?  If not, we should
- * remove it; it is better to let the rest of the hwmod code handle this.
- * Any users of this function should be scrutinized carefully.
- */
-int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode)
-{
-       u32 v;
-       int retval = 0;
-
-       if (!oh)
-               return -EINVAL;
-
-       v = oh->_sysc_cache;
-
-       retval = _set_slave_idlemode(oh, idlemode, &v);
-       if (!retval)
-               _write_sysconfig(v, oh);
-
-       return retval;
-}
-
 /**
  * omap_hwmod_lookup - look up a registered omap_hwmod by name
  * @name: name of the omap_hwmod to look up
index fe5962921f07244e602429b758f0d5ccecb7398c..0c898f58ac9bb490e8873a24074e53c1480530a6 100644 (file)
@@ -463,6 +463,9 @@ struct omap_hwmod_omap4_prcm {
  *     is kept in force-standby mode. Failing to do so causes PM problems
  *     with musb on OMAP3630 at least. Note that musb has a dedicated register
  *     to control MSTANDBY signal when MIDLEMODE is set to force-standby.
+ * HWMOD_SWSUP_SIDLE_ACT: omap_hwmod code should manually bring the module
+ *     out of idle, but rely on smart-idle to put it back into idle, so
+ *     that wakeups remain functional (the only known case so far is UART)
  */
 #define HWMOD_SWSUP_SIDLE                      (1 << 0)
 #define HWMOD_SWSUP_MSTANDBY                   (1 << 1)
@@ -476,6 +479,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_EXT_OPT_MAIN_CLK                 (1 << 9)
 #define HWMOD_BLOCK_WFI                                (1 << 10)
 #define HWMOD_FORCE_MSTANDBY                   (1 << 11)
+#define HWMOD_SWSUP_SIDLE_ACT                  (1 << 12)
 
 /*
  * omap_hwmod._int_flags definitions
@@ -641,9 +645,6 @@ int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name);
 int omap_hwmod_enable_clocks(struct omap_hwmod *oh);
 int omap_hwmod_disable_clocks(struct omap_hwmod *oh);
 
-int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode);
-int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle);
-
 int omap_hwmod_reset(struct omap_hwmod *oh);
 void omap_hwmod_ocp_barrier(struct omap_hwmod *oh);
 
index c8c64b3e1acc009efa24b91da269034b8603e714..d05fc7b54567f0dbb6597a1d641ce130caf7ee9b 100644 (file)
@@ -512,6 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
        .mpu_irqs       = omap2_uart1_mpu_irqs,
        .sdma_reqs      = omap2_uart1_sdma_reqs,
        .main_clk       = "uart1_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -531,6 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
        .mpu_irqs       = omap2_uart2_mpu_irqs,
        .sdma_reqs      = omap2_uart2_sdma_reqs,
        .main_clk       = "uart2_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -550,6 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
        .mpu_irqs       = omap2_uart3_mpu_irqs,
        .sdma_reqs      = omap2_uart3_sdma_reqs,
        .main_clk       = "uart3_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
index 01d8f324450a951f89174b4cebdf82f8b9dad6b1..075f7cc510261ee959352daf07818370b4188ab9 100644 (file)
@@ -1995,6 +1995,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
        .name           = "uart1",
        .class          = &uart_class,
        .clkdm_name     = "l4_wkup_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart1_irqs,
        .sdma_reqs      = uart1_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_wkupdm_ck",
@@ -2015,6 +2016,7 @@ static struct omap_hwmod am33xx_uart2_hwmod = {
        .name           = "uart2",
        .class          = &uart_class,
        .clkdm_name     = "l4ls_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart2_irqs,
        .sdma_reqs      = uart1_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_ck",
@@ -2042,6 +2044,7 @@ static struct omap_hwmod am33xx_uart3_hwmod = {
        .name           = "uart3",
        .class          = &uart_class,
        .clkdm_name     = "l4ls_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart3_irqs,
        .sdma_reqs      = uart3_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_ck",
@@ -2062,6 +2065,7 @@ static struct omap_hwmod am33xx_uart4_hwmod = {
        .name           = "uart4",
        .class          = &uart_class,
        .clkdm_name     = "l4ls_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart4_irqs,
        .sdma_reqs      = uart1_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_ck",
@@ -2082,6 +2086,7 @@ static struct omap_hwmod am33xx_uart5_hwmod = {
        .name           = "uart5",
        .class          = &uart_class,
        .clkdm_name     = "l4ls_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart5_irqs,
        .sdma_reqs      = uart1_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_ck",
@@ -2102,6 +2107,7 @@ static struct omap_hwmod am33xx_uart6_hwmod = {
        .name           = "uart6",
        .class          = &uart_class,
        .clkdm_name     = "l4ls_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = am33xx_uart6_irqs,
        .sdma_reqs      = uart1_edma_reqs,
        .main_clk       = "dpll_per_m2_div4_ck",
index 4083606ea1da15e7efeb279d7c33c3f3cc7618b3..31c7126eb3bb65d724cfc71ed0c0733a4202e4d6 100644 (file)
@@ -490,6 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
        .mpu_irqs       = omap2_uart1_mpu_irqs,
        .sdma_reqs      = omap2_uart1_sdma_reqs,
        .main_clk       = "uart1_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -508,6 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
        .mpu_irqs       = omap2_uart2_mpu_irqs,
        .sdma_reqs      = omap2_uart2_sdma_reqs,
        .main_clk       = "uart2_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = CORE_MOD,
@@ -526,6 +528,7 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
        .mpu_irqs       = omap2_uart3_mpu_irqs,
        .sdma_reqs      = omap2_uart3_sdma_reqs,
        .main_clk       = "uart3_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = OMAP3430_PER_MOD,
@@ -555,6 +558,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
        .mpu_irqs       = uart4_mpu_irqs,
        .sdma_reqs      = uart4_sdma_reqs,
        .main_clk       = "uart4_fck",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .prcm           = {
                .omap2 = {
                        .module_offs = OMAP3430_PER_MOD,
index eaba9dc91a0d9824a530cf185537a28a881b5a24..848b6dc67590716f7663151f46a5809f097e9cf1 100644 (file)
@@ -3434,6 +3434,7 @@ static struct omap_hwmod omap44xx_uart1_hwmod = {
        .name           = "uart1",
        .class          = &omap44xx_uart_hwmod_class,
        .clkdm_name     = "l4_per_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = omap44xx_uart1_irqs,
        .sdma_reqs      = omap44xx_uart1_sdma_reqs,
        .main_clk       = "func_48m_fclk",
@@ -3462,6 +3463,7 @@ static struct omap_hwmod omap44xx_uart2_hwmod = {
        .name           = "uart2",
        .class          = &omap44xx_uart_hwmod_class,
        .clkdm_name     = "l4_per_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = omap44xx_uart2_irqs,
        .sdma_reqs      = omap44xx_uart2_sdma_reqs,
        .main_clk       = "func_48m_fclk",
@@ -3490,7 +3492,8 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
        .name           = "uart3",
        .class          = &omap44xx_uart_hwmod_class,
        .clkdm_name     = "l4_per_clkdm",
-       .flags          = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+       .flags          = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
+                               HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = omap44xx_uart3_irqs,
        .sdma_reqs      = omap44xx_uart3_sdma_reqs,
        .main_clk       = "func_48m_fclk",
@@ -3519,6 +3522,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
        .name           = "uart4",
        .class          = &omap44xx_uart_hwmod_class,
        .clkdm_name     = "l4_per_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE_ACT,
        .mpu_irqs       = omap44xx_uart4_irqs,
        .sdma_reqs      = omap44xx_uart4_sdma_reqs,
        .main_clk       = "func_48m_fclk",
index 8396b5b7e912d91e48adbcc3b785041845b9f677..f6601563aa6903f93e04f9656a58593907f89b0e 100644 (file)
@@ -95,38 +95,9 @@ static void omap_uart_enable_wakeup(struct device *dev, bool enable)
                omap_hwmod_disable_wakeup(od->hwmods[0]);
 }
 
-/*
- * Errata i291: [UART]:Cannot Acknowledge Idle Requests
- * in Smartidle Mode When Configured for DMA Operations.
- * WA: configure uart in force idle mode.
- */
-static void omap_uart_set_noidle(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct omap_device *od = to_omap_device(pdev);
-
-       omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
-}
-
-static void omap_uart_set_smartidle(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct omap_device *od = to_omap_device(pdev);
-       u8 idlemode;
-
-       if (od->hwmods[0]->class->sysc->idlemodes & SIDLE_SMART_WKUP)
-               idlemode = HWMOD_IDLEMODE_SMART_WKUP;
-       else
-               idlemode = HWMOD_IDLEMODE_SMART;
-
-       omap_hwmod_set_slave_idlemode(od->hwmods[0], idlemode);
-}
-
 #else
 static void omap_uart_enable_wakeup(struct device *dev, bool enable)
 {}
-static void omap_uart_set_noidle(struct device *dev) {}
-static void omap_uart_set_smartidle(struct device *dev) {}
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_OMAP_MUX
@@ -299,8 +270,6 @@ void __init omap_serial_init_port(struct omap_board_data *bdata,
        omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
        omap_up.flags = UPF_BOOT_AUTOCONF;
        omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
-       omap_up.set_forceidle = omap_uart_set_smartidle;
-       omap_up.set_noidle = omap_uart_set_noidle;
        omap_up.enable_wakeup = omap_uart_enable_wakeup;
        omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
        omap_up.dma_rx_timeout = info->dma_rx_timeout;
index b97fd672e89d74f40e3ebfc0b1e18c365be785f3..f8a6db9239bf3c3ff7a88c29925cdce0961590cb 100644 (file)
@@ -199,13 +199,6 @@ void __init orion5x_init_early(void)
 
        orion_time_set_base(TIMER_VIRT_BASE);
 
-       /*
-        * Some Orion5x devices allocate their coherent buffers from atomic
-        * context. Increase size of atomic coherent pool to make sure such
-        * the allocations won't fail.
-        */
-       init_dma_coherent_pool_size(SZ_1M);
-
        /* Initialize the MBUS driver */
        orion5x_pcie_id(&dev, &rev);
        if (dev == MV88F5281_DEV_ID)
index 91052855cc1216245eb71fe14cb13d5e5f85c63d..b9594e911ce7680d28448fa50cf854ceb91b33bf 100644 (file)
@@ -212,8 +212,8 @@ static struct platform_device *marzen_devices[] __initdata = {
 static struct usb_phy *phy;
 static int usb_power_on(struct platform_device *pdev)
 {
-       if (!phy)
-               return -EIO;
+       if (IS_ERR(phy))
+               return PTR_ERR(phy);
 
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
@@ -225,7 +225,7 @@ static int usb_power_on(struct platform_device *pdev)
 
 static void usb_power_off(struct platform_device *pdev)
 {
-       if (!phy)
+       if (IS_ERR(phy))
                return;
 
        usb_phy_shutdown(phy);
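A note on why IS_ERR() is the right test here: the PHY pointer is presumably obtained earlier in this file with usb_get_phy(), which reports failure with an ERR_PTR-encoded value rather than NULL, so the old !phy check could never catch a missing PHY. An illustrative lookup under that assumption (names are for illustration only):

#include <linux/err.h>
#include <linux/usb/phy.h>

static struct usb_phy *phy;

static void marzen_usb_phy_lookup(void)
{
	/* usb_get_phy() returns an ERR_PTR on failure, never NULL. */
	phy = usb_get_phy(USB_PHY_TYPE_USB2);
	if (IS_ERR(phy))
		pr_warn("marzen: USB PHY not available (%ld)\n", PTR_ERR(phy));
}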
index d259c782d742873f0e0c53b1a43e7bcd85069286..5b045e302b4359d65b91ac31110dd566d2e10391 100644 (file)
@@ -1,5 +1,6 @@
 config ARCH_SUNXI
        bool "Allwinner A1X SOCs" if ARCH_MULTI_V7
+       select ARCH_REQUIRE_GPIOLIB
        select CLKSRC_MMIO
        select CLKSRC_OF
        select COMMON_CLK
index 9e8bdfa2b36915e4db7a8352753626e7f41b682e..31e69a019bdd999bd53338d669a090697122922b 100644 (file)
@@ -307,11 +307,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "missing register base\n");
-               return -ENOMEM;
-       }
-
        emc_regbase = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(emc_regbase))
                return PTR_ERR(emc_regbase);
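The same simplification recurs in the s3c_adc hunk further below: devm_ioremap_resource() already returns an ERR_PTR when handed a NULL or otherwise unusable resource, so the caller's explicit !res check is redundant. A minimal sketch of the resulting idiom (hypothetical example_probe(), for reference only):

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* No !res check needed: devm_ioremap_resource() validates it. */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}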
index 6a4387e39df809f1b21d899b89d2212924ae19cd..b19b07204aafbeb53ced9f4226164ecd9c036cff 100644 (file)
@@ -51,6 +51,7 @@ config MACH_MOP500
        bool "U8500 Development platform, MOP500 versions"
        select I2C
        select I2C_NOMADIK
+       select REGULATOR
        select REGULATOR_FIXED_VOLTAGE
        select SOC_BUS
        select UX500_SOC_DB8500
index 3cd555ac6d0a3e5c81478dfbf71f79f10e72b73b..78389de94dde34f6eaee0f73c479d33eda4846dd 100644 (file)
@@ -623,7 +623,7 @@ static void __init mop500_init_machine(void)
        sdi0_reg_info.gpios[0].gpio = GPIO_SDMMC_1V8_3V_SEL;
 
        mop500_pinmaps_init();
-       parent = u8500_init_devices(&ab8500_platdata);
+       parent = u8500_init_devices();
 
        for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
                mop500_platform_devs[i]->dev.parent = parent;
@@ -660,7 +660,7 @@ static void __init snowball_init_machine(void)
        sdi0_reg_info.gpios[0].gpio = SNOWBALL_SDMMC_1V8_3V_GPIO;
 
        snowball_pinmaps_init();
-       parent = u8500_init_devices(&ab8500_platdata);
+       parent = u8500_init_devices();
 
        for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++)
                snowball_platform_devs[i]->dev.parent = parent;
@@ -698,7 +698,7 @@ static void __init hrefv60_init_machine(void)
        sdi0_reg_info.gpios[0].gpio = HREFV60_SDMMC_1V8_3V_GPIO;
 
        hrefv60_pinmaps_init();
-       parent = u8500_init_devices(&ab8500_platdata);
+       parent = u8500_init_devices();
 
        for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
                mop500_platform_devs[i]->dev.parent = parent;
index e90b5ab23b6daf7e691bbff5b74fd6c165e96518..46cca52890bcfd486f30018556059c12bcf22391 100644 (file)
@@ -206,7 +206,7 @@ static struct device * __init db8500_soc_device_init(void)
 /*
  * This function is called from the board init
  */
-struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500)
+struct device * __init u8500_init_devices(void)
 {
        struct device *parent;
        int i;
@@ -220,8 +220,6 @@ struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500)
        for (i = 0; i < ARRAY_SIZE(platform_devs); i++)
                platform_devs[i]->dev.parent = parent;
 
-       db8500_prcmu_device.dev.platform_data = ab8500;
-
        platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
 
        return parent;
@@ -278,7 +276,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
        OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu",
                        &db8500_prcmu_pdata),
-       OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x", NULL),
+       OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x.0", NULL),
        /* Requires device name bindings. */
        OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE,
                "pinctrl-db8500", NULL),
index bddce2b493722f84c5821103a38eea4a85d8833d..cad3ca86c540f7eb67c454ce44f0317b80312ad3 100644 (file)
@@ -18,7 +18,7 @@
 void __init ux500_map_io(void);
 extern void __init u8500_map_io(void);
 
-extern struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500);
+extern struct device * __init u8500_init_devices(void);
 
 extern void __init ux500_init_irq(void);
 extern void __init ux500_init_late(void);
index 1dd281efc02035dac37cc971a0db4c69c98c8a11..f5c33df7a5971731844478e9a29024c660c35c52 100644 (file)
@@ -173,6 +173,7 @@ static const char * const vt8500_dt_compat[] = {
        "wm,wm8505",
        "wm,wm8750",
        "wm,wm8850",
+       NULL
 };
 
 DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)")
index 251f827271e918adf62b378d59aef9e7272d8b70..c019b7aaf776b7d622aa5c97146070aee57ef523 100644 (file)
@@ -383,7 +383,7 @@ static struct resource orion_ge10_shared_resources[] = {
 
 static struct platform_device orion_ge10_shared = {
        .name           = MV643XX_ETH_SHARED_NAME,
-       .id             = 1,
+       .id             = 2,
        .dev            = {
                .platform_data  = &orion_ge10_shared_data,
        },
@@ -398,8 +398,8 @@ static struct resource orion_ge10_resources[] = {
 
 static struct platform_device orion_ge10 = {
        .name           = MV643XX_ETH_NAME,
-       .id             = 1,
-       .num_resources  = 2,
+       .id             = 2,
+       .num_resources  = 1,
        .resource       = orion_ge10_resources,
        .dev            = {
                .coherent_dma_mask      = DMA_BIT_MASK(32),
@@ -432,7 +432,7 @@ static struct resource orion_ge11_shared_resources[] = {
 
 static struct platform_device orion_ge11_shared = {
        .name           = MV643XX_ETH_SHARED_NAME,
-       .id             = 1,
+       .id             = 3,
        .dev            = {
                .platform_data  = &orion_ge11_shared_data,
        },
@@ -447,8 +447,8 @@ static struct resource orion_ge11_resources[] = {
 
 static struct platform_device orion_ge11 = {
        .name           = MV643XX_ETH_NAME,
-       .id             = 1,
-       .num_resources  = 2,
+       .id             = 3,
+       .num_resources  = 1,
        .resource       = orion_ge11_resources,
        .dev            = {
                .coherent_dma_mask      = DMA_BIT_MASK(32),
index e06fc5fefa14c9ec7d0b46b002da64bc8cb425f8..d9a24f605a2b786dce8a85744c662f310f7d80a8 100644 (file)
@@ -10,6 +10,7 @@
 
 #ifndef __PLAT_COMMON_H
 #include <linux/mv643xx_eth.h>
+#include <linux/platform_data/usb-ehci-orion.h>
 
 struct dsa_platform_data;
 struct mv_sata_platform_data;
index ca07cb1b155a702ce5e3c0cc0de74cfc5a5fe3a4..79690f2f6d3f8a5164495c7758b392cc43bae268 100644 (file)
@@ -381,11 +381,6 @@ static int s3c_adc_probe(struct platform_device *pdev)
        }
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs) {
-               dev_err(dev, "failed to find registers\n");
-               return -ENXIO;
-       }
-
        adc->regs = devm_ioremap_resource(dev, regs);
        if (IS_ERR(adc->regs))
                return PTR_ERR(adc->regs);
index 30c2fe243f7658c0002c93ce4c9aaf154e1ca1db..0f9c3f431a5f6d1b1c0874febb98f06504f650aa 100644 (file)
@@ -311,9 +311,9 @@ struct platform_device s5p_device_jpeg = {
 #ifdef CONFIG_S5P_DEV_FIMD0
 static struct resource s5p_fimd0_resource[] = {
        [0] = DEFINE_RES_MEM(S5P_PA_FIMD0, SZ_32K),
-       [1] = DEFINE_RES_IRQ(IRQ_FIMD0_VSYNC),
-       [2] = DEFINE_RES_IRQ(IRQ_FIMD0_FIFO),
-       [3] = DEFINE_RES_IRQ(IRQ_FIMD0_SYSTEM),
+       [1] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_VSYNC, "vsync"),
+       [2] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_FIFO, "fifo"),
+       [3] = DEFINE_RES_IRQ_NAMED(IRQ_FIMD0_SYSTEM, "lcd_sys"),
 };
 
 struct platform_device s5p_device_fimd0 = {
index 323ce1a62bbfa3465230632d5d7d35c458f380fa..46e17492fd1f3ecccda92c660474c805707b788b 100644 (file)
@@ -60,7 +60,7 @@ ENTRY(vfp_testing_entry)
        str     r11, [r10, #TI_PREEMPT]
 #endif
        ldr     r0, VFP_arch_address
-       str     r5, [r0]                @ known non-zero value
+       str     r0, [r0]                @ set to non-zero value
        mov     pc, r9                  @ we have handled the fault
 ENDPROC(vfp_testing_entry)
 
index d30042e39974f949e6ab3cc81f88f7c6dfc51c4d..13609e01f4b786293219f2c9e865837c9262f76e 100644 (file)
@@ -152,11 +152,12 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
 
-static int __init xen_secondary_init(unsigned int cpu)
+static void __init xen_percpu_init(void *unused)
 {
        struct vcpu_register_vcpu_info info;
        struct vcpu_info *vcpup;
        int err;
+       int cpu = get_cpu();
 
        pr_info("Xen: initializing cpu%d\n", cpu);
        vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
@@ -165,14 +166,10 @@ static int __init xen_secondary_init(unsigned int cpu)
        info.offset = offset_in_page(vcpup);
 
        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
-       if (err) {
-               pr_debug("register_vcpu_info failed: err=%d\n", err);
-       } else {
-               /* This cpu is using the registered vcpu info, even if
-                  later ones fail to. */
-               per_cpu(xen_vcpu, cpu) = vcpup;
-       }
-       return 0;
+       BUG_ON(err);
+       per_cpu(xen_vcpu, cpu) = vcpup;
+
+       enable_percpu_irq(xen_events_irq, 0);
 }
 
 static void xen_restart(char str, const char *cmd)
@@ -208,7 +205,6 @@ static int __init xen_guest_init(void)
        const char *version = NULL;
        const char *xen_prefix = "xen,xen-";
        struct resource res;
-       int i;
 
        node = of_find_compatible_node(NULL, NULL, "xen,xen");
        if (!node) {
@@ -265,19 +261,23 @@ static int __init xen_guest_init(void)
                                               sizeof(struct vcpu_info));
        if (xen_vcpu_info == NULL)
                return -ENOMEM;
-       for_each_online_cpu(i)
-               xen_secondary_init(i);
 
        gnttab_init();
        if (!xen_initial_domain())
                xenbus_probe(NULL);
 
+       return 0;
+}
+core_initcall(xen_guest_init);
+
+static int __init xen_pm_init(void)
+{
        pm_power_off = xen_power_off;
        arm_pm_restart = xen_restart;
 
        return 0;
 }
-core_initcall(xen_guest_init);
+subsys_initcall(xen_pm_init);
 
 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
@@ -285,11 +285,6 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static __init void xen_percpu_enable_events(void *unused)
-{
-       enable_percpu_irq(xen_events_irq, 0);
-}
-
 static int __init xen_init_events(void)
 {
        if (!xen_domain() || xen_events_irq < 0)
@@ -303,7 +298,7 @@ static int __init xen_init_events(void)
                return -EINVAL;
        }
 
-       on_each_cpu(xen_percpu_enable_events, NULL, 0);
+       on_each_cpu(xen_percpu_init, NULL, 0);
 
        return 0;
 }
index 48347dcf056681641936a8cedcfeb88272fc8f7e..56b3f6d447ae10b8d53f9cfb767826fe90a97c11 100644 (file)
@@ -122,8 +122,6 @@ endmenu
 
 menu "Kernel Features"
 
-source "kernel/time/Kconfig"
-
 config ARM64_64K_PAGES
        bool "Enable 64KB pages support"
        help
index c8eedc6049844dbb0d2142fa9aca90478e163025..5aceb83b3f5c3c5dd9dc1168a157bb0e41f1c8e9 100644 (file)
@@ -82,7 +82,7 @@
 
        .macro  enable_dbg_if_not_stepping, tmp
        mrs     \tmp, mdscr_el1
-       tbnz    \tmp, #1, 9990f
+       tbnz    \tmp, #0, 9990f
        enable_dbg
 9990:
        .endm
index 12f22492df4ced64e6268449a80598a2ae5676f2..58125bf008d3e647e5c89e6ba534827f0a0d5629 100644 (file)
@@ -389,7 +389,7 @@ __SYSCALL(364, sys_perf_event_open)
 __SYSCALL(365, compat_sys_recvmmsg)
 __SYSCALL(366, sys_accept4)
 __SYSCALL(367, sys_fanotify_init)
-__SYSCALL(368, compat_sys_fanotify_mark_wrapper)
+__SYSCALL(368, compat_sys_fanotify_mark)
 __SYSCALL(369, sys_prlimit64)
 __SYSCALL(370, sys_name_to_handle_at)
 __SYSCALL(371, compat_sys_open_by_handle_at)
index 7df1aad29b676d38f085d9ca8be52a2d0c960431..41b4f626d5548c10985313839892de44ad81e1ec 100644 (file)
@@ -34,6 +34,7 @@ EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 
 EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);
 
 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
index 0c3ba9f51376d33656607c67f18e135895357079..f4726dc054b3bbcdd7c7a5d98d3733b6a893ea3e 100644 (file)
@@ -136,8 +136,6 @@ void disable_debug_monitors(enum debug_el el)
  */
 static void clear_os_lock(void *unused)
 {
-       asm volatile("msr mdscr_el1, %0" : : "r" (0));
-       isb();
        asm volatile("msr oslar_el1, %0" : : "r" (0));
        isb();
 }
index ac974f48a7a25145cd8024a7c8cb77a21cdccad2..fbb6e18436598142cf0e4ea7429b63965f2ada4b 100644 (file)
@@ -95,7 +95,7 @@ static void early_write(struct console *con, const char *s, unsigned n)
        }
 }
 
-static struct console early_console = {
+static struct console early_console_dev = {
        .name =         "earlycon",
        .write =        early_write,
        .flags =        CON_PRINTBUFFER | CON_BOOT,
@@ -145,7 +145,8 @@ static int __init setup_early_printk(char *buf)
                early_base = early_io_map(paddr, EARLYCON_IOBASE);
 
        printch = match->printch;
-       register_console(&early_console);
+       early_console = &early_console_dev;
+       register_console(&early_console_dev);
 
        return 0;
 }
index c7e047049f2c1e26db2534478b07db95ec29ede7..1d1314280a03a678c4ef63092712d49079386cdd 100644 (file)
@@ -390,6 +390,16 @@ el0_sync_compat:
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_EL1_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
+       cmp     x24, #ESR_EL1_EC_CP15_32        // CP15 MRC/MCR trap
+       b.eq    el0_undef
+       cmp     x24, #ESR_EL1_EC_CP15_64        // CP15 MRRC/MCRR trap
+       b.eq    el0_undef
+       cmp     x24, #ESR_EL1_EC_CP14_MR        // CP14 MRC/MCR trap
+       b.eq    el0_undef
+       cmp     x24, #ESR_EL1_EC_CP14_LS        // CP14 LDC/STC trap
+       b.eq    el0_undef
+       cmp     x24, #ESR_EL1_EC_CP14_64        // CP14 MRRC/MCRR trap
+       b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_BREAKPT_EL0    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv
index 6a9a5329259065a99ceea12c3fe391f5996c448b..add6ea616843139ff81a65f26c344ed9e69fd66d 100644 (file)
@@ -282,12 +282,13 @@ void __init setup_arch(char **cmdline_p)
 #endif
 }
 
-static int __init arm64_of_clk_init(void)
+static int __init arm64_device_init(void)
 {
        of_clk_init(NULL);
+       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
        return 0;
 }
-arch_initcall(arm64_of_clk_init);
+arch_initcall(arm64_device_init);
 
 static DEFINE_PER_CPU(struct cpu, cpu_data);
 
@@ -305,13 +306,6 @@ static int __init topology_init(void)
 }
 subsys_initcall(topology_init);
 
-static int __init arm64_device_probe(void)
-{
-       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-       return 0;
-}
-device_initcall(arm64_device_probe);
-
 static const char *hwcap_str[] = {
        "fp",
        "asimd",
index db01aa978c41e2162ed91714c91e074a361ecdd2..a1b19ed7467cf1c026147acaa2539b1dc6f60817 100644 (file)
@@ -104,13 +104,6 @@ compat_sys_fallocate_wrapper:
        b       sys_fallocate
 ENDPROC(compat_sys_fallocate_wrapper)
 
-compat_sys_fanotify_mark_wrapper:
-       orr     x2, x2, x3, lsl #32
-       mov     w3, w4
-       mov     w4, w5
-       b       sys_fanotify_mark
-ENDPROC(compat_sys_fanotify_mark_wrapper)
-
 #undef __SYSCALL
 #define __SYSCALL(x, y)                .quad   y       // x
 
index 61d7dd29f756c300926410cef60ab523a5a7a7b1..f30852d28590358c6780a22c14049f92a124bdb8 100644 (file)
@@ -267,7 +267,8 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
                return;
 #endif
 
-       if (show_unhandled_signals) {
+       if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
+           printk_ratelimit()) {
                pr_info("%s[%d]: undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
                dump_instr(KERN_INFO, regs);
@@ -294,7 +295,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
        }
 #endif
 
-       if (show_unhandled_signals) {
+       if (show_unhandled_signals && printk_ratelimit()) {
                pr_info("%s[%d]: syscall %d\n", current->comm,
                        task_pid_nr(current), (int)regs->syscallno);
                dump_instr("", regs);
@@ -310,14 +311,20 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
  */
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
+       siginfo_t info;
+       void __user *pc = (void __user *)instruction_pointer(regs);
        console_verbose();
 
        pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
                handler[reason], esr);
+       __show_regs(regs);
+
+       info.si_signo = SIGILL;
+       info.si_errno = 0;
+       info.si_code  = ILL_ILLOPC;
+       info.si_addr  = pc;
 
-       die("Oops - bad mode", regs, 0);
-       local_irq_disable();
-       panic("bad mode");
+       arm64_notify_die("Oops - bad mode", regs, &info, 0);
 }
 
 void __pte_error(const char *file, int line, unsigned long val)
index abe69b80cf7f674d9eb25b884a33e78a223aeb65..48a386094fa3cf98a7e8af3ae8d3b9ba5cce6c21 100644 (file)
@@ -52,7 +52,7 @@ loop1:
        add     x2, x2, #4                      // add 4 (line length offset)
        mov     x4, #0x3ff
        and     x4, x4, x1, lsr #3              // find maximum number on the way size
-       clz     x5, x4                          // find bit position of way size increment
+       clz     w5, w4                          // find bit position of way size increment
        mov     x7, #0x7fff
        and     x7, x7, x1, lsr #13             // extract max number of the index size
 loop2:
index 98af6e760cce6781a5b117d786bcd16fc136a0b1..1426468b77f3bb7b6df96d604851449db13ed0da 100644 (file)
@@ -113,7 +113,8 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 {
        struct siginfo si;
 
-       if (show_unhandled_signals) {
+       if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
+           printk_ratelimit()) {
                pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
                        tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
                        addr, esr);
index f1d8b9bbfdadc5a64a63d781051e6675aa39e322..a82ae8868077f9f32749ccfcc490d166d6fddd1c 100644 (file)
@@ -119,8 +119,7 @@ ENTRY(__cpu_setup)
 
        mov     x0, #3 << 20
        msr     cpacr_el1, x0                   // Enable FP/ASIMD
-       mov     x0, #1
-       msr     oslar_el1, x0                   // Set the debug OS lock
+       msr     mdscr_el1, xzr                  // Reset mdscr_el1
        tlbi    vmalle1is                       // invalidate I + D TLBs
        /*
         * Memory region attributes for LPAE:
index bdc35589277f721805eed5675fe0bfabd8dc0aec..549903cfc2cbe6f223025ff1b04017939f7dd336 100644 (file)
@@ -205,6 +205,11 @@ config ARCH_DISCONTIGMEM_ENABLE
 config ARCH_SPARSEMEM_ENABLE
        def_bool n
 
+config NODES_SHIFT
+       int
+       default "2"
+       depends on NEED_MULTIPLE_NODES
+
 source "mm/Kconfig"
 
 config OWNERSHIP_TRACE
index 4dd4f78d3dcc80a0799a96ab7fc629fe09b728ac..d22af851f3f638b09fdc4394a05e2cebf8a10201 100644 (file)
@@ -2,3 +2,4 @@
 generic-y      += clkdev.h
 generic-y      += exec.h
 generic-y      += trace_clock.h
+generic-y      += param.h
diff --git a/arch/avr32/include/asm/numnodes.h b/arch/avr32/include/asm/numnodes.h
deleted file mode 100644 (file)
index 0b864d7..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_AVR32_NUMNODES_H
-#define __ASM_AVR32_NUMNODES_H
-
-/* Max 4 nodes */
-#define NODES_SHIFT    2
-
-#endif /* __ASM_AVR32_NUMNODES_H */
diff --git a/arch/avr32/include/asm/param.h b/arch/avr32/include/asm/param.h
deleted file mode 100644 (file)
index 009a167..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_AVR32_PARAM_H
-#define __ASM_AVR32_PARAM_H
-
-#include <uapi/asm/param.h>
-
-# define HZ            CONFIG_HZ
-# define USER_HZ       100             /* User interfaces are in "ticks" */
-# define CLOCKS_PER_SEC        (USER_HZ)       /* frequency at which times() counts */
-#endif /* __ASM_AVR32_PARAM_H */
index df53e7a467740a34846a4852c93122f3ef82a18e..3b85eaddf525f2be65d5772be3f28051afac7b43 100644 (file)
@@ -33,3 +33,4 @@ header-y += termbits.h
 header-y += termios.h
 header-y += types.h
 header-y += unistd.h
+generic-y += param.h
diff --git a/arch/avr32/include/uapi/asm/param.h b/arch/avr32/include/uapi/asm/param.h
deleted file mode 100644 (file)
index d28aa5e..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _UAPI__ASM_AVR32_PARAM_H
-#define _UAPI__ASM_AVR32_PARAM_H
-
-
-#ifndef HZ
-# define HZ            100
-#endif
-
-/* TODO: Should be configurable */
-#define EXEC_PAGESIZE  4096
-
-#ifndef NOGROUP
-# define NOGROUP       (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64
-
-#endif /* _UAPI__ASM_AVR32_PARAM_H */
index 596f7305d93f017c43af7fc5f965ea4d0b988246..2c9412908024d4ce88d8945cf953b0d24437dcaf 100644 (file)
@@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
                        break;
                case R_AVR32_GOT18SW:
                        if ((relocation & 0xfffe0003) != 0
-                           && (relocation & 0xfffc0003) != 0xffff0000)
+                           && (relocation & 0xfffc0000) != 0xfffc0000)
                                return reloc_overflow(module, "R_AVR32_GOT18SW",
                                                     relocation);
                        relocation >>= 2;
index 66cf00095b8487210b3187cbf41072bc3d80406b..1fce08632ad763abbe68a640a442cea1e0353a7f 100644 (file)
@@ -141,11 +141,11 @@ archclean:
 
 INSTALL_PATH ?= /tftpboot
 boot := arch/$(ARCH)/boot
-BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip
+BOOT_TARGETS = uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip
 PHONY += $(BOOT_TARGETS) install
-KBUILD_IMAGE := $(boot)/vmImage
+KBUILD_IMAGE := $(boot)/uImage
 
-all: vmImage
+all: uImage
 
 $(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
index f7d27d50d02c83fa35ad4606c11ecbd13a555904..3efaa094fb90e44050ed17f62117a60ca098ae8b 100644 (file)
@@ -6,7 +6,7 @@
 # for more details.
 #
 
-targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip
+targets := uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip
 extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip
 
 ifeq ($(CONFIG_RAMKERNEL),y)
@@ -39,22 +39,22 @@ quiet_cmd_mk_bin_xip = BIN     $@
 $(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE
        $(call if_changed,mk_bin_xip)
 
-$(obj)/vmImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin
        $(call if_changed,uimage,none)
 
-$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
        $(call if_changed,uimage,bzip2)
 
-$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
        $(call if_changed,uimage,gzip)
 
-$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
        $(call if_changed,uimage,lzma)
 
-$(obj)/vmImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
        $(call if_changed,uimage,lzo)
 
-$(obj)/vmImage.xip: $(obj)/vmlinux.bin.xip
+$(obj)/uImage.xip: $(obj)/vmlinux.bin.xip
        $(call if_changed,uimage,none)
 
 suffix-y                      := bin
@@ -64,7 +64,7 @@ suffix-$(CONFIG_KERNEL_LZMA)  := lzma
 suffix-$(CONFIG_KERNEL_LZO)   := lzo
 suffix-$(CONFIG_ROMKERNEL)    := xip
 
-$(obj)/vmImage: $(obj)/vmImage.$(suffix-y)
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
        @ln -sf $(notdir $<) $@
 
 install:
index c8db653c72d2c05f75046026cf1bc1621599d92e..a107a98e99783e4cccb9d47a87bd40c1f6de010d 100644 (file)
@@ -11,7 +11,9 @@
 
 #ifdef CONFIG_SMP
 
+#include <asm/barrier.h>
 #include <linux/linkage.h>
+#include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
 asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
index 6a4cfe2d33679692117308c5ab0d0395f0ec2091..a99957ea9e9b504b66b4e31f6660ddf5efc18c8e 100644 (file)
@@ -24,18 +24,27 @@ struct bfin_sd_host {
 #define CMD_INT_E          (1 << 8)    /* Command Interrupt */
 #define CMD_PEND_E         (1 << 9)    /* Command Pending */
 #define CMD_E              (1 << 10)   /* Command Enable */
+#ifdef RSI_BLKSZ
+#define CMD_CRC_CHECK_D    (1 << 11)   /* CRC Check is disabled */
+#define CMD_DATA0_BUSY     (1 << 12)   /* Check for Busy State on the DATA0 pin */
+#endif
 
 /* SDH_PWR_CTL bitmasks */
+#ifndef RSI_BLKSZ
 #define PWR_ON             0x3         /* Power On */
 #define SD_CMD_OD          (1 << 6)    /* Open Drain Output */
 #define ROD_CTL            (1 << 7)    /* Rod Control */
+#endif
 
 /* SDH_CLK_CTL bitmasks */
 #define CLKDIV             0xff        /* MC_CLK Divisor */
 #define CLK_E              (1 << 8)    /* MC_CLK Bus Clock Enable */
 #define PWR_SV_E           (1 << 9)    /* Power Save Enable */
 #define CLKDIV_BYPASS      (1 << 10)   /* Bypass Divisor */
-#define WIDE_BUS           (1 << 11)   /* Wide Bus Mode Enable */
+#define BUS_MODE_MASK      0x1800      /* Bus Mode Mask */
+#define STD_BUS_1          0x000       /* Standard Bus 1 bit mode */
+#define WIDE_BUS_4         0x800       /* Wide Bus 4 bit mode */
+#define BYTE_BUS_8         0x1000      /* Byte Bus 8 bit mode */
 
 /* SDH_RESP_CMD bitmasks */
 #define RESP_CMD           0x3f        /* Response Command */
@@ -45,7 +54,13 @@ struct bfin_sd_host {
 #define DTX_DIR            (1 << 1)    /* Data Transfer Direction */
 #define DTX_MODE           (1 << 2)    /* Data Transfer Mode */
 #define DTX_DMA_E          (1 << 3)    /* Data Transfer DMA Enable */
+#ifndef RSI_BLKSZ
 #define DTX_BLK_LGTH       (0xf << 4)  /* Data Transfer Block Length */
+#else
+
+/* Bit masks for SDH_BLK_SIZE */
+#define DTX_BLK_LGTH       0x1fff      /* Data Transfer Block Length */
+#endif
 
 /* SDH_STATUS bitmasks */
 #define CMD_CRC_FAIL       (1 << 0)    /* CMD CRC Fail */
@@ -114,10 +129,14 @@ struct bfin_sd_host {
 /* SDH_E_STATUS bitmasks */
 #define SDIO_INT_DET       (1 << 1)    /* SDIO Int Detected */
 #define SD_CARD_DET        (1 << 4)    /* SD Card Detect */
+#define SD_CARD_BUSYMODE   (1 << 31)   /* Card is in Busy mode */
+#define SD_CARD_SLPMODE    (1 << 30)   /* Card in Sleep Mode */
+#define SD_CARD_READY      (1 << 17)   /* Card Ready */
 
 /* SDH_E_MASK bitmasks */
 #define SDIO_MSK           (1 << 1)    /* Mask SDIO Int Detected */
-#define SCD_MSK            (1 << 6)    /* Mask Card Detect */
+#define SCD_MSK            (1 << 4)    /* Mask Card Detect */
+#define CARD_READY_MSK     (1 << 16)   /* Mask Card Ready */
 
 /* SDH_CFG bitmasks */
 #define CLKS_EN            (1 << 0)    /* Clocks Enable */
@@ -126,7 +145,15 @@ struct bfin_sd_host {
 #define SD_RST             (1 << 4)    /* SDMMC Reset */
 #define PUP_SDDAT          (1 << 5)    /* Pull-up SD_DAT */
 #define PUP_SDDAT3         (1 << 6)    /* Pull-up SD_DAT3 */
+#ifndef RSI_BLKSZ
 #define PD_SDDAT3          (1 << 7)    /* Pull-down SD_DAT3 */
+#else
+#define PWR_ON             0x600       /* Power On */
+#define SD_CMD_OD          (1 << 11)   /* Open Drain Output */
+#define BOOT_EN            (1 << 12)   /* Boot Enable */
+#define BOOT_MODE          (1 << 13)   /* Alternate Boot Mode */
+#define BOOT_ACK_EN        (1 << 14)   /* Boot ACK is expected */
+#endif
 
 /* SDH_RD_WAIT_EN bitmasks */
 #define RWR                (1 << 0)    /* Read Wait Request */
index 8a0fed16058f217cfb6badb41dc176faa40b70af..0ca40dd44724200fc0d098a579e1c9f1b50d6c57 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm-generic/bitops/non-atomic.h>
 #else
 
+#include <asm/barrier.h>
 #include <asm/byteorder.h>     /* swab32 */
 #include <linux/linkage.h>
 
index fe0ca03a1cb2dc2552d0c564ec8ca03a17991625..ca67145c6a45987600407d44f80919b0e718b25e 100644 (file)
@@ -622,10 +622,12 @@ do { \
 #define PAGE_SIZE_4KB      0x00010000  /* 4 KB page size */
 #define PAGE_SIZE_1MB      0x00020000  /* 1 MB page size */
 #define PAGE_SIZE_4MB      0x00030000  /* 4 MB page size */
+#ifdef CONFIG_BF60x
 #define PAGE_SIZE_16KB     0x00040000  /* 16 KB page size */
 #define PAGE_SIZE_64KB     0x00050000  /* 64 KB page size */
 #define PAGE_SIZE_16MB     0x00060000  /* 16 MB page size */
 #define PAGE_SIZE_64MB     0x00070000  /* 64 MB page size */
+#endif
 #define CPLB_L1SRAM        0x00000020  /* 0=SRAM mapped in L1, 0=SRAM not
                                         * mapped to L1
                                         */
index 9b33e7247864c2a14f236fac89ba30e40961c028..c865b33eeb68bc271c4a8e9f14e72d75b5c3edef 100644 (file)
 struct ddr_config {
        u32 ddr_clk;
        u32 dmc_ddrctl;
+       u32 dmc_effctl;
        u32 dmc_ddrcfg;
        u32 dmc_ddrtr0;
        u32 dmc_ddrtr1;
@@ -348,6 +349,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [0] = {
                .ddr_clk    = 125,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20705212,
                .dmc_ddrtr1 = 0x201003CF,
@@ -358,6 +360,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [1] = {
                .ddr_clk    = 133,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20806313,
                .dmc_ddrtr1 = 0x2013040D,
@@ -368,6 +371,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [2] = {
                .ddr_clk    = 150,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20A07323,
                .dmc_ddrtr1 = 0x20160492,
@@ -378,6 +382,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [3] = {
                .ddr_clk    = 166,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20A07323,
                .dmc_ddrtr1 = 0x2016050E,
@@ -388,6 +393,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [4] = {
                .ddr_clk    = 200,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20a07323,
                .dmc_ddrtr1 = 0x2016050f,
@@ -398,6 +404,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [5] = {
                .ddr_clk    = 225,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20E0A424,
                .dmc_ddrtr1 = 0x302006DB,
@@ -408,6 +415,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [6] = {
                .ddr_clk    = 250,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20E0A424,
                .dmc_ddrtr1 = 0x3020079E,
@@ -469,6 +477,7 @@ static inline void init_dmc(u32 dmc_clk)
                        bfin_write_DMC0_TR2(ddr_config_table[i].dmc_ddrtr2);
                        bfin_write_DMC0_MR(ddr_config_table[i].dmc_ddrmr);
                        bfin_write_DMC0_EMR1(ddr_config_table[i].dmc_ddrmr1);
+                       bfin_write_DMC0_EFFCTL(ddr_config_table[i].dmc_effctl);
                        bfin_write_DMC0_CTL(ddr_config_table[i].dmc_ddrctl);
                        break;
                }
index 34e96ce02aa9671701197e7bc3b8f65902fbb5d5..b49a53b583d582acbf8670776aa87c6559d0c0e7 100644 (file)
@@ -30,6 +30,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 {
        int i_d, i_i;
        unsigned long addr;
+       unsigned long cplb_pageflags, cplb_pagesize;
 
        struct cplb_entry *d_tbl = dcplb_tbl[cpu];
        struct cplb_entry *i_tbl = icplb_tbl[cpu];
@@ -49,11 +50,20 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
        /* Cover kernel memory with 4M pages.  */
        addr = 0;
 
-       for (; addr < memory_start; addr += 4 * 1024 * 1024) {
+#ifdef PAGE_SIZE_16MB
+       cplb_pageflags = PAGE_SIZE_16MB;
+       cplb_pagesize = SIZE_16M;
+#else
+       cplb_pageflags = PAGE_SIZE_4MB;
+       cplb_pagesize = SIZE_4M;
+#endif
+
+
+       for (; addr < memory_start; addr += cplb_pagesize) {
                d_tbl[i_d].addr = addr;
-               d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
+               d_tbl[i_d++].data = SDRAM_DGENERIC | cplb_pageflags;
                i_tbl[i_i].addr = addr;
-               i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
+               i_tbl[i_i++].data = SDRAM_IGENERIC | cplb_pageflags;
        }
 
 #ifdef CONFIG_ROMKERNEL
index e854f9066cbde005413bf0659e59fbed6a3b0423..79cc0f6dcdd5c26fa53137914ccc1bed0fdfd5ab 100644 (file)
@@ -145,7 +145,7 @@ MGR_ATTR static int dcplb_miss(int cpu)
        unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
        int status = bfin_read_DCPLB_STATUS();
        int idx;
-       unsigned long d_data, base, addr1, eaddr;
+       unsigned long d_data, base, addr1, eaddr, cplb_pagesize, cplb_pageflags;
 
        nr_dcplb_miss[cpu]++;
        if (unlikely(status & FAULT_USERSUPV))
@@ -167,18 +167,37 @@ MGR_ATTR static int dcplb_miss(int cpu)
        if (unlikely(d_data == 0))
                return CPLB_NO_ADDR_MATCH;
 
-       addr1 = addr & ~(SIZE_4M - 1);
        addr &= ~(SIZE_1M - 1);
        d_data |= PAGE_SIZE_1MB;
-       if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
+
+       /* BF60x supports CPLB page sizes larger than 4M */
+#ifdef PAGE_SIZE_16MB
+       cplb_pageflags = PAGE_SIZE_16MB;
+       cplb_pagesize = SIZE_16M;
+#else
+       cplb_pageflags = PAGE_SIZE_4MB;
+       cplb_pagesize = SIZE_4M;
+#endif
+
+find_pagesize:
+       addr1 = addr & ~(cplb_pagesize - 1);
+       if (addr1 >= base && (addr1 + cplb_pagesize) <= eaddr) {
                /*
                 * This works because
                 * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
                 */
-               d_data |= PAGE_SIZE_4MB;
+               d_data |= cplb_pageflags;
                addr = addr1;
+               goto found_pagesize;
+       } else {
+               if (cplb_pagesize > SIZE_4M) {
+                       cplb_pageflags = PAGE_SIZE_4MB;
+                       cplb_pagesize = SIZE_4M;
+                       goto find_pagesize;
+               }
        }
 
+found_pagesize:
 #ifdef CONFIG_BF60x
        if ((addr >= ASYNC_BANK0_BASE)
                && (addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
index 404045dcc5e4bc321c30ddf0eecc1a259e5312ce..5b80d59e66e57b11a3592378abc6914adc938f28 100644 (file)
 #include <asm/cplbinit.h>
 #include <asm/blackfin.h>
 
-static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" };
-#define page(flags)    (((flags) & 0x30000) >> 16)
+static char const page_strtbl[][4] = {
+       "1K", "4K", "1M", "4M",
+#ifdef CONFIG_BF60x
+       "16K", "64K", "16M", "64M",
+#endif
+};
+#define page(flags)    (((flags) & 0x70000) >> 16)
 #define strpage(flags) page_strtbl[page(flags)]
 
 struct cplbinfo_data {
index fb96e607adcf815890de6ce475c55a3cfd76a42b..107b306b06f10819fe1fc6c3d1e3e87cdaa3afc6 100644 (file)
@@ -1314,7 +1314,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                        seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
        }
 
-       seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
+       seq_printf(m, "\ncpu MHz\t\t: %lu.%06lu/%lu.%06lu\n",
                cclk/1000000, cclk%1000000,
                sclk/1000000, sclk%1000000);
        seq_printf(m, "bogomips\t: %lu.%02lu\n"
index 95114ed395ac6939d6a82551c289de52fa943edf..6a3a14bcd3a1ee338be3c90d68f3672575904d39 100644 (file)
@@ -455,6 +455,7 @@ static struct platform_device bfin_async_nand_device = {
 static void bfin_plat_nand_init(void)
 {
        gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat");
+       gpio_direction_input(BFIN_NAND_PLAT_READY);
 }
 #else
 static void bfin_plat_nand_init(void) {}
index a4fce0370c1db2455dfeb16f3526b993333bab38..755f0dc120100a7f359d1137059c3f094cb10d75 100644 (file)
@@ -764,7 +764,6 @@ static struct platform_device i2c_bfin_twi1_device = {
        .num_resources = ARRAY_SIZE(bfin_twi1_resource),
        .resource = bfin_twi1_resource,
 };
-#endif /* CONFIG_BF542 */
 #endif /* CONFIG_I2C_BLACKFIN_TWI */
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
index 4954cf3f7e16d82ec9dba7b5bc1841362e465c94..102ee4025ac9fb373fd7192cb08fb564b660d4d1 100644 (file)
 #define bfin_write_DMC0_EMR1(val) bfin_write32(DMC0_EMR1, val)
 #define bfin_read_DMC0_CTL() bfin_read32(DMC0_CTL)
 #define bfin_write_DMC0_CTL(val) bfin_write32(DMC0_CTL, val)
+#define bfin_read_DMC0_EFFCTL() bfin_read32(DMC0_EFFCTL)
+#define bfin_write_DMC0_EFFCTL(val) bfin_write32(DMC0_EFFCTL, val)
 #define bfin_read_DMC0_STAT() bfin_read32(DMC0_STAT)
 #define bfin_write_DMC0_STAT(val) bfin_write32(DMC0_STAT, val)
 #define bfin_read_DMC0_DLLCTL() bfin_read32(DMC0_DLLCTL)
index c3ffe3e54edc1a0457359b2c198d555330d09bf7..ef3a9de01954511a352fa5b24285136789425e21 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm/machvec.h>
 
-#ifdef CONFIG_SMP
-# define tlb_fast_mode(tlb)    ((tlb)->nr == ~0U)
-#else
-# define tlb_fast_mode(tlb)    (1)
-#endif
-
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -60,7 +54,7 @@
 
 struct mmu_gather {
        struct mm_struct        *mm;
-       unsigned int            nr;             /* == ~0U => fast mode */
+       unsigned int            nr;
        unsigned int            max;
        unsigned char           fullmm;         /* non-zero means full mm flush */
        unsigned char           need_flush;     /* really unmapped some PTEs? */
@@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 static inline void
 ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
+       unsigned long i;
        unsigned int nr;
 
        if (!tlb->need_flush)
@@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 
        /* lastly, release the freed pages */
        nr = tlb->nr;
-       if (!tlb_fast_mode(tlb)) {
-               unsigned long i;
-               tlb->nr = 0;
-               tlb->start_addr = ~0UL;
-               for (i = 0; i < nr; ++i)
-                       free_page_and_swap_cache(tlb->pages[i]);
-       }
+
+       tlb->nr = 0;
+       tlb->start_addr = ~0UL;
+       for (i = 0; i < nr; ++i)
+               free_page_and_swap_cache(tlb->pages[i]);
 }
 
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
@@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m
        tlb->mm = mm;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
-       /*
-        * Use fast mode if only 1 CPU is online.
-        *
-        * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
-        * doesn't work because of speculative accesses and software prefetching: the page
-        * table of "mm" may (and usually is) the currently active page table and even
-        * though the kernel won't do any user-space accesses during the TLB shoot down, a
-        * compiler might use speculation or lfetch.fault on what happens to be a valid
-        * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
-        * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
-        * problems.  (We could make fast-mode work by switching the current task to a
-        * different "mm" during the shootdown.) --davidm 08/02/2002
-        */
-       tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+       tlb->nr = 0;
        tlb->fullmm = full_mm_flush;
        tlb->start_addr = ~0UL;
 }
@@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
        tlb->need_flush = 1;
 
-       if (tlb_fast_mode(tlb)) {
-               free_page_and_swap_cache(page);
-               return 1; /* avoid calling tlb_flush_mmu */
-       }
-
        if (!tlb->nr && tlb->pages == tlb->local)
                __tlb_alloc_page(tlb);
 
index d266787725b468ab94286aeb3e7cca62698c5d6c..33013dfcd3e1d58fb0b0f3495ce1956c76233340 100644 (file)
@@ -223,13 +223,25 @@ config M5307
        help
          Motorola ColdFire 5307 processor support.
 
+config M53xx
+       bool
+
 config M532x
        bool "MCF532x"
        depends on !MMU
+       select M53xx
        select HAVE_CACHE_CB
        help
          Freescale (Motorola) ColdFire 532x processor support.
 
+config M537x
+       bool "MCF537x"
+       depends on !MMU
+       select M53xx
+       select HAVE_CACHE_CB
+       help
+         Freescale ColdFire 537x processor support.
+
 config M5407
        bool "MCF5407"
        depends on !MMU
index 7240584d343974847fbaab832bcf50388d8d2ba3..b9ab0a69561cac3de87657e06700f8c0cf456413 100644 (file)
@@ -358,6 +358,13 @@ config COBRA5329
        help
          Support for the senTec COBRA5329 board.
 
+config M5373EVB
+       bool "Freescale M5373EVB board support"
+       depends on M537x
+       select FREESCALE
+       help
+         Support for the Freescale M5373EVB board.
+
 config M5407C3
        bool "Motorola M5407C3 board support"
        depends on M5407
@@ -539,15 +546,6 @@ config ROMVEC
          68000 type variants the vectors are at the base of the boot device
          on system startup.
 
-config ROMVECSIZE
-       hex "Size of ROM vector region (in bytes)"
-       default "0x400"
-       depends on ROM
-       help
-         Define the size of the vector region in ROM. For most 68000
-         variants this would be 0x400 bytes in size. Set to 0 if you do
-         not want a vector region at the start of the ROM.
-
 config ROMSTART
        hex "Address of the base of system image in ROM"
        default "0x400"
index 2f02acfb8edf4647e28ada0e48801b68c0c965f3..7f7830f2c5bcb488745c0326e36a1889a733e04b 100644 (file)
@@ -45,6 +45,7 @@ cpuflags-$(CONFIG_M5441x)     := $(call cc-option,-mcpu=54455,-mcfv4e)
 cpuflags-$(CONFIG_M54xx)       := $(call cc-option,-mcpu=5475,-m5200)
 cpuflags-$(CONFIG_M5407)       := $(call cc-option,-mcpu=5407,-m5200)
 cpuflags-$(CONFIG_M532x)       := $(call cc-option,-mcpu=532x,-m5307)
+cpuflags-$(CONFIG_M537x)       := $(call cc-option,-mcpu=537x,-m5307)
 cpuflags-$(CONFIG_M5307)       := $(call cc-option,-mcpu=5307,-m5200)
 cpuflags-$(CONFIG_M528x)       := $(call cc-option,-mcpu=528x,-m5307)
 cpuflags-$(CONFIG_M5275)       := $(call cc-option,-mcpu=5275,-m5307)
index 90d3109c82f402df0356d43be23d9d965af68630..19325e117eeaaec7b844e1e50d328719077e3261 100644 (file)
@@ -1,55 +1,78 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-amiga"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_AMIGA=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
 CONFIG_M68060=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
+CONFIG_AMIGA=y
 CONFIG_ZORRO=y
 CONFIG_AMIGA_PCMCIA=y
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
 CONFIG_ZORRO_NAMES=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -57,25 +80,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -86,6 +121,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -99,22 +136,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -124,7 +170,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -133,18 +178,30 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_AMIGA=m
@@ -154,11 +211,13 @@ CONFIG_AMIGA_FLOPPY=y
 CONFIG_AMIGA_Z2RAM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_BLK_DEV_GAYLE=y
 CONFIG_BLK_DEV_BUDDHA=y
@@ -172,57 +231,77 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_A3000_SCSI=y
 CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
 CONFIG_SCSI_A4000T=y
 CONFIG_SCSI_ZORRO7XX=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
-CONFIG_ARIADNE=y
+# CONFIG_NET_VENDOR_3COM is not set
 CONFIG_A2065=y
+CONFIG_ARIADNE=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_HYDRA=y
-CONFIG_ZORRO8390=y
 CONFIG_APNE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_AMIGA=y
 # CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_MOUSE_PS2 is not set
@@ -233,11 +312,14 @@ CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
 # CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_PRINTER=m
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_CIRRUS=y
@@ -252,48 +334,64 @@ CONFIG_SOUND=m
 CONFIG_DMASOUND_PAULA=m
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_RP5C01=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
 CONFIG_AMIGA_BUILTIN_SERIAL=y
 CONFIG_SERIAL_CONSOLE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -332,10 +430,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -345,19 +456,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -373,6 +481,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 8f4f657fdbc67987daf0bcba1948390e43f6c163..14dc6ccda7f45349c82c0dd1f0d113831b3ac07b 100644 (file)
@@ -1,55 +1,76 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-apollo"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_APOLLO=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_APOLLO=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -57,25 +78,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -86,6 +119,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -99,22 +134,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -124,7 +168,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -133,21 +176,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -162,57 +218,74 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
 CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -221,47 +294,61 @@ CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_CLUT224 is not set
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -300,10 +387,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -313,19 +413,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -341,6 +438,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 4571d33903fed1c1a1cc75f165e0c250e598183b..6d5370c914b265123b6963df409ca3b82d17f8cf 100644 (file)
@@ -1,53 +1,75 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-atari"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_ATARI=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_ATARI=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_STRAM_PROC=y
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -55,25 +77,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -84,6 +118,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -97,22 +133,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -122,7 +167,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -131,18 +175,30 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_ATARI=m
@@ -150,11 +206,13 @@ CONFIG_PARPORT_1284=y
 CONFIG_ATARI_FLOPPY=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_BLK_DEV_FALCON_IDE=y
 CONFIG_RAID_ATTRS=m
@@ -167,63 +225,81 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_ATARI_SCSI=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
-CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_VETH=m
 CONFIG_ATARILANCE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_ATARI=y
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_ATARI=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_PRINTER=m
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_ATARI=y
@@ -233,47 +309,64 @@ CONFIG_SOUND=m
 CONFIG_DMASOUND_ATARI=m
 CONFIG_HID=m
 CONFIG_HIDRAW=y
-# CONFIG_USB_SUPPORT is not set
+CONFIG_UHID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+CONFIG_NATFEAT=y
+CONFIG_NFBLOCK=y
+CONFIG_NFCON=y
+CONFIG_NFETH=y
 CONFIG_ATARI_DSP56K=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -312,10 +405,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -325,19 +431,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -353,6 +456,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 12f211733ba02e8f58aa121e777b4b99c7cd6631..c015ddb6fd80635c81affec06369ced3f62185f8 100644 (file)
@@ -1,53 +1,74 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-bvme6000"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_VME=y
-CONFIG_BVME6000=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_VME=y
+CONFIG_BVME6000=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -55,25 +76,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -84,6 +117,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -97,22 +132,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -122,7 +166,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -131,21 +174,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -160,103 +216,131 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_BVME6000_SCSI=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
 CONFIG_BVME6000_NET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -295,10 +379,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -308,19 +405,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -336,7 +430,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
-CONFIG_CRC32=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 215389a5407fa215af1501ed2ea7d9a4d95fca5a..ec7382d8afff5390a4210a45ba348637605c4f59 100644 (file)
@@ -1,54 +1,76 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-hp300"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_HP300=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_HP300=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -56,25 +78,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -85,6 +119,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -98,22 +134,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -123,7 +168,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -132,21 +176,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -161,59 +218,77 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_HPLANCE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
 CONFIG_INPUT_MISC=y
 CONFIG_HP_SDC_RTC=m
-# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_SERPORT=m
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -222,47 +297,60 @@ CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -301,10 +389,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -314,19 +415,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -342,6 +440,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index cb9dfb30b6747c1fb472c0292af103021f23f001..7d46fbec70424dc84fbe07a9ab1e77c7d1f2bec2 100644 (file)
@@ -1,49 +1,75 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-mac"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MAC=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_MAC=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -51,25 +77,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -80,6 +118,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -93,22 +133,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -118,7 +167,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -127,31 +175,45 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
 CONFIG_IPDDP_DECAP=y
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
-CONFIG_BLK_DEV_SWIM=y
+CONFIG_BLK_DEV_SWIM=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_BLK_DEV_MAC_IDE=y
 CONFIG_RAID_ATTRS=m
@@ -164,29 +226,30 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MAC_SCSI=y
 CONFIG_SCSI_MAC_ESP=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_ADB=y
 CONFIG_ADB_MACII=y
-CONFIG_ADB_MACIISI=y
 CONFIG_ADB_IOP=y
 CONFIG_ADB_PMU68K=y
 CONFIG_ADB_CUDA=y
@@ -194,46 +257,61 @@ CONFIG_INPUT_ADBHID=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MAC8390=y
-CONFIG_MAC89x0=m
-CONFIG_MACSONIC=m
 CONFIG_MACMACE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_MAC89x0=y
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+CONFIG_MACSONIC=y
+CONFIG_MAC8390=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
 CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_PMACZILOG=y
 CONFIG_SERIAL_PMACZILOG_TTYS=y
 CONFIG_SERIAL_PMACZILOG_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_VALKYRIE=y
@@ -242,46 +320,60 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=y
-CONFIG_HFSPLUS_FS=y
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
+CONFIG_NFS_FS=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -320,10 +412,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -333,19 +438,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -361,6 +463,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 8d5def4a31e026e657ea78429a3fa36ac3d4fe42..0f795d8e65fafbc28b9d765911ac2416db5f3c6c 100644 (file)
@@ -1,15 +1,29 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-multi"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
+CONFIG_M68020=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_M68KFPU_EMU=y
 CONFIG_AMIGA=y
 CONFIG_ATARI=y
 CONFIG_MAC=y
@@ -21,48 +35,50 @@ CONFIG_BVME6000=y
 CONFIG_HP300=y
 CONFIG_SUN3X=y
 CONFIG_Q40=y
-CONFIG_M68020=y
-CONFIG_M68040=y
-CONFIG_M68060=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
 CONFIG_ZORRO=y
 CONFIG_AMIGA_PCMCIA=y
-CONFIG_STRAM_PROC=y
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
 CONFIG_ZORRO_NAMES=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -70,25 +86,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -99,6 +127,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -112,22 +142,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -137,7 +176,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -146,22 +184,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
 CONFIG_IPDDP_DECAP=y
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_AMIGA=m
@@ -170,15 +220,17 @@ CONFIG_PARPORT_ATARI=m
 CONFIG_PARPORT_1284=y
 CONFIG_AMIGA_FLOPPY=y
 CONFIG_ATARI_FLOPPY=y
-CONFIG_BLK_DEV_SWIM=y
+CONFIG_BLK_DEV_SWIM=m
 CONFIG_AMIGA_Z2RAM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_BLK_DEV_GAYLE=y
 CONFIG_BLK_DEV_BUDDHA=y
@@ -195,11 +247,9 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_A3000_SCSI=y
 CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
@@ -213,21 +263,24 @@ CONFIG_MVME16x_SCSI=y
 CONFIG_BVME6000_SCSI=y
 CONFIG_SUN3X_ESP=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_ADB=y
 CONFIG_ADB_MACII=y
-CONFIG_ADB_MACIISI=y
 CONFIG_ADB_IOP=y
 CONFIG_ADB_PMU68K=y
 CONFIG_ADB_CUDA=y
@@ -235,49 +288,64 @@ CONFIG_INPUT_ADBHID=y
 CONFIG_MAC_EMUMOUSEBTN=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
-CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
-CONFIG_ARIADNE=y
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
 CONFIG_A2065=y
-CONFIG_HYDRA=y
-CONFIG_ZORRO8390=y
-CONFIG_APNE=y
-CONFIG_MAC8390=y
-CONFIG_MAC89x0=y
-CONFIG_MACSONIC=y
-CONFIG_MACMACE=y
-CONFIG_MVME147_NET=y
-CONFIG_MVME16x_NET=y
-CONFIG_BVME6000_NET=y
+CONFIG_ARIADNE=y
 CONFIG_ATARILANCE=y
-CONFIG_SUN3LANCE=y
 CONFIG_HPLANCE=y
+CONFIG_MVME147_NET=y
+CONFIG_SUN3LANCE=y
+CONFIG_MACMACE=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_MAC89x0=y
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+CONFIG_BVME6000_NET=y
+CONFIG_MVME16x_NET=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+CONFIG_MACSONIC=y
+CONFIG_HYDRA=y
+CONFIG_MAC8390=y
 CONFIG_NE2000=m
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_APNE=y
+CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_AMIGA=y
 CONFIG_KEYBOARD_ATARI=y
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
 CONFIG_MOUSE_AMIGA=m
 CONFIG_MOUSE_ATARI=m
@@ -285,18 +353,20 @@ CONFIG_INPUT_JOYSTICK=y
 CONFIG_JOYSTICK_AMIGA=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
-CONFIG_HP_SDC_RTC=y
-# CONFIG_SERIO_SERPORT is not set
+CONFIG_HP_SDC_RTC=m
 CONFIG_SERIO_Q40KBD=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_PMACZILOG=y
 CONFIG_SERIAL_PMACZILOG_TTYS=y
 CONFIG_SERIAL_PMACZILOG_CONSOLE=y
 CONFIG_PRINTER=m
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_CIRRUS=y
@@ -316,7 +386,20 @@ CONFIG_DMASOUND_PAULA=m
 CONFIG_DMASOUND_Q40=m
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_RP5C01=m
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+CONFIG_NATFEAT=y
+CONFIG_NFBLOCK=y
+CONFIG_NFCON=y
+CONFIG_NFETH=y
 CONFIG_ATARI_DSP56K=m
 CONFIG_AMIGA_BUILTIN_SERIAL=y
 CONFIG_SERIAL_CONSOLE=y
@@ -324,42 +407,49 @@ CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=y
-CONFIG_HFSPLUS_FS=y
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -398,10 +488,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -411,19 +514,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -439,6 +539,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=y
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index e2af46f530c1c589d94e429149a78bc934f7d9f3..5586c6529fce367fb22061b87c708b9ba10a1f61 100644 (file)
@@ -1,52 +1,73 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-mvme147"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
+CONFIG_M68030=y
 CONFIG_VME=y
 CONFIG_MVME147=y
-CONFIG_M68030=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -54,25 +75,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -83,6 +116,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -96,22 +131,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -121,7 +165,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -130,21 +173,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -159,103 +215,132 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MVME147_SCSI=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_MVME147_NET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -294,10 +379,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -307,19 +405,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -335,6 +430,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 7c9402b2097fcb89b81202970bb1bc88a6ef90d4..e5e8262bbacdd0a94c4f0b1e0e89c06166ddd8ec 100644 (file)
@@ -1,53 +1,74 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-mvme16x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_VME=y
-CONFIG_MVME16x=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_VME=y
+CONFIG_MVME16x=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -55,25 +76,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -84,6 +117,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -97,22 +132,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -122,7 +166,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -131,21 +174,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -160,103 +216,131 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MVME16x_SCSI=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
 CONFIG_MVME16x_NET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_SERIAL=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -295,10 +379,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -308,19 +405,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -336,6 +430,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index 19d23db690a4789bcf3d9369850170bd6c9f40bd..8982370e8b42166a96637687c0aa9bbe0ddc5731 100644 (file)
@@ -1,49 +1,74 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-q40"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_Q40=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SUN_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_M68040=y
 CONFIG_M68060=y
+CONFIG_Q40=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -51,25 +76,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -80,6 +117,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -93,22 +132,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -118,7 +166,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -127,26 +174,40 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
+CONFIG_IDE_GD_ATAPI=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_BLK_DEV_Q40IDE=y
 CONFIG_RAID_ATTRS=m
@@ -159,61 +220,82 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_NE2000=m
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_M68K_BEEP=m
-CONFIG_SERIO=m
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_Q40KBD=m
+CONFIG_SERIO_Q40KBD=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -222,46 +304,61 @@ CONFIG_SOUND=m
 CONFIG_DMASOUND_Q40=m
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -300,10 +397,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -313,19 +423,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -341,6 +448,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index ca6c0b4cab7754be95b0ed9bf8564cd243287abb..54674d61e00141069e1fb5113e22e19784fc5c01 100644 (file)
@@ -1,50 +1,71 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-sun3"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_SUN3=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -52,25 +73,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -81,6 +114,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -94,22 +129,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -119,7 +163,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -128,21 +171,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -157,107 +213,136 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_SUN3_SCSI=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_SUN3LANCE=y
+# CONFIG_NET_CADENCE is not set
 CONFIG_SUN3_82586=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
-# CONFIG_SERIO_SERPORT is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -296,10 +381,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -309,19 +407,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -337,6 +432,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index c80941c7759e2f7530c61b2b3a73545dea1892af..832d9539f44194faff9d4e6940223f1d938850fc 100644 (file)
@@ -1,50 +1,71 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-sun3x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SYSV68_PARTITION=y
+CONFIG_IOSCHED_DEADLINE=m
 CONFIG_SUN3X=y
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PROC_HARDWARE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
 CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 # CONFIG_NF_CT_PROTO_DCCP is not set
 CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
@@ -52,25 +73,37 @@ CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
 CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
 CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
 CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
@@ -81,6 +114,8 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
 CONFIG_NETFILTER_XT_MATCH_MARK=m
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -94,22 +129,31 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
 CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_NF_NAT_IPV4=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -119,7 +163,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -128,21 +171,34 @@ CONFIG_IP6_NF_MATCH_OPTS=m
 CONFIG_IP6_NF_MATCH_HL=m
 CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_RDS=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
 CONFIG_ATALK=m
+CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_DAT=y
+# CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
@@ -157,106 +213,136 @@ CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_SCSI_SAS_ATTRS=m
 CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_SUN3X_ESP=y
 CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID456=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
 CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
-CONFIG_MACVLAN=m
 CONFIG_EQUALIZER=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_VXLAN=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
 CONFIG_SUN3LANCE=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
 CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_SLIP=m
 CONFIG_SLIP_COMPRESSED=y
 CONFIG_SLIP_SMART=y
 CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_SUNKBD=y
-CONFIG_MOUSE_PS2=m
+# CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_SERIAL=m
-# CONFIG_SERIO_SERPORT is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=m
-CONFIG_GEN_RTC_X=y
+CONFIG_NTP_PPS=y
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PTP_1588_CLOCK=m
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_HID=m
 CONFIG_HIDRAW=y
+CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PROC_HARDWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_JFS_FS=m
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
-# CONFIG_OCFS2_FS_STATS is not set
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
+CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
-CONFIG_MINIX_FS=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_MINIX_FS=m
+CONFIG_OMFS_FS=m
 CONFIG_HPFS_FS=m
+CONFIG_QNX4FS_FS=m
+CONFIG_QNX6FS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
+CONFIG_CIFS=m
+# CONFIG_CIFS_DEBUG is not set
 CONFIG_CODA_FS=m
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -295,10 +381,23 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
@@ -308,19 +407,16 @@ CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -336,6 +432,14 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC16=m
 CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_TEST=m
index c7933e41f10d7a5d864731467bc6435d0eb0b5a5..09d77a862da3d961029bdbd260557a33f472523f 100644 (file)
@@ -6,7 +6,6 @@ generic-y += device.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
-generic-y += futex.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index a73998528d26dddbc8db3b10979a5156d8a25dca..66a36bd51aa12d120d1502610e0885411a3058a2 100644 (file)
@@ -480,23 +480,6 @@ typedef struct scc_enet {
 #define SICR_ENET_CLKRT        ((uint)0x0000003d)
 #endif
 
-#ifdef CONFIG_RPXLITE
-/* This ENET stuff is for the MPC850 with ethernet on SCC2.  Some of
- * this may be unique to the RPX-Lite configuration.
- * Note TENA is on Port B.
- */
-#define PA_ENET_RXD    ((ushort)0x0004)
-#define PA_ENET_TXD    ((ushort)0x0008)
-#define PA_ENET_TCLK   ((ushort)0x0200)
-#define PA_ENET_RCLK   ((ushort)0x0800)
-#define PB_ENET_TENA   ((uint)0x00002000)
-#define PC_ENET_CLSN   ((ushort)0x0040)
-#define PC_ENET_RENA   ((ushort)0x0080)
-
-#define SICR_ENET_MASK ((uint)0x0000ff00)
-#define SICR_ENET_CLKRT        ((uint)0x00003d00)
-#endif
-
 #ifdef CONFIG_BSEIP
 /* This ENET stuff is for the MPC823 with ethernet on SCC2.
  * This is unique to the BSE ip-Engine board.
diff --git a/arch/m68k/include/asm/dbg.h b/arch/m68k/include/asm/dbg.h
deleted file mode 100644 (file)
index 27af327..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#define DEBUG 1
-#ifdef CONFIG_COLDFIRE
-#define        BREAK asm volatile ("halt")
-#else
-#define BREAK *(volatile unsigned char *)0xdeadbee0 = 0
-#endif
index 0ff3fc6a6d9aaafda260cb0d39a70b431e6aa422..429fe26e320c9813afdf88011195552498a90032 100644 (file)
@@ -39,7 +39,7 @@
 #define MAX_M68K_DMA_CHANNELS 4
 #elif defined(CONFIG_M5272)
 #define MAX_M68K_DMA_CHANNELS 1
-#elif defined(CONFIG_M532x)
+#elif defined(CONFIG_M53xx)
 #define MAX_M68K_DMA_CHANNELS 0
 #else
 #define MAX_M68K_DMA_CHANNELS 2
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
new file mode 100644 (file)
index 0000000..bc868af
--- /dev/null
@@ -0,0 +1,94 @@
+#ifndef _ASM_M68K_FUTEX_H
+#define _ASM_M68K_FUTEX_H
+
+#ifdef __KERNEL__
+#if !defined(CONFIG_MMU)
+#include <asm-generic/futex.h>
+#else  /* CONFIG_MMU */
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
+{
+       u32 val;
+
+       if (unlikely(get_user(val, uaddr) != 0))
+               return -EFAULT;
+
+       if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+               return -EFAULT;
+
+       *uval = val;
+
+       return 0;
+}
+
+static inline int
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+       int op = (encoded_op >> 28) & 7;
+       int cmp = (encoded_op >> 24) & 15;
+       int oparg = (encoded_op << 8) >> 20;
+       int cmparg = (encoded_op << 20) >> 20;
+       int oldval, ret;
+       u32 tmp;
+
+       if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+               oparg = 1 << oparg;
+
+       pagefault_disable();    /* implies preempt_disable() */
+
+       ret = -EFAULT;
+       if (unlikely(get_user(oldval, uaddr) != 0))
+               goto out_pagefault_enable;
+
+       ret = 0;
+       tmp = oldval;
+
+       switch (op) {
+       case FUTEX_OP_SET:
+               tmp = oparg;
+               break;
+       case FUTEX_OP_ADD:
+               tmp += oparg;
+               break;
+       case FUTEX_OP_OR:
+               tmp |= oparg;
+               break;
+       case FUTEX_OP_ANDN:
+               tmp &= ~oparg;
+               break;
+       case FUTEX_OP_XOR:
+               tmp ^= oparg;
+               break;
+       default:
+               ret = -ENOSYS;
+       }
+
+       if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+               ret = -EFAULT;
+
+out_pagefault_enable:
+       pagefault_enable();     /* subsumes preempt_enable() */
+
+       if (ret == 0) {
+               switch (cmp) {
+               case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+               case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+               case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+               case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+               case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+               case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+               default: ret = -ENOSYS;
+               }
+       }
+       return ret;
+}
+
+#endif /* CONFIG_MMU */
+#endif /* __KERNEL__ */
+#endif /* _ASM_M68K_FUTEX_H */
index cd952b0a8bd32d81db35ef6382b9baa0af2bf410..3177ce8331d6916d5c83362f6810d4fd28728cd5 100644 (file)
@@ -55,8 +55,8 @@
 #define        CACHE_SIZE      0x2000          /* 8k of unified cache */
 #define        ICACHE_SIZE     CACHE_SIZE
 #define        DCACHE_SIZE     CACHE_SIZE
-#elif defined(CONFIG_M532x)
-#define        CACHE_SIZE      0x4000          /* 32k of unified cache */
+#elif defined(CONFIG_M53xx)
+#define        CACHE_SIZE      0x4000          /* 16k of unified cache */
 #define        ICACHE_SIZE     CACHE_SIZE
 #define        DCACHE_SIZE     CACHE_SIZE
 #endif
similarity index 99%
rename from arch/m68k/include/asm/m532xsim.h
rename to arch/m68k/include/asm/m53xxsim.h
index 8668e47ced0e6ebbd019b183f2f6dde0c6197cc7..faa1a2133bfdad99ce5b2342042a82df58017122 100644 (file)
@@ -1,15 +1,15 @@
 /****************************************************************************/
 
 /*
- *     m532xsim.h -- ColdFire 5329 registers
+ *     m53xxsim.h -- ColdFire 5329 registers
  */
 
 /****************************************************************************/
-#ifndef        m532xsim_h
-#define        m532xsim_h
+#ifndef        m53xxsim_h
+#define        m53xxsim_h
 /****************************************************************************/
 
-#define        CPU_NAME                "COLDFIRE(m532x)"
+#define        CPU_NAME                "COLDFIRE(m53xx)"
 #define        CPU_INSTR_PER_JIFFY     3
 #define        MCF_BUSCLK              (MCF_CLK / 3)
 
 /*
  *  QSPI module.
  */
-#define        MCFQSPI_BASE            0xFC058000      /* Base address of QSPI */
+#define        MCFQSPI_BASE            0xFC05C000      /* Base address of QSPI */
 #define        MCFQSPI_SIZE            0x40            /* Size of QSPI region */
 
 #define        MCFQSPI_CS0             84
 #define MCFEPORT_EPFR                 (0xFC094006)
 
 /********************************************************************/
-#endif /* m532xsim_h */
+#endif /* m53xxsim_h */
index 192bbfeabf70c1047288286b37a7ee686d0d32f0..6d13cae44af530240b99b83e4248288ee939ae69 100644 (file)
  */
 #define ACR0_MODE      (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
                         ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
+#if defined(CONFIG_CACHE_COPYBACK)
 #define ACR1_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
-                        ACR_ENABLE+ACR_SUPER+ACR_SP)
+                        ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_CP)
+#else
+#define ACR1_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+                        ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_WT)
+#endif
 #define ACR2_MODE      0
 #define ACR3_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
                         ACR_ENABLE+ACR_SUPER+ACR_SP)
index fa1059f50dfcd1bef9b98c0b482839325c642ddb..c41ebf45f1d0f576a384d1a789fc2940b3e3a460 100644 (file)
@@ -104,7 +104,7 @@ static inline void gpio_free(unsigned gpio)
 #if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
     defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
     defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M532x) || defined(CONFIG_M54xx) || \
+    defined(CONFIG_M53xx) || defined(CONFIG_M54xx) || \
     defined(CONFIG_M5441x)
 
 /* These parts have GPIO organized by 8 bit ports */
@@ -139,7 +139,7 @@ static inline void gpio_free(unsigned gpio)
 
 #if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
     defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+    defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 /*
  * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses
  * read-modify-write to change an output and a GPIO module which has separate
@@ -195,7 +195,7 @@ static inline u32 __mcfgpio_ppdr(unsigned gpio)
                return MCFSIM2_GPIO1READ;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
        if (gpio < 8)
                return MCFEPORT_EPPDR;
@@ -237,7 +237,7 @@ static inline u32 __mcfgpio_podr(unsigned gpio)
                return MCFSIM2_GPIO1WRITE;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
        if (gpio < 8)
                return MCFEPORT_EPDR;
@@ -279,7 +279,7 @@ static inline u32 __mcfgpio_pddr(unsigned gpio)
                return MCFSIM2_GPIO1ENABLE;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
        if (gpio < 8)
                return MCFEPORT_EPDDR;
index a04fd9b2714c7d39b1365337b63ee55942a1f9f3..bc867de8a1e9a232f879c7c12eaf042a04e7c43e 100644 (file)
@@ -36,8 +36,8 @@
 #elif defined(CONFIG_M5307)
 #include <asm/m5307sim.h>
 #include <asm/mcfintc.h>
-#elif defined(CONFIG_M532x)
-#include <asm/m532xsim.h>
+#elif defined(CONFIG_M53xx)
+#include <asm/m53xxsim.h>
 #elif defined(CONFIG_M5407)
 #include <asm/m5407sim.h>
 #include <asm/mcfintc.h>
index da2fa43c2e458438ec61c6246faf5379ae9f356c..089f0f150bbfc06169a720e2ddb9b9d3d563658c 100644 (file)
@@ -19,7 +19,7 @@
 #define        MCFTIMER_TRR            0x04            /* Timer Reference (r/w) */
 #define        MCFTIMER_TCR            0x08            /* Timer Capture reg (r/w) */
 #define        MCFTIMER_TCN            0x0C            /* Timer Counter reg (r/w) */
-#if defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #define        MCFTIMER_TER            0x03            /* Timer Event reg (r/w) */
 #else
 #define        MCFTIMER_TER            0x11            /* Timer Event reg (r/w) */
index d197e7ff62c5635535db183ac46077d1478eacf9..ac85f16534af929c33d0f1daf1f3a565488b6af4 100644 (file)
@@ -2752,11 +2752,9 @@ func_return      get_new_page
 #ifdef CONFIG_MAC
 
 L(scc_initable_mac):
-       .byte   9,12            /* Reset */
        .byte   4,0x44          /* x16, 1 stopbit, no parity */
        .byte   3,0xc0          /* receiver: 8 bpc */
        .byte   5,0xe2          /* transmitter: 8 bpc, assert dtr/rts */
-       .byte   9,0             /* no interrupts */
        .byte   10,0            /* NRZ */
        .byte   11,0x50         /* use baud rate generator */
        .byte   12,1,13,0       /* 38400 baud */
@@ -2899,6 +2897,7 @@ func_start        serial_init,%d0/%d1/%a0/%a1
        is_not_mac(L(serial_init_not_mac))
 
 #ifdef SERIAL_DEBUG
+
 /* You may define either or both of these. */
 #define MAC_USE_SCC_A /* Modem port */
 #define MAC_USE_SCC_B /* Printer port */
@@ -2908,9 +2907,21 @@ func_start       serial_init,%d0/%d1/%a0/%a1
 #define mac_scc_cha_b_data_offset      0x4
 #define mac_scc_cha_a_data_offset      0x6
 
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
+       movel   %pc@(L(mac_sccbase)),%a0
+       /* Reset SCC device */
+       moveb   #9,%a0@(mac_scc_cha_a_ctrl_offset)
+       moveb   #0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
+       /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
+       /* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */
+       movel   #35,%d0
+5:
+       subq    #1,%d0
+       jne     5b
+#endif
+
 #ifdef MAC_USE_SCC_A
        /* Initialize channel A */
-       movel   %pc@(L(mac_sccbase)),%a0
        lea     %pc@(L(scc_initable_mac)),%a1
 5:     moveb   %a1@+,%d0
        jmi     6f
@@ -2922,9 +2933,6 @@ func_start        serial_init,%d0/%d1/%a0/%a1
 
 #ifdef MAC_USE_SCC_B
        /* Initialize channel B */
-#ifndef MAC_USE_SCC_A  /* Load mac_sccbase only if needed */
-       movel   %pc@(L(mac_sccbase)),%a0
-#endif /* MAC_USE_SCC_A */
        lea     %pc@(L(scc_initable_mac)),%a1
 7:     moveb   %a1@+,%d0
        jmi     8f
@@ -2933,6 +2941,7 @@ func_start        serial_init,%d0/%d1/%a0/%a1
        jra     7b
 8:
 #endif /* MAC_USE_SCC_B */
+
 #endif /* SERIAL_DEBUG */
 
        jra     L(serial_init_done)
@@ -3006,17 +3015,17 @@ func_start      serial_putc,%d0/%d1/%a0/%a1
 
 #ifdef SERIAL_DEBUG
 
-#ifdef MAC_USE_SCC_A
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
        movel   %pc@(L(mac_sccbase)),%a1
+#endif
+
+#ifdef MAC_USE_SCC_A
 3:     btst    #2,%a1@(mac_scc_cha_a_ctrl_offset)
        jeq     3b
        moveb   %d0,%a1@(mac_scc_cha_a_data_offset)
 #endif /* MAC_USE_SCC_A */
 
 #ifdef MAC_USE_SCC_B
-#ifndef MAC_USE_SCC_A  /* Load mac_sccbase only if needed */
-       movel   %pc@(L(mac_sccbase)),%a1
-#endif /* MAC_USE_SCC_A */
 4:     btst    #2,%a1@(mac_scc_cha_b_ctrl_offset)
        jeq     4b
        moveb   %d0,%a1@(mac_scc_cha_b_data_offset)
index 02591a109f8c0965ffea5116fbfec998078a6109..68f0fac60099277eac637851f3415e092c8fb921 100644 (file)
@@ -25,7 +25,7 @@ obj-$(CONFIG_M527x)   += m527x.o pit.o intc-2.o reset.o
 obj-$(CONFIG_M5272)    += m5272.o intc-5272.o timers.o
 obj-$(CONFIG_M528x)    += m528x.o pit.o intc-2.o reset.o
 obj-$(CONFIG_M5307)    += m5307.o timers.o intc.o reset.o
-obj-$(CONFIG_M532x)    += m532x.o timers.o intc-simr.o reset.o
+obj-$(CONFIG_M53xx)    += m53xx.o timers.o intc-simr.o reset.o
 obj-$(CONFIG_M5407)    += m5407.o timers.o intc.o reset.o
 obj-$(CONFIG_M54xx)    += m54xx.o sltimers.o intc-2.o
 obj-$(CONFIG_M5441x)   += m5441x.o pit.o intc-simr.o reset.o
similarity index 98%
rename from arch/m68k/platform/coldfire/m532x.c
rename to arch/m68k/platform/coldfire/m53xx.c
index 7951d1d43357a16ac953b584f71c5c7efd6bd526..5286f98fbed075cbe73298c02bd6b568beb332c2 100644 (file)
@@ -1,7 +1,7 @@
 /***************************************************************************/
 
 /*
- *     linux/arch/m68knommu/platform/532x/config.c
+ *     m53xx.c -- platform support for ColdFire 53xx based boards
  *
  *     Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
  *     Copyright (C) 2000, Lineo (www.lineo.com)
@@ -118,7 +118,8 @@ static struct clk * const enable_clks[] __initconst = {
        &__clk_0_24,    /* mcfuart.0 */
        &__clk_0_25,    /* mcfuart.1 */
        &__clk_0_26,    /* mcfuart.2 */
-
+       &__clk_0_28,    /* mcftmr.0 */
+       &__clk_0_29,    /* mcftmr.1 */
        &__clk_0_32,    /* mcfpit.0 */
        &__clk_0_33,    /* mcfpit.1 */
        &__clk_0_37,    /* mcfeport.0 */
@@ -134,8 +135,6 @@ static struct clk * const disable_clks[] __initconst = {
        &__clk_0_17,    /* edma */
        &__clk_0_22,    /* mcfi2c.0 */
        &__clk_0_23,    /* mcfqspi.0 */
-       &__clk_0_28,    /* mcftmr.0 */
-       &__clk_0_29,    /* mcftmr.1 */
        &__clk_0_30,    /* mcftmr.2 */
        &__clk_0_31,    /* mcftmr.3 */
        &__clk_0_34,    /* mcfpit.2 */
@@ -153,7 +152,7 @@ static struct clk * const disable_clks[] __initconst = {
 };
 
 
-static void __init m532x_clk_init(void)
+static void __init m53xx_clk_init(void)
 {
        unsigned i;
 
@@ -169,7 +168,7 @@ static void __init m532x_clk_init(void)
 
 #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
-static void __init m532x_qspi_init(void)
+static void __init m53xx_qspi_init(void)
 {
        /* setup QSPS pins for QSPI with gpio CS control */
        writew(0x01f0, MCFGPIO_PAR_QSPI);
@@ -179,7 +178,7 @@ static void __init m532x_qspi_init(void)
 
 /***************************************************************************/
 
-static void __init m532x_uarts_init(void)
+static void __init m53xx_uarts_init(void)
 {
        /* UART GPIO initialization */
        writew(readw(MCFGPIO_PAR_UART) | 0x0FFF, MCFGPIO_PAR_UART);
@@ -187,7 +186,7 @@ static void __init m532x_uarts_init(void)
 
 /***************************************************************************/
 
-static void __init m532x_fec_init(void)
+static void __init m53xx_fec_init(void)
 {
        u8 v;
 
@@ -217,11 +216,11 @@ void __init config_BSP(char *commandp, int size)
        }
 #endif
        mach_sched_init = hw_timer_init;
-       m532x_clk_init();
-       m532x_uarts_init();
-       m532x_fec_init();
+       m53xx_clk_init();
+       m53xx_uarts_init();
+       m53xx_fec_init();
 #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
-       m532x_qspi_init();
+       m53xx_qspi_init();
 #endif
 
 #ifdef CONFIG_BDM_DISABLE
index 51f6d2af807f8dd2c401138bc3894b8ae920f34d..d06068e457643fc804f0b068679da230e640becb 100644 (file)
@@ -36,7 +36,7 @@
  */
 void coldfire_profile_init(void);
 
-#if defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #define        __raw_readtrr   __raw_readl
 #define        __raw_writetrr  __raw_writel
 #else
index d2b097a652d9193f58917f2a256b4e3af09b540c..3649a8b150c0cf46f0161066a262c9c11e81e9a3 100644 (file)
@@ -17,7 +17,6 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_EFI_PARTITION is not set
-CONFIG_OPT_LIB_ASM=y
 CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
 CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
 CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
index 0f553bc009a0a848541a3410960ea959e074bd70..ffea82a16d2cb0897c7d2d4b6a79a67cf1130f44 100644 (file)
@@ -102,21 +102,23 @@ do { \
 
 #define flush_cache_range(vma, start, len) do { } while (0)
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len)             \
-do {                                                                   \
-       u32 addr = virt_to_phys(dst);                                   \
-       memcpy((dst), (src), (len));                                    \
-       if (vma->vm_flags & VM_EXEC) {                                  \
-               invalidate_icache_range((unsigned) (addr),              \
-                                       (unsigned) (addr) + PAGE_SIZE); \
-               flush_dcache_range((unsigned) (addr),                   \
-                                       (unsigned) (addr) + PAGE_SIZE); \
-       }                                                               \
-} while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len)           \
-do {                                                                   \
-       memcpy((dst), (src), (len));                                    \
-} while (0)
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+                                    struct page *page, unsigned long vaddr,
+                                    void *dst, void *src, int len)
+{
+       u32 addr = virt_to_phys(dst);
+       memcpy(dst, src, len);
+       if (vma->vm_flags & VM_EXEC) {
+               invalidate_icache_range(addr, addr + PAGE_SIZE);
+               flush_dcache_range(addr, addr + PAGE_SIZE);
+       }
+}
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+                                      struct page *page, unsigned long vaddr,
+                                      void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+}
 
 #endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
index ff8cde159d9a4809abac1994537b5425c71c9cb4..01848f056f439d251f9f61dcc0806a8d47aad4ba 100644 (file)
@@ -105,7 +105,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 
        __asm__ __volatile__ ("1:       lwx     %1, %3, r0;             \
                                        cmp     %2, %1, %4;             \
-                                       beqi    %2, 3f;                 \
+                                       bnei    %2, 3f;                 \
                                2:      swx     %5, %3, r0;             \
                                        addic   %2, r0, 0;              \
                                        bnei    %2, 1b;                 \
index 8cb8a8566edea40d3df6aaa20a373fbd23390138..2565cb94f32f0857bb4d925527c2ee9c77c7ea89 100644 (file)
@@ -123,11 +123,11 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
  * inb_p/inw_p/...
  * The macros don't do byte-swapping.
  */
-#define inb(port)              readb((u8 *)((port)))
+#define inb(port)              readb((u8 *)((unsigned long)(port)))
 #define outb(val, port)                writeb((val), (u8 *)((unsigned long)(port)))
-#define inw(port)              readw((u16 *)((port)))
+#define inw(port)              readw((u16 *)((unsigned long)(port)))
 #define outw(val, port)                writew((val), (u16 *)((unsigned long)(port)))
-#define inl(port)              readl((u32 *)((port)))
+#define inl(port)              readl((u32 *)((unsigned long)(port)))
 #define outl(val, port)                writel((val), (u32 *)((unsigned long)(port)))
 
 #define inb_p(port)            inb((port))
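
The added (unsigned long) casts above match what the out* macros already did:
widening the integer port number to pointer width before the pointer cast keeps
the conversion well defined and silences "cast to pointer from integer of
different size" warnings.  A standalone sketch of the pattern:

#include <stdint.h>

/* int -> unsigned long -> pointer, so the conversion is lossless even when
 * int is narrower than a pointer. */
static inline volatile uint8_t *port_addr(unsigned int port)
{
        return (volatile uint8_t *)(unsigned long)port;
}
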
index 41cc841091b084e73692220324d30fd44ac0b018..d52abb6812fabb082c86196d97be0ee90cf728d0 100644 (file)
@@ -153,7 +153,5 @@ extern void __init xilinx_pci_init(void);
 static inline void __init xilinx_pci_init(void) { return; }
 #endif
 
-#include <asm-generic/pci-dma-compat.h>
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_MICROBLAZE_PCI_H */
index a1ab5f0009efcd7bc84d4dd17aabedb915707653..04e49553bdf978c534519550382a5dca8e98e6ed 100644 (file)
@@ -90,17 +90,25 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
 
 #else
 
-/*
- * Address is valid if:
- *  - "addr", "addr + size" and "size" are all below the limit
- */
-#define access_ok(type, addr, size) \
-       (get_fs().seg >= (((unsigned long)(addr)) | \
-               (size) | ((unsigned long)(addr) + (size))))
-
-/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
- type?"WRITE":"READ",addr,size,get_fs().seg)) */
-
+static inline int access_ok(int type, const void __user *addr,
+                                                       unsigned long size)
+{
+       if (!size)
+               goto ok;
+
+       if ((get_fs().seg < ((unsigned long)addr)) ||
+                       (get_fs().seg < ((unsigned long)addr + size - 1))) {
+               pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+                       type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
+                       (u32)get_fs().seg);
+               return 0;
+       }
+ok:
+       pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+                       type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
+                       (u32)get_fs().seg);
+       return 1;
+}
 #endif
 
 #ifdef CONFIG_MMU
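
The rewritten access_ok() above replaces the old single-expression macro with an
explicit range check plus pr_debug() tracing.  Stripped of the kernel types, the
check is equivalent to the following standalone sketch:

#include <stdbool.h>

/* Allowed when both the first and the last byte of [addr, addr + size) lie
 * at or below the segment limit; a zero-length access always passes. */
static bool range_ok(unsigned long seg, unsigned long addr, unsigned long size)
{
        if (!size)
                return true;
        return seg >= addr && seg >= addr + size - 1;
}
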
index 4254514b4c8cf8cf3819c66ad90eb4e18145c696..a6e44410672dc036ed7009126cce80e846411127 100644 (file)
@@ -140,7 +140,7 @@ do {                                                                        \
 /* It is used only first parameter for OP - for wic, wdc */
 #define CACHE_RANGE_LOOP_1(start, end, line_length, op)                        \
 do {                                                                   \
-       int volatile temp;                                              \
+       int volatile temp = 0;                                          \
        int align = ~(line_length - 1);                                 \
        end = ((end & align) == end) ? end - line_length : end & align; \
        WARN_ON(end - start < 0);                                       \
index 0b2299bcb94817f59e34399adc20b05e96c0f3bd..410398f6db555a1df3be57620454dc61c47f20fa 100644 (file)
@@ -37,6 +37,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
        {"8.20.a", 0x15},
        {"8.20.b", 0x16},
        {"8.30.a", 0x17},
+       {"8.40.a", 0x18},
+       {"8.40.b", 0x19},
        {NULL, 0},
 };
 
@@ -57,6 +59,9 @@ const struct family_string_key family_string_lookup[] = {
        {"virtex6", 0xe},
        /* FIXME There is no key code defined for spartan2 */
        {"spartan2", 0xf0},
+       {"kintex7", 0x10},
+       {"artix7", 0x11},
+       {"zynq7000", 0x12},
        {NULL, 0},
 };
 
index eef84de5e8c83bbb5bbee4c2db4efa6e488ceded..fcc797feb9dbd5df1b654b1a5ace439c35167553 100644 (file)
@@ -112,16 +112,16 @@ no_fdt_arg:
  * copy command line directly to cmd_line placed in data section.
  */
        beqid   r5, skip        /* Skip if NULL pointer */
-       or      r6, r0, r0              /* incremment */
+       or      r11, r0, r0             /* incremment */
        ori     r4, r0, cmd_line        /* load address of command line */
        tophys(r4,r4)                   /* convert to phys address */
        ori     r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
        /* r2=r5+r6 - r5 contain pointer to command line */
-       lbu             r2, r5, r6
+       lbu     r2, r5, r11
        beqid   r2, skip                /* Skip if no data */
-       sb              r2, r4, r6              /* addr[r4+r6]= r2*/
-       addik   r6, r6, 1               /* increment counting */
+       sb      r2, r4, r11             /* addr[r4+r6]= r2 */
+       addik   r11, r11, 1             /* increment counting */
        bgtid   r3, _copy_command_line  /* loop for all entries       */
        addik   r3, r3, -1              /* decrement loop */
        addik   r5, r4, 0               /* add new space for command line */
@@ -131,13 +131,13 @@ skip:
 
 #ifdef NOT_COMPILE
 /* save bram context */
-       or      r6, r0, r0                              /* incremment */
+       or      r11, r0, r0                             /* incremment */
        ori     r4, r0, TOPHYS(_bram_load_start)        /* save bram context */
        ori     r3, r0, (LMB_SIZE - 4)
 _copy_bram:
-       lw      r7, r0, r6            /* r7 = r0 + r6 */
-       sw      r7, r4, r6              /* addr[r4 + r6] = r7*/
-       addik   r6, r6, 4               /* increment counting */
+       lw      r7, r0, r11             /* r7 = r0 + r6 */
+       sw      r7, r4, r11             /* addr[r4 + r6] = r7 */
+       addik   r11, r11, 4             /* increment counting */
        bgtid   r3, _copy_bram          /* loop for all entries */
        addik   r3, r3, -4              /* descrement loop */
 #endif
@@ -303,8 +303,8 @@ jump_over2:
         * the exception vectors, using a 4k real==virtual mapping.
         */
        /* Use temporary TLB_ID for LMB - clear this temporary mapping later */
-       ori     r6, r0, MICROBLAZE_LMB_TLB_ID
-       mts     rtlbx,r6
+       ori     r11, r0, MICROBLAZE_LMB_TLB_ID
+       mts     rtlbx,r11
 
        ori     r4,r0,(TLB_WR | TLB_EX)
        ori     r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
index 8778adf72bd36ac8b81ed41c15284596079467a4..d85fa3a2b0f828b6023daf3ba7daaa674fb0d863 100644 (file)
@@ -172,4 +172,6 @@ void __init init_IRQ(void)
         * and commits this patch.  ~~gcl */
        root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
                                                        (void *)intr_mask);
+
+       irq_set_default_host(root_domain);
 }
index a55893807274cf6964f8a3ffe683b7dfb20e5dcf..7d1a9c8b1f3dfff37cad0e5a87d9372f9030a3e8 100644 (file)
@@ -160,3 +160,8 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
        return 0; /* MicroBlaze has no separate FPU registers */
 }
 #endif /* CONFIG_MMU */
+
+void arch_cpu_idle(void)
+{
+       local_irq_enable();
+}
index 4ec137d13ad793cf9dd36521225547f41d8a21d9..b38ae3acfeb45adbe98f95c05c9c52e42721a215 100644 (file)
@@ -404,10 +404,11 @@ asmlinkage void __init mmu_init(void)
 
 #if defined(CONFIG_BLK_DEV_INITRD)
        /* Remove the init RAM disk from the available memory. */
-/*     if (initrd_start) {
-               mem_pieces_remove(&phys_avail, __pa(initrd_start),
-                                 initrd_end - initrd_start, 1);
-       }*/
+       if (initrd_start) {
+               unsigned long size;
+               size = initrd_end - initrd_start;
+               memblock_reserve(virt_to_phys(initrd_start), size);
+       }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
        /* Initialize the MMU hardware */
index 9ea521e4959ef062e3bae1d1ac31be242d88a5fc..bdb8ea100e73ee8637c8ea587378000292b0b37b 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
-#include <linux/pci.h>
 #include <linux/export.h>
 
 #include <asm/processor.h>
index 7dd65cfae83759562e43ab20bb03be071ae56ce5..d2cfe45f332b419b5c463b78eb4f44a40e0386c4 100644 (file)
@@ -17,3 +17,7 @@ obj- := $(platform-)
 obj-y += kernel/
 obj-y += mm/
 obj-y += math-emu/
+
+ifdef CONFIG_KVM
+obj-y += kvm/
+endif
index a90cfc702bb1a31cade6c98d0bd4eb0ead652f8d..7a58ab933b206a397c56b65f6a608fc4f9a1d8eb 100644 (file)
@@ -304,7 +304,6 @@ config MIPS_MALTA
        select HW_HAS_PCI
        select I8253
        select I8259
-       select MIPS_BOARDS_GEN
        select MIPS_BONITO64
        select MIPS_CPU_SCACHE
        select PCI_GT64XXX_PCI0
@@ -335,12 +334,12 @@ config MIPS_SEAD3
        select BOOT_RAW
        select CEVT_R4K
        select CSRC_R4K
+       select CSRC_GIC
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select DMA_NONCOHERENT
        select IRQ_CPU
        select IRQ_GIC
-       select MIPS_BOARDS_GEN
        select MIPS_CPU_SCACHE
        select MIPS_MSC
        select SYS_HAS_CPU_MIPS32_R1
@@ -352,6 +351,7 @@ config MIPS_SEAD3
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_SMARTMIPS
+       select SYS_SUPPORTS_MICROMIPS
        select USB_ARCH_HAS_EHCI
        select USB_EHCI_BIG_ENDIAN_DESC
        select USB_EHCI_BIG_ENDIAN_MMIO
@@ -910,6 +910,9 @@ config CEVT_GT641XX
 config CEVT_R4K
        bool
 
+config CEVT_GIC
+       bool
+
 config CEVT_SB1250
        bool
 
@@ -982,9 +985,6 @@ config MIPS_MSC
 config MIPS_NILE4
        bool
 
-config MIPS_DISABLE_OBSOLETE_IDE
-       bool
-
 config SYNC_R4K
        bool
 
@@ -1075,9 +1075,6 @@ config IRQ_GT641XX
 config IRQ_GIC
        bool
 
-config MIPS_BOARDS_GEN
-       bool
-
 config PCI_GT64XXX_PCI0
        bool
 
@@ -1147,7 +1144,7 @@ config BOOT_ELF32
 
 config MIPS_L1_CACHE_SHIFT
        int
-       default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL
+       default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X
        default "6" if MIPS_CPU_SCACHE
        default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
        default "5"
@@ -1236,6 +1233,7 @@ config CPU_MIPS32_R2
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
+       select HAVE_KVM
        help
          Choose this option to build a kernel for release 2 or later of the
          MIPS32 architecture.  Most modern embedded systems with a 32-bit
@@ -1736,6 +1734,20 @@ config 64BIT
 
 endchoice
 
+config KVM_GUEST
+       bool "KVM Guest Kernel"
+       help
+         Select this option if building a guest kernel for KVM (Trap & Emulate) mode
+
+config KVM_HOST_FREQ
+       int "KVM Host Processor Frequency (MHz)"
+       depends on KVM_GUEST
+       default 500
+       help
+         Select this option if building a guest kernel for KVM to skip
+         RTC emulation when determining guest CPU Frequency.  Instead, the guest
+         processor frequency is automatically derived from the host frequency.
+
 choice
        prompt "Kernel page size"
        default PAGE_SIZE_4KB
@@ -1811,6 +1823,15 @@ config FORCE_MAX_ZONEORDER
          The page size is not necessarily 4KB.  Keep this in mind
          when choosing a value for this option.
 
+config CEVT_GIC
+       bool "Use GIC global counter for clock events"
+       depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
+       help
+         Use the GIC global counter for the clock events. The R4K clock
+         event driver is always present, so if the platform ends up not
+         detecting a GIC, it will fall back to the R4K timer for the
+         generation of clock events.
+
 config BOARD_SCACHE
        bool
 
@@ -2016,6 +2037,7 @@ config SB1_PASS_2_1_WORKAROUNDS
        depends on CPU_SB1 && CPU_SB1_PASS_2
        default y
 
+
 config 64BIT_PHYS_ADDR
        bool
 
@@ -2034,6 +2056,13 @@ config CPU_HAS_SMARTMIPS
          you don't know you probably don't have SmartMIPS and should say N
          here.
 
+config CPU_MICROMIPS
+       depends on SYS_SUPPORTS_MICROMIPS
+       bool "Build kernel using microMIPS ISA"
+       help
+         When this option is enabled the kernel will be built using the
+         microMIPS ISA
+
 config CPU_HAS_WB
        bool
 
@@ -2096,6 +2125,9 @@ config SYS_SUPPORTS_HIGHMEM
 config SYS_SUPPORTS_SMARTMIPS
        bool
 
+config SYS_SUPPORTS_MICROMIPS
+       bool
+
 config ARCH_FLATMEM_ENABLE
        def_bool y
        depends on !NUMA && !CPU_LOONGSON2
@@ -2556,3 +2588,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/mips/kvm/Kconfig"
index 6f7978f95090f5dbdfe5bf761b008becf0df6cb2..dd58a04ef4bca5dc4a6604d283722aeb428e861c 100644 (file)
@@ -114,6 +114,7 @@ cflags-$(CONFIG_CPU_BIG_ENDIAN)             += $(shell $(CC) -dumpmachine |grep -q 'mips.*e
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
 cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,-msmartmips)
+cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips -mno-jals)
 
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
                                   -fno-omit-frame-pointer
index c8862bdc2ff281eeb270e615c95a685edbcddcde..7032ac7ecd1bb2f8083b848ca5ca01572dc4cd44 100644 (file)
@@ -31,7 +31,6 @@ config MIPS_DB1000
        select ALCHEMY_GPIOINT_AU1000
        select DMA_NONCOHERENT
        select HW_HAS_PCI
-       select MIPS_DISABLE_OBSOLETE_IDE
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
@@ -41,7 +40,6 @@ config MIPS_DB1235
        select ARCH_REQUIRE_GPIOLIB
        select HW_HAS_PCI
        select DMA_COHERENT
-       select MIPS_DISABLE_OBSOLETE_IDE
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
 
@@ -57,7 +55,6 @@ config MIPS_GPR
        select ALCHEMY_GPIOINT_AU1000
        select HW_HAS_PCI
        select DMA_NONCOHERENT
-       select MIPS_DISABLE_OBSOLETE_IDE
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
 
index fa1bdd1aea15875989e187c6d14728e245058f75..b3afcdd8d77af9380c154227a17c6a5ee944d8d2 100644 (file)
@@ -5,32 +5,14 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
 
 
 #
-# AMD Alchemy Pb1100 eval board
-#
-platform-$(CONFIG_MIPS_PB1100) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1100)     += 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1500 eval board
-#
-platform-$(CONFIG_MIPS_PB1500) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1500)     += 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1550 eval board
-#
-platform-$(CONFIG_MIPS_PB1550) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1550)     += 0xffffffff80100000
-
-#
-# AMD Alchemy Db1000/Db1500/Db1100 eval boards
+# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards
 #
 platform-$(CONFIG_MIPS_DB1000) += alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1000)   += -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_DB1000)     += 0xffffffff80100000
 
 #
-# AMD Alchemy Db1200/Pb1200/Db1550/Db1300 eval boards
+# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards
 #
 platform-$(CONFIG_MIPS_DB1235) += alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1235)   += -I$(srctree)/arch/mips/include/asm/mach-db1x00
index cb0f6afb73894ef16019b7a53861330a49d72d27..9edc35ff8cf1420e9576f195f92658e71517c073 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-gpio.h>
 #include <asm/bootinfo.h>
+#include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <prom.h>
index 38afb11ba2c4605e5c370f979eba3bc86a97621b..93fa586d52e2d480d6157cc0fcffdf962cad6534 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 
+#include <asm/idle.h>
 #include <asm/processor.h>
 #include <asm/time.h>
 #include <asm/mach-au1x00/au1000.h>
index 28abfeef09d6ff21c74c3975721ed70a3fa45d7e..92dfa481205b066ddf4ca29a62506bcab139e568 100644 (file)
@@ -30,7 +30,6 @@
 #include <asm/sections.h>
 
 #include <asm/mach-ar7/ar7.h>
-#include <asm/mips-boards/prom.h>
 
 static int __init memsize(void)
 {
index d5b3c9057018d8ccdbd2de891d8498de3b1ca76d..8be4e856b8b8942e903c286c2004fb46c4d0df9d 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/clk.h>
 
 #include <asm/bootinfo.h>
+#include <asm/idle.h>
 #include <asm/time.h>          /* for mips_hpt_frequency */
 #include <asm/reboot.h>                /* for _machine_{restart,halt} */
 #include <asm/mips_machine.h>
@@ -51,20 +52,6 @@ static void ath79_halt(void)
                cpu_wait();
 }
 
-static void __init ath79_detect_mem_size(void)
-{
-       unsigned long size;
-
-       for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX;
-            size <<= 1) {
-               if (!memcmp(ath79_detect_mem_size,
-                           ath79_detect_mem_size + size, 1024))
-                       break;
-       }
-
-       add_memory_region(0, size, BOOT_MEM_RAM);
-}
-
 static void __init ath79_detect_sys_type(void)
 {
        char *chip = "????";
@@ -212,7 +199,7 @@ void __init plat_mem_setup(void)
                                         AR71XX_DDR_CTRL_SIZE);
 
        ath79_detect_sys_type();
-       ath79_detect_mem_size();
+       detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
        ath79_clocks_init();
 
        _machine_restart = ath79_restart;
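
The removed ath79_detect_mem_size() is replaced by the generic
detect_memory_region() helper; the probing idea is presumably the same as in the
removed code: keep doubling the candidate size until the memory aliases back onto
offset 0.  A standalone sketch of that idea (not the kernel helper itself):

#include <string.h>

/* Double the candidate size until the bytes at that offset alias the bytes
 * at the base, which marks the end of physically present RAM. */
static unsigned long probe_mem_size(const unsigned char *base,
                                    unsigned long min, unsigned long max)
{
        unsigned long size;

        for (size = min; size < max; size <<= 1)
                if (!memcmp(base, base + size, 1024))
                        break;
        return size;
}
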
index d03e8799d1cf8c09455c60e0029f5371cf67de7c..5639662fd5031a005534181569e4ef628b6a5640 100644 (file)
@@ -25,6 +25,10 @@ config BCM63XX_CPU_6358
        bool "support 6358 CPU"
        select HW_HAS_PCI
 
+config BCM63XX_CPU_6362
+       bool "support 6362 CPU"
+       select HW_HAS_PCI
+
 config BCM63XX_CPU_6368
        bool "support 6368 CPU"
        select HW_HAS_PCI
index 9aa7d44898ed11cff9c70112e0e142923e27f2f4..a9505c4867e8dd1054646643abdf1640172b0dce 100644 (file)
@@ -726,11 +726,11 @@ void __init board_prom_init(void)
        u32 val;
 
        /* read base address of boot chip select (0)
-        * 6328 does not have MPI but boots from a fixed address
+        * 6328/6362 do not have MPI but boot from a fixed address
         */
-       if (BCMCPU_IS_6328())
+       if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) {
                val = 0x18000000;
-       else {
+       } else {
                val = bcm_mpi_readl(MPI_CSBASE_REG(0));
                val &= MPI_CSBASE_BASE_MASK;
        }
index b9e948d594300281cc4199c3a89e5756630e111d..c726a97fc7987d5cb87dfd04868ce3bf12e66cec 100644 (file)
 #include <bcm63xx_io.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_reset.h>
-#include <bcm63xx_clk.h>
+
+struct clk {
+       void            (*set)(struct clk *, int);
+       unsigned int    rate;
+       unsigned int    usage;
+       int             id;
+};
 
 static DEFINE_MUTEX(clocks_mutex);
 
@@ -119,11 +125,18 @@ static struct clk clk_ephy = {
  */
 static void enetsw_set(struct clk *clk, int enable)
 {
-       if (!BCMCPU_IS_6368())
+       if (BCMCPU_IS_6328())
+               bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
+       else if (BCMCPU_IS_6368())
+               bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
+                               CKCTL_6368_SWPKT_USB_EN |
+                               CKCTL_6368_SWPKT_SAR_EN,
+                               enable);
+       else
                return;
-       bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
-                       CKCTL_6368_SWPKT_USB_EN |
-                       CKCTL_6368_SWPKT_SAR_EN, enable);
+
        if (enable) {
                /* reset switch core afer clock change */
                bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
@@ -160,6 +173,8 @@ static void usbh_set(struct clk *clk, int enable)
                bcm_hwclock_set(CKCTL_6328_USBH_EN, enable);
        else if (BCMCPU_IS_6348())
                bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_USBH_EN, enable);
        else if (BCMCPU_IS_6368())
                bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
 }
@@ -175,6 +190,8 @@ static void usbd_set(struct clk *clk, int enable)
 {
        if (BCMCPU_IS_6328())
                bcm_hwclock_set(CKCTL_6328_USBD_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_USBD_EN, enable);
        else if (BCMCPU_IS_6368())
                bcm_hwclock_set(CKCTL_6368_USBD_EN, enable);
 }
@@ -196,6 +213,8 @@ static void spi_set(struct clk *clk, int enable)
                mask = CKCTL_6348_SPI_EN;
        else if (BCMCPU_IS_6358())
                mask = CKCTL_6358_SPI_EN;
+       else if (BCMCPU_IS_6362())
+               mask = CKCTL_6362_SPI_EN;
        else
                /* BCMCPU_IS_6368 */
                mask = CKCTL_6368_SPI_EN;
@@ -236,7 +255,10 @@ static struct clk clk_xtm = {
  */
 static void ipsec_set(struct clk *clk, int enable)
 {
-       bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
+       if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable);
+       else if (BCMCPU_IS_6368())
+               bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
 }
 
 static struct clk clk_ipsec = {
@@ -249,7 +271,10 @@ static struct clk clk_ipsec = {
 
 static void pcie_set(struct clk *clk, int enable)
 {
-       bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+       if (BCMCPU_IS_6328())
+               bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable);
 }
 
 static struct clk clk_pcie = {
@@ -315,9 +340,9 @@ struct clk *clk_get(struct device *dev, const char *id)
                return &clk_periph;
        if (BCMCPU_IS_6358() && !strcmp(id, "pcm"))
                return &clk_pcm;
-       if (BCMCPU_IS_6368() && !strcmp(id, "ipsec"))
+       if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec"))
                return &clk_ipsec;
-       if (BCMCPU_IS_6328() && !strcmp(id, "pcie"))
+       if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie"))
                return &clk_pcie;
        return ERR_PTR(-ENOENT);
 }
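
With the 6362 cases added above, the "ipsec" and "pcie" clocks now resolve on
that chip as well.  A hedged usage example of the clk API for one of these ids
(not part of the patch; error handling trimmed):

#include <linux/clk.h>
#include <linux/err.h>

/* Look up and gate on the "pcie" clock exposed by clk_get() above. */
static int enable_pcie_clock(void)
{
        struct clk *clk = clk_get(NULL, "pcie");

        if (IS_ERR(clk))
                return PTR_ERR(clk);
        clk_enable(clk);
        return 0;
}
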
index a7afb289b15aa03f93d6665ad55969ba97cfe6a5..79fe32df5e96c99da07d763e0990e07ee0321260 100644 (file)
@@ -25,7 +25,7 @@ const int *bcm63xx_irqs;
 EXPORT_SYMBOL(bcm63xx_irqs);
 
 static u16 bcm63xx_cpu_id;
-static u16 bcm63xx_cpu_rev;
+static u8 bcm63xx_cpu_rev;
 static unsigned int bcm63xx_cpu_freq;
 static unsigned int bcm63xx_memory_size;
 
@@ -71,6 +71,15 @@ static const int bcm6358_irqs[] = {
 
 };
 
+static const unsigned long bcm6362_regs_base[] = {
+       __GEN_CPU_REGS_TABLE(6362)
+};
+
+static const int bcm6362_irqs[] = {
+       __GEN_CPU_IRQ_TABLE(6362)
+
+};
+
 static const unsigned long bcm6368_regs_base[] = {
        __GEN_CPU_REGS_TABLE(6368)
 };
@@ -87,7 +96,7 @@ u16 __bcm63xx_get_cpu_id(void)
 
 EXPORT_SYMBOL(__bcm63xx_get_cpu_id);
 
-u16 bcm63xx_get_cpu_rev(void)
+u8 bcm63xx_get_cpu_rev(void)
 {
        return bcm63xx_cpu_rev;
 }
@@ -169,6 +178,42 @@ static unsigned int detect_cpu_clock(void)
                return (16 * 1000000 * n1 * n2) / m1;
        }
 
+       case BCM6362_CPU_ID:
+       {
+               unsigned int tmp, mips_pll_fcvo;
+
+               tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+               mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK)
+                               >> STRAPBUS_6362_FCVO_SHIFT;
+               switch (mips_pll_fcvo) {
+               case 0x03:
+               case 0x0b:
+               case 0x13:
+               case 0x1b:
+                       return 240000000;
+               case 0x04:
+               case 0x0c:
+               case 0x14:
+               case 0x1c:
+                       return 160000000;
+               case 0x05:
+               case 0x0e:
+               case 0x16:
+               case 0x1e:
+               case 0x1f:
+                       return 400000000;
+               case 0x06:
+                       return 440000000;
+               case 0x07:
+               case 0x17:
+                       return 384000000;
+               case 0x15:
+               case 0x1d:
+                       return 200000000;
+               default:
+                       return 320000000;
+               }
+       }
        case BCM6368_CPU_ID:
        {
                unsigned int tmp, p1, p2, ndiv, m1;
@@ -205,7 +250,7 @@ static unsigned int detect_memory_size(void)
        unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
        u32 val;
 
-       if (BCMCPU_IS_6328())
+       if (BCMCPU_IS_6328() || BCMCPU_IS_6362())
                return bcm_ddr_readl(DDR_CSEND_REG) << 24;
 
        if (BCMCPU_IS_6345()) {
@@ -240,53 +285,27 @@ static unsigned int detect_memory_size(void)
 
 void __init bcm63xx_cpu_init(void)
 {
-       unsigned int tmp, expected_cpu_id;
+       unsigned int tmp;
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int cpu = smp_processor_id();
+       u32 chipid_reg;
 
        /* soc registers location depends on cpu type */
-       expected_cpu_id = 0;
+       chipid_reg = 0;
 
        switch (c->cputype) {
        case CPU_BMIPS3300:
-               if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) {
-                       expected_cpu_id = BCM6348_CPU_ID;
-                       bcm63xx_regs_base = bcm6348_regs_base;
-                       bcm63xx_irqs = bcm6348_irqs;
-               } else {
+               if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT)
                        __cpu_name[cpu] = "Broadcom BCM6338";
-                       expected_cpu_id = BCM6338_CPU_ID;
-                       bcm63xx_regs_base = bcm6338_regs_base;
-                       bcm63xx_irqs = bcm6338_irqs;
-               }
-               break;
+               /* fall-through */
        case CPU_BMIPS32:
-               expected_cpu_id = BCM6345_CPU_ID;
-               bcm63xx_regs_base = bcm6345_regs_base;
-               bcm63xx_irqs = bcm6345_irqs;
+               chipid_reg = BCM_6345_PERF_BASE;
                break;
        case CPU_BMIPS4350:
-               if ((read_c0_prid() & 0xf0) == 0x10) {
-                       expected_cpu_id = BCM6358_CPU_ID;
-                       bcm63xx_regs_base = bcm6358_regs_base;
-                       bcm63xx_irqs = bcm6358_irqs;
-               } else {
-                       /* all newer chips have the same chip id location */
-                       u16 chip_id = bcm_readw(BCM_6368_PERF_BASE);
-
-                       switch (chip_id) {
-                       case BCM6328_CPU_ID:
-                               expected_cpu_id = BCM6328_CPU_ID;
-                               bcm63xx_regs_base = bcm6328_regs_base;
-                               bcm63xx_irqs = bcm6328_irqs;
-                               break;
-                       case BCM6368_CPU_ID:
-                               expected_cpu_id = BCM6368_CPU_ID;
-                               bcm63xx_regs_base = bcm6368_regs_base;
-                               bcm63xx_irqs = bcm6368_irqs;
-                               break;
-                       }
-               }
+               if ((read_c0_prid() & 0xf0) == 0x10)
+                       chipid_reg = BCM_6345_PERF_BASE;
+               else
+                       chipid_reg = BCM_6368_PERF_BASE;
                break;
        }
 
@@ -294,20 +313,47 @@ void __init bcm63xx_cpu_init(void)
         * really early to panic, but delaying panic would not help since we
         * will never get any working console
         */
-       if (!expected_cpu_id)
+       if (!chipid_reg)
                panic("unsupported Broadcom CPU");
 
-       /*
-        * bcm63xx_regs_base is set, we can access soc registers
-        */
-
-       /* double check CPU type */
-       tmp = bcm_perf_readl(PERF_REV_REG);
+       /* read out CPU type */
+       tmp = bcm_readl(chipid_reg);
        bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT;
        bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT;
 
-       if (bcm63xx_cpu_id != expected_cpu_id)
-               panic("bcm63xx CPU id mismatch");
+       switch (bcm63xx_cpu_id) {
+       case BCM6328_CPU_ID:
+               bcm63xx_regs_base = bcm6328_regs_base;
+               bcm63xx_irqs = bcm6328_irqs;
+               break;
+       case BCM6338_CPU_ID:
+               bcm63xx_regs_base = bcm6338_regs_base;
+               bcm63xx_irqs = bcm6338_irqs;
+               break;
+       case BCM6345_CPU_ID:
+               bcm63xx_regs_base = bcm6345_regs_base;
+               bcm63xx_irqs = bcm6345_irqs;
+               break;
+       case BCM6348_CPU_ID:
+               bcm63xx_regs_base = bcm6348_regs_base;
+               bcm63xx_irqs = bcm6348_irqs;
+               break;
+       case BCM6358_CPU_ID:
+               bcm63xx_regs_base = bcm6358_regs_base;
+               bcm63xx_irqs = bcm6358_irqs;
+               break;
+       case BCM6362_CPU_ID:
+               bcm63xx_regs_base = bcm6362_regs_base;
+               bcm63xx_irqs = bcm6362_irqs;
+               break;
+       case BCM6368_CPU_ID:
+               bcm63xx_regs_base = bcm6368_regs_base;
+               bcm63xx_irqs = bcm6368_irqs;
+               break;
+       default:
+               panic("unsupported broadcom CPU %x", bcm63xx_cpu_id);
+               break;
+       }
 
        bcm63xx_cpu_freq = detect_cpu_clock();
        bcm63xx_memory_size = detect_memory_size();
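
The rework above stops hard-coding an expected CPU id per core type: the chip id
register is read once and the register/IRQ tables are chosen by switching on its
contents.  The decode itself is a mask-and-shift, roughly as follows, with
placeholder mask and shift values rather than the real REV_* constants:

/* Split a chip-id register value into id and revision fields.  The mask and
 * shift values here are illustrative placeholders. */
#define CHIPID_MASK   0xffff0000u
#define CHIPID_SHIFT  16
#define REVID_MASK    0x000000ffu

static void decode_chipid(unsigned int reg, unsigned short *id, unsigned char *rev)
{
        *id  = (unsigned short)((reg & CHIPID_MASK) >> CHIPID_SHIFT);
        *rev = (unsigned char)(reg & REVID_MASK);
}
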
index 58371c7deac20e0e3c53695ee75e99ec5ce91e85..588d1ec622e404fd2fbbe0a5d9a6868477b2c41b 100644 (file)
@@ -77,6 +77,12 @@ static int __init bcm63xx_detect_flash_type(void)
                        return BCM63XX_FLASH_TYPE_PARALLEL;
                else
                        return BCM63XX_FLASH_TYPE_SERIAL;
+       case BCM6362_CPU_ID:
+               val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+               if (val & STRAPBUS_6362_BOOT_SEL_SERIAL)
+                       return BCM63XX_FLASH_TYPE_SERIAL;
+               else
+                       return BCM63XX_FLASH_TYPE_NAND;
        case BCM6368_CPU_ID:
                val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
                switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
index e97fd60e92ef289e215908b7327889435f3f702f..3065bb61820d5befea57f31580c91c15afcc666b 100644 (file)
 /*
  * register offsets
  */
-static const unsigned long bcm6338_regs_spi[] = {
-       __GEN_SPI_REGS_TABLE(6338)
-};
-
 static const unsigned long bcm6348_regs_spi[] = {
        __GEN_SPI_REGS_TABLE(6348)
 };
@@ -34,23 +30,15 @@ static const unsigned long bcm6358_regs_spi[] = {
        __GEN_SPI_REGS_TABLE(6358)
 };
 
-static const unsigned long bcm6368_regs_spi[] = {
-       __GEN_SPI_REGS_TABLE(6368)
-};
-
 const unsigned long *bcm63xx_regs_spi;
 EXPORT_SYMBOL(bcm63xx_regs_spi);
 
 static __init void bcm63xx_spi_regs_init(void)
 {
-       if (BCMCPU_IS_6338())
-               bcm63xx_regs_spi = bcm6338_regs_spi;
-       if (BCMCPU_IS_6348())
+       if (BCMCPU_IS_6338() || BCMCPU_IS_6348())
                bcm63xx_regs_spi = bcm6348_regs_spi;
-       if (BCMCPU_IS_6358())
+       if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
                bcm63xx_regs_spi = bcm6358_regs_spi;
-       if (BCMCPU_IS_6368())
-               bcm63xx_regs_spi = bcm6368_regs_spi;
 }
 #else
 static __init void bcm63xx_spi_regs_init(void) { }
@@ -93,13 +81,13 @@ int __init bcm63xx_spi_register(void)
        spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
 
        if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
-               spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
-               spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
-               spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
-               spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
+               spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
+               spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE;
+               spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT;
+               spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH;
        }
 
-       if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
+       if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) {
                spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
                spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
                spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
index da24c2bd9b7cde3cfbfa10caaa6f1ae70f6a95a5..c0ab3887f42ef8b355eed9971f04188be0b966d8 100644 (file)
@@ -82,6 +82,17 @@ static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused;
 #define ext_irq_cfg_reg1       PERF_EXTIRQ_CFG_REG_6358
 #define ext_irq_cfg_reg2       0
 #endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+#define irq_stat_reg           PERF_IRQSTAT_6362_REG
+#define irq_mask_reg           PERF_IRQMASK_6362_REG
+#define irq_bits               64
+#define is_ext_irq_cascaded    1
+#define ext_irq_start          (BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE)
+#define ext_irq_end            (BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE)
+#define ext_irq_count          4
+#define ext_irq_cfg_reg1       PERF_EXTIRQ_CFG_REG_6362
+#define ext_irq_cfg_reg2       0
+#endif
 #ifdef CONFIG_BCM63XX_CPU_6368
 #define irq_stat_reg           PERF_IRQSTAT_6368_REG
 #define irq_mask_reg           PERF_IRQMASK_6368_REG
@@ -170,6 +181,16 @@ static void bcm63xx_init_irq(void)
                ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
                break;
+       case BCM6362_CPU_ID:
+               irq_stat_addr += PERF_IRQSTAT_6362_REG;
+               irq_mask_addr += PERF_IRQMASK_6362_REG;
+               irq_bits = 64;
+               ext_irq_count = 4;
+               is_ext_irq_cascaded = 1;
+               ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+               ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+               ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
+               break;
        case BCM6368_CPU_ID:
                irq_stat_addr += PERF_IRQSTAT_6368_REG;
                irq_mask_addr += PERF_IRQMASK_6368_REG;
@@ -458,6 +479,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d,
        case BCM6338_CPU_ID:
        case BCM6345_CPU_ID:
        case BCM6358_CPU_ID:
+       case BCM6362_CPU_ID:
        case BCM6368_CPU_ID:
                if (levelsense)
                        reg |= EXTIRQ_CFG_LEVELSENSE(irq);
index 10eaff4580710d4eae4879691502c55ebef3f1d6..fd698087fbfd9367b777e1391aaabb53789b2936 100644 (file)
@@ -36,6 +36,8 @@ void __init prom_init(void)
                mask = CKCTL_6348_ALL_SAFE_EN;
        else if (BCMCPU_IS_6358())
                mask = CKCTL_6358_ALL_SAFE_EN;
+       else if (BCMCPU_IS_6362())
+               mask = CKCTL_6362_ALL_SAFE_EN;
        else if (BCMCPU_IS_6368())
                mask = CKCTL_6368_ALL_SAFE_EN;
        else
index 68a31bb90cbf6de79acda74449b0bbfa749a9f21..317931c6cf58f9305b001e25d735fcf189f18dff 100644 (file)
 #define BCM6358_RESET_PCIE     0
 #define BCM6358_RESET_PCIE_EXT 0
 
+#define BCM6362_RESET_SPI      SOFTRESET_6362_SPI_MASK
+#define BCM6362_RESET_ENET     0
+#define BCM6362_RESET_USBH     SOFTRESET_6362_USBH_MASK
+#define BCM6362_RESET_USBD     SOFTRESET_6362_USBS_MASK
+#define BCM6362_RESET_DSL      0
+#define BCM6362_RESET_SAR      SOFTRESET_6362_SAR_MASK
+#define BCM6362_RESET_EPHY     SOFTRESET_6362_EPHY_MASK
+#define BCM6362_RESET_ENETSW   SOFTRESET_6362_ENETSW_MASK
+#define BCM6362_RESET_PCM      SOFTRESET_6362_PCM_MASK
+#define BCM6362_RESET_MPI      0
+#define BCM6362_RESET_PCIE      (SOFTRESET_6362_PCIE_MASK | \
+                                SOFTRESET_6362_PCIE_CORE_MASK)
+#define BCM6362_RESET_PCIE_EXT SOFTRESET_6362_PCIE_EXT_MASK
+
 #define BCM6368_RESET_SPI      SOFTRESET_6368_SPI_MASK
 #define BCM6368_RESET_ENET     0
 #define BCM6368_RESET_USBH     SOFTRESET_6368_USBH_MASK
@@ -119,6 +133,10 @@ static const u32 bcm6358_reset_bits[] = {
        __GEN_RESET_BITS_TABLE(6358)
 };
 
+static const u32 bcm6362_reset_bits[] = {
+       __GEN_RESET_BITS_TABLE(6362)
+};
+
 static const u32 bcm6368_reset_bits[] = {
        __GEN_RESET_BITS_TABLE(6368)
 };
@@ -140,6 +158,9 @@ static int __init bcm63xx_reset_bits_init(void)
        } else if (BCMCPU_IS_6358()) {
                reset_reg = PERF_SOFTRESET_6358_REG;
                bcm63xx_reset_bits = bcm6358_reset_bits;
+       } else if (BCMCPU_IS_6362()) {
+               reset_reg = PERF_SOFTRESET_6362_REG;
+               bcm63xx_reset_bits = bcm6362_reset_bits;
        } else if (BCMCPU_IS_6368()) {
                reset_reg = PERF_SOFTRESET_6368_REG;
                bcm63xx_reset_bits = bcm6368_reset_bits;
@@ -182,6 +203,13 @@ static const u32 bcm63xx_reset_bits[] = {
 #define reset_reg PERF_SOFTRESET_6358_REG
 #endif
 
+#ifdef CONFIG_BCM63XX_CPU_6362
+static const u32 bcm63xx_reset_bits[] = {
+       __GEN_RESET_BITS_TABLE(6362)
+};
+#define reset_reg PERF_SOFTRESET_6362_REG
+#endif
+
 #ifdef CONFIG_BCM63XX_CPU_6368
 static const u32 bcm63xx_reset_bits[] = {
        __GEN_RESET_BITS_TABLE(6368)
index 35e18e98beb96b04151999aac8bf037129efc324..24a24445db64edd7d1388c35f3bd471e049489a9 100644 (file)
@@ -83,6 +83,9 @@ void bcm63xx_machine_reboot(void)
        case BCM6358_CPU_ID:
                perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358;
                break;
+       case BCM6362_CPU_ID:
+               perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362;
+               break;
        }
 
        for (i = 0; i < 2; i++) {
@@ -126,7 +129,7 @@ static void __bcm63xx_machine_reboot(char *p)
 const char *get_system_type(void)
 {
        static char buf[128];
-       snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)",
+       snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)",
                 board_get_name(),
                 bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
        return buf;
index 156aa6143e1117646ab7b81ccf57ca623a258513..a22f06a6f7cac31e3506619371615b8f4e1c8d4b 100644 (file)
@@ -1032,9 +1032,8 @@ static int octeon_irq_gpio_map_common(struct irq_domain *d,
        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;
 
-       hw += gpiod->base_hwirq;
-       line = hw >> 6;
-       bit = hw & 63;
+       line = (hw + gpiod->base_hwirq) >> 6;
+       bit = (hw + gpiod->base_hwirq) & 63;
        if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;
 
index 516b4428df4ecd3ac5b45a6397f8ffe4a2e070c1..4eedd481dd007bba3b25bfd733fca34cbe895730 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/leds.h>
 
+#include <asm/idle.h>
 #include <asm/processor.h>
 
 #include <cobalt.h>
index face9d26e6d5a1558c93cd149e60eb742fad8619..bac26b971c5e86342dc751bf946434a6ceead08f 100644 (file)
@@ -228,7 +228,6 @@ CONFIG_HIDRAW=y
 CONFIG_USB_HID=y
 CONFIG_USB_SUPPORT=y
 CONFIG_USB=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_EHCI_TT_NEWSCHED=y
index 14752dde754018170930faa0aabfbd049b2a4312..e2b4ad55462f3477f6c4b5e63d54057064b92fde 100644 (file)
@@ -344,7 +344,6 @@ CONFIG_UHID=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
 CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
index b6acd2f256b682f4c3ab37774eb3ba70bcee7595..343bebc4b63b981724a17b770dc71b895d4dc225 100644 (file)
@@ -300,7 +300,6 @@ CONFIG_USB=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_OTG_WHITELIST=y
 CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
index cd732e5b4fd5f856df3f46a0a2ee83d0d7491f66..ce1d3eeeb7373373fa0f07056ff3ca39e0f0c496 100644 (file)
@@ -2,30 +2,21 @@ CONFIG_MIPS_MALTA=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
 CONFIG_MIPS_MT_SMP=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_RELAY=y
 CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_PID_NS=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_RELAY=y
 CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PCI=y
-CONFIG_PM=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
@@ -41,8 +32,6 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
 CONFIG_IP_MROUTE=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
@@ -65,7 +54,6 @@ CONFIG_IPV6_MROUTE=y
 CONFIG_IPV6_PIMSM_V2=y
 CONFIG_NETWORK_SECMARK=y
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -136,23 +124,15 @@ CONFIG_IP_VS_DH=m
 CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_FTP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -162,8 +142,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
@@ -173,7 +151,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
@@ -247,12 +224,10 @@ CONFIG_MAC80211=m
 CONFIG_MAC80211_RC_PID=y
 CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
-CONFIG_MAC80211_LEDS=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_OOPS=m
@@ -271,7 +246,6 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
-# CONFIG_MISC_DEVICES is not set
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_IDE_GENERIC=y
@@ -317,13 +291,19 @@ CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_NETDEVICES=y
-CONFIG_IFB=m
-CONFIG_DUMMY=m
 CONFIG_BONDING=m
-CONFIG_MACVLAN=m
+CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
 CONFIG_MARVELL_PHY=m
 CONFIG_DAVICOM_PHY=m
 CONFIG_QSEMI_PHY=m
@@ -334,14 +314,6 @@ CONFIG_SMSC_PHY=m
 CONFIG_BROADCOM_PHY=m
 CONFIG_ICPLUS_PHY=m
 CONFIG_REALTEK_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-CONFIG_AX88796=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_TC35815=m
-CONFIG_CHELSIO_T3=m
-CONFIG_NETXEN_NIC=m
 CONFIG_ATMEL=m
 CONFIG_PCI_ATMEL=m
 CONFIG_PRISM54=m
@@ -352,15 +324,7 @@ CONFIG_HOSTAP_PLX=m
 CONFIG_HOSTAP_PCI=m
 CONFIG_IPW2100=m
 CONFIG_IPW2100_MONITOR=y
-CONFIG_IPW2200=m
-CONFIG_IPW2200_MONITOR=y
-CONFIG_IPW2200_PROMISCUOUS=y
-CONFIG_IPW2200_QOS=y
 CONFIG_LIBERTAS=m
-CONFIG_HERMES=m
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_NORTEL_HERMES=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO_I8042 is not set
@@ -373,12 +337,6 @@ CONFIG_FB_CIRRUS=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_HID=m
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_CMOS=y
 CONFIG_UIO=m
@@ -398,7 +356,6 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=y
-CONFIG_AUTOFS_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -425,7 +382,6 @@ CONFIG_ROMFS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
@@ -466,7 +422,6 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_LRW=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
new file mode 100644 (file)
index 0000000..341bb47
--- /dev/null
@@ -0,0 +1,456 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_MIPS_DYN_TRANS=y
+CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
+CONFIG_VHOST_NET=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
new file mode 100644 (file)
index 0000000..2b8558b
--- /dev/null
@@ -0,0 +1,453 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_KVM_GUEST=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
new file mode 100644 (file)
index 0000000..93057a7
--- /dev/null
@@ -0,0 +1,195 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_VPE_LOADER=y
+CONFIG_MIPS_VPE_APSP_API=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="aprp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
new file mode 100644 (file)
index 0000000..4e54b75
--- /dev/null
@@ -0,0 +1,196 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_MT_SMTC=y
+# CONFIG_MIPS_MT_FPAFF is not set
+CONFIG_NR_CPUS=9
+CONFIG_HZ_48=y
+CONFIG_LOCALVERSION="smtc"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
new file mode 100644 (file)
index 0000000..8a66602
--- /dev/null
@@ -0,0 +1,199 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CMP=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="cmp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
new file mode 100644 (file)
index 0000000..9868fc9
--- /dev/null
@@ -0,0 +1,194 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="up"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
index e3eec68d91326fe9784304b32cb988b5f7e62059..0abe681c11a07abdf4945aa46b8beeeac310fa65 100644 (file)
@@ -2,7 +2,6 @@ CONFIG_MIPS_SEAD3=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
 CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_NO_HZ=y
@@ -115,10 +114,8 @@ CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_ISO8859_15=y
 CONFIG_NLS_UTF8=y
 # CONFIG_FTRACE is not set
-CONFIG_CRYPTO=y
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_ARC4=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig
new file mode 100644 (file)
index 0000000..2a0da5b
--- /dev/null
@@ -0,0 +1,122 @@
+CONFIG_MIPS_SEAD3=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MICROMIPS=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_ICPLUS_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=32
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_SPI=y
+CONFIG_SENSORS_ADT7475=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_DEBUG=y
+CONFIG_MMC_SPI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M41T80=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
index 84befc968fc4a3aee396b6220b00525cac141d0a..5291505167774d3da7000092e26512c7e29e454d 100644 (file)
@@ -2,4 +2,6 @@
 # Makefile for generic prom monitor library routines under Linux.
 #
 
+lib-y                  += cmdline.o
+
 lib-$(CONFIG_64BIT)    += call_o32.o
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
new file mode 100644 (file)
index 0000000..ffd0345
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/addrspace.h>
+#include <asm/fw/fw.h>
+
+int fw_argc;
+int *_fw_argv;
+int *_fw_envp;
+
+void __init fw_init_cmdline(void)
+{
+       int i;
+
+       /* Validate command line parameters. */
+       if ((fw_arg0 >= CKSEG0) || (fw_arg1 < CKSEG0)) {
+               fw_argc = 0;
+               _fw_argv = NULL;
+       } else {
+               fw_argc = (fw_arg0 & 0x0000ffff);
+               _fw_argv = (int *)fw_arg1;
+       }
+
+       /* Validate environment pointer. */
+       if (fw_arg2 < CKSEG0)
+               _fw_envp = NULL;
+       else
+               _fw_envp = (int *)fw_arg2;
+
+       for (i = 1; i < fw_argc; i++) {
+               strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE);
+               if (i < (fw_argc - 1))
+                       strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+       }
+}
+
+char * __init fw_getcmdline(void)
+{
+       return &(arcs_cmdline[0]);
+}
+
+char *fw_getenv(char *envname)
+{
+       char *result = NULL;
+
+       if (_fw_envp != NULL) {
+               /*
+                * Return a pointer to the given environment variable.
+                * YAMON uses "name", "value" pairs, while U-Boot uses
+                * "name=value".
+                */
+               int i, yamon, index = 0;
+
+               yamon = (strchr(fw_envp(index), '=') == NULL);
+               i = strlen(envname);
+
+               while (fw_envp(index)) {
+                       if (strncmp(envname, fw_envp(index), i) == 0) {
+                               if (yamon) {
+                                       result = fw_envp(index + 1);
+                                       break;
+                               } else if (fw_envp(index)[i] == '=') {
+                                       result = (fw_envp(index + 1) + i);
+                                       break;
+                               }
+                       }
+
+                       /* Increment array index. */
+                       if (yamon)
+                               index += 2;
+                       else
+                               index += 1;
+               }
+       }
+
+       return result;
+}
+
+unsigned long fw_getenvl(char *envname)
+{
+       unsigned long envl = 0UL;
+       char *str;
+       long val;
+       int tmp;
+
+       str = fw_getenv(envname);
+       if (str) {
+               tmp = kstrtol(str, 0, &val);
+               envl = (unsigned long)val;
+       }
+
+       return envl;
+}
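
For readers unfamiliar with the two firmware environments that fw_getenv() above has to cope with: YAMON hands the kernel the environment as alternating "name", "value" strings, while U-Boot passes single "name=value" strings, both as NULL-terminated arrays. The stand-alone sketch below illustrates a lookup over both layouts. It is illustrative only: the environment contents and the helper name are made up and are not part of the patch, and the sketch mirrors the intent of the lookup rather than the exact kernel code.

#include <stdio.h>
#include <string.h>

/* Hypothetical YAMON-style environment: "name", "value" pairs, NULL-terminated. */
static char *yamon_envp[] = { "memsize", "0x08000000", "modetty0", "38400n8r", NULL };

/* Hypothetical U-Boot-style environment: "name=value" strings, NULL-terminated. */
static char *uboot_envp[] = { "memsize=0x08000000", "baudrate=38400", NULL };

/* Look up 'name' in either layout; returns the value string or NULL. */
static char *env_lookup(char **envp, const char *name)
{
	int yamon = (strchr(envp[0], '=') == NULL);	/* same layout probe as fw_getenv() */
	size_t len = strlen(name);
	int i = 0;

	while (envp[i]) {
		if (strncmp(name, envp[i], len) == 0) {
			if (yamon)
				return envp[i + 1];		/* value is the next array entry */
			if (envp[i][len] == '=')
				return envp[i] + len + 1;	/* value follows the '=' */
		}
		i += yamon ? 2 : 1;
	}
	return NULL;
}

int main(void)
{
	printf("%s\n", env_lookup(yamon_envp, "memsize"));	/* prints 0x08000000 */
	printf("%s\n", env_lookup(uboot_envp, "memsize"));	/* prints 0x08000000 */
	return 0;
}
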
index 164a21e65b421de9b7265a1bd403a3895e8092c7..879691d194af426f5532d47c4e467491bf785038 100644 (file)
@@ -296,6 +296,7 @@ symbol              =       value
 #define LONG_SUBU      subu
 #define LONG_L         lw
 #define LONG_S         sw
+#define LONG_SP                swp
 #define LONG_SLL       sll
 #define LONG_SLLV      sllv
 #define LONG_SRL       srl
@@ -318,6 +319,7 @@ symbol              =       value
 #define LONG_SUBU      dsubu
 #define LONG_L         ld
 #define LONG_S         sd
+#define LONG_SP                sdp
 #define LONG_SLL       dsll
 #define LONG_SLLV      dsllv
 #define LONG_SRL       dsrl
index b71dd5b160854a0e8f7fb54e4845af0ba535d748..4d2cdea5aa37f46e05e1e06e7f8d378bf9331c13 100644 (file)
@@ -104,6 +104,7 @@ struct boot_mem_map {
 extern struct boot_mem_map boot_mem_map;
 
 extern void add_memory_region(phys_t start, phys_t size, long type);
+extern void detect_memory_region(phys_t start, phys_t sz_min,  phys_t sz_max);
 
 extern void prom_init(void);
 extern void prom_free_prom_memory(void);
index 888766ae1f8598f1ff98373c6dbc7a6658307fc8..e28a3e0eb3cb6407b4ad6d6ac649e20a876ab69d 100644 (file)
 #include <asm/ptrace.h>
 #include <asm/inst.h>
 
+extern int __isa_exception_epc(struct pt_regs *regs);
+extern int __compute_return_epc(struct pt_regs *regs);
+extern int __compute_return_epc_for_insn(struct pt_regs *regs,
+                                        union mips_instruction insn);
+extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
+extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
+
+
 static inline int delay_slot(struct pt_regs *regs)
 {
        return regs->cp0_cause & CAUSEF_BD;
@@ -18,20 +26,27 @@ static inline int delay_slot(struct pt_regs *regs)
 
 static inline unsigned long exception_epc(struct pt_regs *regs)
 {
-       if (!delay_slot(regs))
+       if (likely(!delay_slot(regs)))
                return regs->cp0_epc;
 
+       if (get_isa16_mode(regs->cp0_epc))
+               return __isa_exception_epc(regs);
+
        return regs->cp0_epc + 4;
 }
 
 #define BRANCH_LIKELY_TAKEN 0x0001
 
-extern int __compute_return_epc(struct pt_regs *regs);
-extern int __compute_return_epc_for_insn(struct pt_regs *regs,
-                                        union mips_instruction insn);
-
 static inline int compute_return_epc(struct pt_regs *regs)
 {
+       if (get_isa16_mode(regs->cp0_epc)) {
+               if (cpu_has_mmips)
+                       return __microMIPS_compute_return_epc(regs);
+               if (cpu_has_mips16)
+                       return __MIPS16e_compute_return_epc(regs);
+               return regs->cp0_epc;
+       }
+
        if (!delay_slot(regs)) {
                regs->cp0_epc += 4;
                return 0;
@@ -40,4 +55,19 @@ static inline int compute_return_epc(struct pt_regs *regs)
        return __compute_return_epc(regs);
 }
 
+static inline int MIPS16e_compute_return_epc(struct pt_regs *regs,
+                                            union mips16e_instruction *inst)
+{
+       if (likely(!delay_slot(regs))) {
+               if (inst->ri.opcode == MIPS16e_extend_op) {
+                       regs->cp0_epc += 4;
+                       return 0;
+               }
+               regs->cp0_epc += 2;
+               return 0;
+       }
+
+       return __MIPS16e_compute_return_epc(regs);
+}
+
 #endif /* _ASM_BRANCH_H */
index c9456e7a7283dfa15b507f021bb3f8f6696a45ca..778e32d817bc7da48056167edb4e137547ca9ef4 100644 (file)
@@ -6,8 +6,6 @@
 #include <linux/seq_file.h>
 #include <linux/clk.h>
 
-extern void (*cpu_wait) (void);
-
 struct clk;
 
 struct clk_ops {
index 1a57e8b4d0924b488d0e72b6c79dcb6516ffe53f..e5ec8fcd8afaf9d822167605a8f4aaed22e9486f 100644 (file)
 #ifndef cpu_has_pindexed_dcache
 #define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
 #endif
+#ifndef cpu_has_local_ebase
+#define cpu_has_local_ebase    1
+#endif
 
 /*
  * I-Cache snoops remote store.         This only matters on SMP.  Some multiprocessors
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
new file mode 100644 (file)
index 0000000..242cbb3
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
+ *
+ */
+#ifndef __ASM_DMA_COHERENCE_H
+#define __ASM_DMA_COHERENCE_H
+
+extern int coherentio;
+extern int hw_coherentio;
+
+#endif
index f8fc74b6cb47430b540d072833d62872192349d8..84238c574d5e6bff1db1db79d58a5ddbd4436422 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_DMA_MAPPING_H
 
 #include <asm/scatterlist.h>
+#include <asm/dma-coherence.h>
 #include <asm/cache.h>
 #include <asm-generic/dma-coherent.h>
 
index 3b4092705567b7b4e65dd2680febc6a5a8e540cc..2abb587d5ab40fd41aa950d1987d5f854f60e531 100644 (file)
@@ -54,6 +54,12 @@ do {                                                                 \
 extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
        unsigned long cpc);
 extern int do_dsemulret(struct pt_regs *xcp);
+extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+                                   struct mips_fpu_struct *ctx, int has_fpu,
+                                   void *__user *fault_addr);
+int process_fpemu_return(int sig, void __user *fault_addr);
+int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+                    unsigned long *contpc);
 
 /*
  * Instruction inserted following the badinst to further tag the sequence
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
new file mode 100644 (file)
index 0000000..d6c50a7
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ */
+#ifndef __ASM_FW_H_
+#define __ASM_FW_H_
+
+#include <asm/bootinfo.h>      /* For cleaner code... */
+
+enum fw_memtypes {
+       fw_dontuse,
+       fw_code,
+       fw_free,
+};
+
+typedef struct {
+       unsigned long base;     /* Within KSEG0 */
+       unsigned int size;      /* bytes */
+       enum fw_memtypes type;  /* fw_memtypes */
+} fw_memblock_t;
+
+/* Maximum number of memory block descriptors. */
+#define FW_MAX_MEMBLOCKS       32
+
+extern int fw_argc;
+extern int *_fw_argv;
+extern int *_fw_envp;
+
+/*
+ * Most firmware like YAMON, PMON, etc. pass arguments and environment
+ * variables as 32-bit pointers. These take care of sign extension.
+ */
+#define fw_argv(index)         ((char *)(long)_fw_argv[(index)])
+#define fw_envp(index)         ((char *)(long)_fw_envp[(index)])
+
+extern void fw_init_cmdline(void);
+extern char *fw_getcmdline(void);
+extern fw_memblock_t *fw_getmdesc(void);
+extern void fw_meminit(void);
+extern char *fw_getenv(char *name);
+extern unsigned long fw_getenvl(char *name);
+extern void fw_init_early_console(char port);
+
+#endif /* __ASM_FW_H_ */
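
The fw_argv()/fw_envp() macros above cast through (long) deliberately: the firmware stores 32-bit KSEG0 pointers in int-sized slots, and widening through a signed long sign-extends them into the matching 64-bit compatibility-segment addresses on a 64-bit kernel. A minimal stand-alone illustration, with a made-up address value:

	#include <stdio.h>

	int main(void)
	{
		int p32 = (int)0x80001234u;			/* 32-bit KSEG0 pointer as handed over by firmware */
		unsigned long p64 = (unsigned long)(long)p32;	/* widen through signed long, as fw_argv() does */
		printf("%lx\n", p64);				/* ffffffff80001234 on an LP64 toolchain */
		return 0;
	}

On a 32-bit kernel the same cast is a no-op, so the macros serve both configurations.
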
index bdc9786ab5a7bcbf278b3fe1f77a73ae49dd2767..7153b32de18e6692e2792be19a34b886d1121ba8 100644 (file)
 #define GIC_VPE_WD_COUNT0_OFS          0x0094
 #define GIC_VPE_WD_INITIAL0_OFS                0x0098
 #define GIC_VPE_COMPARE_LO_OFS         0x00a0
-#define GIC_VPE_COMPARE_HI             0x00a4
+#define GIC_VPE_COMPARE_HI_OFS         0x00a4
 
 #define GIC_VPE_EIC_SHADOW_SET_BASE    0x0100
 #define GIC_VPE_EIC_SS(intr) \
@@ -359,7 +359,11 @@ struct gic_shared_intr_map {
 /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
 #define GIC_PIN_TO_VEC_OFFSET  (1)
 
-extern int gic_present;
+#include <linux/clocksource.h>
+#include <linux/irq.h>
+
+extern unsigned int gic_present;
+extern unsigned int gic_frequency;
 extern unsigned long _gic_base;
 extern unsigned int gic_irq_base;
 extern unsigned int gic_irq_flags[];
@@ -368,18 +372,20 @@ extern struct gic_shared_intr_map gic_shared_intr_map[];
 extern void gic_init(unsigned long gic_base_addr,
        unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
        unsigned int intrmap_size, unsigned int irqbase);
-
 extern void gic_clocksource_init(unsigned int);
-extern unsigned int gic_get_int(void);
+extern unsigned int gic_compare_int (void);
+extern cycle_t gic_read_count(void);
+extern cycle_t gic_read_compare(void);
+extern void gic_write_compare(cycle_t cnt);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
 extern void gic_bind_eic_interrupt(int irq, int set);
 extern unsigned int gic_get_timer_pending(void);
+extern unsigned int gic_get_int(void);
 extern void gic_enable_interrupt(int irq_vec);
 extern void gic_disable_interrupt(int irq_vec);
 extern void gic_irq_ack(struct irq_data *d);
 extern void gic_finish_irq(struct irq_data *d);
 extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
-
 #endif /* _ASM_GICREGS_H */
index 44d6a5bde4a1cb9e8db930a11d1b07b23f383507..e3ee92d4dbe750c7aa05a5488f7443cdd64fb387 100644 (file)
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
-#ifdef __ASSEMBLY__
-#define ASMMACRO(name, code...) .macro name; code; .endm
-#else
-
-#include <asm/cpu-features.h>
-
-#define ASMMACRO(name, code...)                                                \
-__asm__(".macro " #name "; " #code "; .endm");                         \
-                                                                       \
-static inline void name(void)                                          \
-{                                                                      \
-       __asm__ __volatile__ (#name);                                   \
-}
-
-/*
- * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
- */
-extern void mips_ihb(void);
-
-#endif
+#include <linux/stringify.h>
 
-ASMMACRO(_ssnop,
-        sll    $0, $0, 1
-       )
+#define ___ssnop                                                       \
+       sll     $0, $0, 1
 
-ASMMACRO(_ehb,
-        sll    $0, $0, 3
-       )
+#define ___ehb                                                         \
+       sll     $0, $0, 3
 
 /*
  * TLB hazards
@@ -48,24 +27,24 @@ ASMMACRO(_ehb,
  * MIPSR2 defines ehb for hazard avoidance
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-        _ehb
-       )
-ASMMACRO(tlbw_use_hazard,
-        _ehb
-       )
-ASMMACRO(tlb_probe_hazard,
-        _ehb
-       )
-ASMMACRO(irq_enable_hazard,
-        _ehb
-       )
-ASMMACRO(irq_disable_hazard,
-       _ehb
-       )
-ASMMACRO(back_to_back_c0_hazard,
-        _ehb
-       )
+#define __mtc0_tlbw_hazard                                             \
+       ___ehb
+
+#define __tlbw_use_hazard                                              \
+       ___ehb
+
+#define __tlb_probe_hazard                                             \
+       ___ehb
+
+#define __irq_enable_hazard                                            \
+       ___ehb
+
+#define __irq_disable_hazard                                           \
+       ___ehb
+
+#define __back_to_back_c0_hazard                                       \
+       ___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
@@ -94,24 +73,42 @@ do {                                                                        \
  * These are slightly complicated by the fact that we guarantee R1 kernels to
  * run fine on R2 processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-       _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(tlbw_use_hazard,
-       _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(tlb_probe_hazard,
-        _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(irq_enable_hazard,
-        _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(irq_disable_hazard,
-       _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(back_to_back_c0_hazard,
-        _ssnop; _ssnop; _ssnop; _ehb
-       )
+
+#define __mtc0_tlbw_hazard                                             \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __tlbw_use_hazard                                              \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __tlb_probe_hazard                                             \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __irq_enable_hazard                                            \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __irq_disable_hazard                                           \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __back_to_back_c0_hazard                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
@@ -147,18 +144,18 @@ do {                                                                      \
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-       )
-ASMMACRO(tlbw_use_hazard,
-       )
-ASMMACRO(tlb_probe_hazard,
-       )
-ASMMACRO(irq_enable_hazard,
-       )
-ASMMACRO(irq_disable_hazard,
-       )
-ASMMACRO(back_to_back_c0_hazard,
-       )
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard,
 /*
  * Mostly like R4000 for historic reasons
  */
-ASMMACRO(mtc0_tlbw_hazard,
-       )
-ASMMACRO(tlbw_use_hazard,
-       )
-ASMMACRO(tlb_probe_hazard,
-       )
-ASMMACRO(irq_enable_hazard,
-       )
-ASMMACRO(irq_disable_hazard,
-        _ssnop; _ssnop; _ssnop
-       )
-ASMMACRO(back_to_back_c0_hazard,
-       )
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard                                           \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #else
@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard,
  * hazard so this is nice trick to have an optimal code for a range of
  * processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-       nop; nop
-       )
-ASMMACRO(tlbw_use_hazard,
-       nop; nop; nop
-       )
-ASMMACRO(tlb_probe_hazard,
-        nop; nop; nop
-       )
-ASMMACRO(irq_enable_hazard,
-        _ssnop; _ssnop; _ssnop;
-       )
-ASMMACRO(irq_disable_hazard,
-       nop; nop; nop
-       )
-ASMMACRO(back_to_back_c0_hazard,
-        _ssnop; _ssnop; _ssnop;
-       )
+#define __mtc0_tlbw_hazard                                             \
+       nop;                                                            \
+       nop
+
+#define __tlbw_use_hazard                                              \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __tlb_probe_hazard                                             \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __irq_enable_hazard                                            \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop
+
+#define __irq_disable_hazard                                           \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __back_to_back_c0_hazard                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop
+
 #define instruction_hazard() do { } while (0)
 
 #endif
@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard,
 /* FPU hazards */
 
 #if defined(CONFIG_CPU_SB1)
-ASMMACRO(enable_fpu_hazard,
-        .set   push;
-        .set   mips64;
-        .set   noreorder;
-        _ssnop;
-        bnezl  $0, .+4;
-        _ssnop;
-        .set   pop
-)
-ASMMACRO(disable_fpu_hazard,
-)
+
+#define __enable_fpu_hazard                                            \
+       .set    push;                                                   \
+       .set    mips64;                                                 \
+       .set    noreorder;                                              \
+       ___ssnop;                                                       \
+       bnezl   $0, .+4;                                                \
+       ___ssnop;                                                       \
+       .set    pop
+
+#define __disable_fpu_hazard
 
 #elif defined(CONFIG_CPU_MIPSR2)
-ASMMACRO(enable_fpu_hazard,
-        _ehb
-)
-ASMMACRO(disable_fpu_hazard,
-        _ehb
-)
+
+#define __enable_fpu_hazard                                            \
+       ___ehb
+
+#define __disable_fpu_hazard                                           \
+       ___ehb
+
 #else
-ASMMACRO(enable_fpu_hazard,
-        nop; nop; nop; nop
-)
-ASMMACRO(disable_fpu_hazard,
-        _ehb
-)
+
+#define __enable_fpu_hazard                                            \
+       nop;                                                            \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __disable_fpu_hazard                                           \
+       ___ehb
+
 #endif
 
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define        _ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop()                                                       \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(___ssnop)                                           \
+       );                                                              \
+} while (0)
+
+#define        _ehb()                                                          \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(___ehb)                                             \
+       );                                                              \
+} while (0)
+
+
+#define mtc0_tlbw_hazard()                                             \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__mtc0_tlbw_hazard)                                 \
+       );                                                              \
+} while (0)
+
+
+#define tlbw_use_hazard()                                              \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__tlbw_use_hazard)                                  \
+       );                                                              \
+} while (0)
+
+
+#define tlb_probe_hazard()                                             \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__tlb_probe_hazard)                                 \
+       );                                                              \
+} while (0)
+
+
+#define irq_enable_hazard()                                            \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__irq_enable_hazard)                                \
+       );                                                              \
+} while (0)
+
+
+#define irq_disable_hazard()                                           \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__irq_disable_hazard)                               \
+       );                                                              \
+} while (0)
+
+
+#define back_to_back_c0_hazard()                                       \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__back_to_back_c0_hazard)                           \
+       );                                                              \
+} while (0)
+
+
+#define enable_fpu_hazard()                                            \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__enable_fpu_hazard)                                \
+       );                                                              \
+} while (0)
+
+
+#define disable_fpu_hazard()                                           \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__disable_fpu_hazard)                               \
+       );                                                              \
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__  */
+
 #endif /* _ASM_HAZARDS_H */
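
A minimal standalone sketch of the dual-use pattern these macros rely on (made-up demo_* names, not code from this commit): in .S files the bare name expands to the raw instruction sequence, while in C the same body is passed through __stringify() and becomes the template of an inline-asm statement.

#define __demo_stringify_1(x...)	#x
#define __demo_stringify(x...)		__demo_stringify_1(x)

#define __demo_hazard							\
	nop;								\
	nop

#ifdef __ASSEMBLY__
/* Assembly: the bare name drops the two nops straight into the code. */
#define demo_hazard __demo_hazard
#else
/* C: the identical body is stringified into an inline-asm statement. */
#define demo_hazard()							\
do {									\
	__asm__ __volatile__(__demo_stringify(__demo_hazard));		\
} while (0)

void demo_barrier(void)
{
	demo_hazard();		/* emits "nop; nop" at this point */
}
#endif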
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
new file mode 100644 (file)
index 0000000..d192158
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef __ASM_IDLE_H
+#define __ASM_IDLE_H
+
+#include <linux/linkage.h>
+
+extern void (*cpu_wait)(void);
+extern void r4k_wait(void);
+extern asmlinkage void __r4k_wait(void);
+extern void r4k_wait_irqoff(void);
+extern void __pastwait(void);
+
+static inline int using_rollback_handler(void)
+{
+       return cpu_wait == r4k_wait;
+}
+
+static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
+{
+       return addr >= (unsigned long)r4k_wait_irqoff &&
+              addr < (unsigned long)__pastwait;
+}
+
+#endif /* __ASM_IDLE_H  */
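
A standalone sketch of the pattern this new header exposes (demo_* names are stand-ins, not kernel symbols): cpu_wait is a function pointer chosen at boot, and using_rollback_handler() is simply a pointer comparison against r4k_wait, the idle routine that the interrupt rollback code is written to recognise.

#include <stdio.h>

static void demo_r4k_wait(void)   { /* would execute the WAIT instruction */ }
static void demo_other_wait(void) { /* some other idle implementation */ }

static void (*demo_cpu_wait)(void) = demo_r4k_wait;

static int demo_using_rollback_handler(void)
{
	return demo_cpu_wait == demo_r4k_wait;
}

int main(void)
{
	printf("rollback handler in use: %d\n", demo_using_rollback_handler());
	demo_cpu_wait = demo_other_wait;
	printf("rollback handler in use: %d\n", demo_using_rollback_handler());
	return 0;
}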
index f1eadf764071d1727af40d22a1476a378fc5ad1d..22912f78401c2f6b2a5d8b491146a485a84f1106 100644 (file)
 
 typedef unsigned int mips_instruction;
 
+/* microMIPS instruction decode structure. Do NOT export!!! */
+struct mm_decoded_insn {
+       mips_instruction insn;
+       mips_instruction next_insn;
+       int pc_inc;
+       int next_pc_inc;
+       int micro_mips_mode;
+};
+
+/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */
+extern const int reg16to32[];
+
 #endif /* _ASM_INST_H */
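
The struct above just packages a fetched microMIPS instruction together with the following one and their PC increments (16-bit opcodes advance the PC by 2 bytes, 32-bit ones by 4); the decode logic itself is not part of this hunk. A standalone sketch with placeholder values:

#include <stdio.h>

typedef unsigned int mips_instruction;

struct mm_decoded_insn {
	mips_instruction insn;
	mips_instruction next_insn;
	int pc_inc;
	int next_pc_inc;
	int micro_mips_mode;
};

int main(void)
{
	/* Placeholder encodings; real values come from the instruction fetch. */
	struct mm_decoded_insn dec = {
		.insn		 = 0x0000,
		.next_insn	 = 0x00000000,
		.pc_inc		 = 2,	/* a 16-bit microMIPS opcode */
		.next_pc_inc	 = 4,	/* followed by a 32-bit one */
		.micro_mips_mode = 1,
	};

	printf("PC advances by %d then %d bytes\n", dec.pc_inc, dec.next_pc_inc);
	return 0;
}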
index 1be13727323f7f2baf0395ee3360de5a78fcfaae..b7e59853fd33b05df930c3fb795aa03a9a6de23e 100644 (file)
@@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base)
  */
 static inline unsigned long virt_to_phys(volatile const void *address)
 {
-       return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
+       return __pa(address);
 }
 
 /*
index 9f3384c789d7bfdff5511706b0dffc3d2962c96c..45c00951888b4661ecbcf93bcc46fff33f9a9555 100644 (file)
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/hazards.h>
 
 #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-__asm__(
-       "       .macro  arch_local_irq_disable\n"
+static inline void arch_local_irq_disable(void)
+{
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
        "       di                                                      \n"
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
-
-static inline void arch_local_irq_disable(void)
-{
-       __asm__ __volatile__(
-               "arch_local_irq_disable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
+       : /* no outputs */
+       : /* no inputs */
+       : "memory");
 }
 
+static inline unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
 
-__asm__(
-       "       .macro  arch_local_irq_save result                      \n"
+       asm __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-       "       di      \\result                                        \n"
-       "       andi    \\result, 1                                     \n"
-       "       irq_disable_hazard                                      \n"
+       "       di      %[flags]                                        \n"
+       "       andi    %[flags], 1                                     \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (flags)
+       : /* no inputs */
+       : "memory");
 
-static inline unsigned long arch_local_irq_save(void)
-{
-       unsigned long flags;
-       asm volatile("arch_local_irq_save\t%0"
-                    : "=r" (flags)
-                    : /* no inputs */
-                    : "memory");
        return flags;
 }
 
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
 
-__asm__(
-       "       .macro  arch_local_irq_restore flags                    \n"
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
@@ -69,7 +64,7 @@ __asm__(
         * Slow, but doesn't suffer from a relatively unlikely race
         * condition we're having since days 1.
         */
-       "       beqz    \\flags, 1f                                     \n"
+       "       beqz    %[flags], 1f                                    \n"
        "       di                                                      \n"
        "       ei                                                      \n"
        "1:                                                             \n"
@@ -78,33 +73,44 @@ __asm__(
         * Fast, dangerous.  Life is fun, life is good.
         */
        "       mfc0    $1, $12                                         \n"
-       "       ins     $1, \\flags, 0, 1                               \n"
+       "       ins     $1, %[flags], 0, 1                              \n"
        "       mtc0    $1, $12                                         \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-       unsigned long __tmp1;
-
-       __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
+       : [flags] "=r" (__tmp1)
+       : "0" (flags)
+       : "memory");
 }
 
 static inline void __arch_local_irq_restore(unsigned long flags)
 {
-       unsigned long __tmp1;
-
        __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
+       "       .set    push                                            \n"
+       "       .set    noreorder                                       \n"
+       "       .set    noat                                            \n"
+#if defined(CONFIG_IRQ_CPU)
+       /*
+        * Slow, but doesn't suffer from a relatively unlikely race
+        * condition we're having since days 1.
+        */
+       "       beqz    %[flags], 1f                                    \n"
+       "       di                                                      \n"
+       "       ei                                                      \n"
+       "1:                                                             \n"
+#else
+       /*
+        * Fast, dangerous.  Life is fun, life is good.
+        */
+       "       mfc0    $1, $12                                         \n"
+       "       ins     $1, %[flags], 0, 1                              \n"
+       "       mtc0    $1, $12                                         \n"
+#endif
+       "       " __stringify(__irq_disable_hazard) "                   \n"
+       "       .set    pop                                             \n"
+       : [flags] "=r" (flags)
+       : "0" (flags)
+       : "memory");
 }
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
 #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
-__asm__(
-       "       .macro  arch_local_irq_enable                           \n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of call overhead on each local_irq_enable()
+        */
+       smtc_ipi_replay();
+#endif
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
@@ -133,45 +149,28 @@ __asm__(
        "       xori    $1,0x1e                                         \n"
        "       mtc0    $1,$12                                          \n"
 #endif
-       "       irq_enable_hazard                                       \n"
+       "       " __stringify(__irq_enable_hazard) "                    \n"
        "       .set    pop                                             \n"
-       "       .endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of call overhead on each local_irq_enable()
-        */
-       smtc_ipi_replay();
-#endif
-       __asm__ __volatile__(
-               "arch_local_irq_enable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
+       : /* no outputs */
+       : /* no inputs */
+       : "memory");
 }
 
+static inline unsigned long arch_local_save_flags(void)
+{
+       unsigned long flags;
 
-__asm__(
-       "       .macro  arch_local_save_flags flags                     \n"
+       asm __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\flags, $2, 1                                  \n"
+       "       mfc0    %[flags], $2, 1                                 \n"
 #else
-       "       mfc0    \\flags, $12                                    \n"
+       "       mfc0    %[flags], $12                                   \n"
 #endif
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (flags));
 
-static inline unsigned long arch_local_save_flags(void)
-{
-       unsigned long flags;
-       asm volatile("arch_local_save_flags %0" : "=r" (flags));
        return flags;
 }
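
The conversion in this hunk trades globally emitted GAS .macro definitions, which the old one-line asm statements invoked by name, for self-contained inline asm: the hazard sequence is pulled in with __stringify() and operands are named, e.g. %[flags]. A cut-down sketch of the new style, mirroring the non-SMTC branch of arch_local_save_flags() above and assuming a MIPS toolchain:

static inline unsigned long demo_save_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	mfc0	%[flags], $12				\n"	/* read CP0 Status */
	"	.set	pop					\n"
	: [flags] "=r" (flags));

	return flags;
}

Because the instruction sequence now lives inside the function body, the compiler sees the operand constraints directly and nothing depends on a .macro having been emitted earlier in the translation unit.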
 
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
new file mode 100644 (file)
index 0000000..4d6fa0b
--- /dev/null
@@ -0,0 +1,663 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __MIPS_KVM_HOST_H__
+#define __MIPS_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+
+
+#define KVM_MAX_VCPUS          1
+#define KVM_USER_MEM_SLOTS     8
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS  0
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/* Don't support huge pages */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+
+/* We don't currently support large pages. */
+#define KVM_NR_PAGE_SIZES      1
+#define KVM_PAGES_PER_HPAGE(x) 1
+
+
+
+/* Special address that contains the comm page, used for reducing # of traps */
+#define KVM_GUEST_COMMPAGE_ADDR     0x0
+
+#define KVM_GUEST_KERNEL_MODE(vcpu)    ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
+                                       ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
+
+#define KVM_GUEST_KUSEG             0x00000000UL
+#define KVM_GUEST_KSEG0             0x40000000UL
+#define KVM_GUEST_KSEG23            0x60000000UL
+#define KVM_GUEST_KSEGX(a)          ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_CPHYSADDR(a)      ((_ACAST32_(a)) & 0x1fffffff)
+
+#define KVM_GUEST_CKSEG0ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_CKSEG1ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_CKSEG23ADDR(a)       (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#define KVM_GUEST_KSEG0ADDR(a)         (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_KSEG1ADDR(a)         (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_KSEG23ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+#define KVM_INVALID_PAGE            0xdeadbeef
+#define KVM_INVALID_INST            0xdeadbeef
+#define KVM_INVALID_ADDR            0xdeadbeef
+
+#define KVM_MALTA_GUEST_RTC_ADDR    0xb8000070UL
+
+#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
+#define MS_TO_NS(x) (x * 1E6L)
+
+#define CAUSEB_DC       27
+#define CAUSEF_DC       (_ULCAST_(1)   << 27)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+struct kvm_interrupt;
+
+extern atomic_t kvm_mips_instance;
+extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+
+struct kvm_vm_stat {
+       u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+       u32 wait_exits;
+       u32 cache_exits;
+       u32 signal_exits;
+       u32 int_exits;
+       u32 cop_unusable_exits;
+       u32 tlbmod_exits;
+       u32 tlbmiss_ld_exits;
+       u32 tlbmiss_st_exits;
+       u32 addrerr_st_exits;
+       u32 addrerr_ld_exits;
+       u32 syscall_exits;
+       u32 resvd_inst_exits;
+       u32 break_inst_exits;
+       u32 flush_dcache_exits;
+       u32 halt_wakeup;
+};
+
+enum kvm_mips_exit_types {
+       WAIT_EXITS,
+       CACHE_EXITS,
+       SIGNAL_EXITS,
+       INT_EXITS,
+       COP_UNUSABLE_EXITS,
+       TLBMOD_EXITS,
+       TLBMISS_LD_EXITS,
+       TLBMISS_ST_EXITS,
+       ADDRERR_ST_EXITS,
+       ADDRERR_LD_EXITS,
+       SYSCALL_EXITS,
+       RESVD_INST_EXITS,
+       BREAK_INST_EXITS,
+       FLUSH_DCACHE_EXITS,
+       MAX_KVM_MIPS_EXIT_TYPES
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_arch {
+       /* Guest GVA->HPA page table */
+       unsigned long *guest_pmap;
+       unsigned long guest_pmap_npages;
+
+       /* Wired host TLB used for the commpage */
+       int commpage_tlb;
+};
+
+#define N_MIPS_COPROC_REGS      32
+#define N_MIPS_COPROC_SEL      8
+
+struct mips_coproc {
+       unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+       unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#endif
+};
+
+/*
+ * Coprocessor 0 register names
+ */
+#define        MIPS_CP0_TLB_INDEX          0
+#define        MIPS_CP0_TLB_RANDOM         1
+#define        MIPS_CP0_TLB_LOW            2
+#define        MIPS_CP0_TLB_LO0            2
+#define        MIPS_CP0_TLB_LO1            3
+#define        MIPS_CP0_TLB_CONTEXT    4
+#define        MIPS_CP0_TLB_PG_MASK    5
+#define        MIPS_CP0_TLB_WIRED          6
+#define        MIPS_CP0_HWRENA             7
+#define        MIPS_CP0_BAD_VADDR          8
+#define        MIPS_CP0_COUNT          9
+#define        MIPS_CP0_TLB_HI         10
+#define        MIPS_CP0_COMPARE            11
+#define        MIPS_CP0_STATUS         12
+#define        MIPS_CP0_CAUSE          13
+#define        MIPS_CP0_EXC_PC         14
+#define        MIPS_CP0_PRID               15
+#define        MIPS_CP0_CONFIG         16
+#define        MIPS_CP0_LLADDR         17
+#define        MIPS_CP0_WATCH_LO           18
+#define        MIPS_CP0_WATCH_HI           19
+#define        MIPS_CP0_TLB_XCONTEXT   20
+#define        MIPS_CP0_ECC                26
+#define        MIPS_CP0_CACHE_ERR          27
+#define        MIPS_CP0_TAG_LO         28
+#define        MIPS_CP0_TAG_HI         29
+#define        MIPS_CP0_ERROR_PC           30
+#define        MIPS_CP0_DEBUG          23
+#define        MIPS_CP0_DEPC               24
+#define        MIPS_CP0_PERFCNT            25
+#define        MIPS_CP0_ERRCTL         26
+#define        MIPS_CP0_DATA_LO            28
+#define        MIPS_CP0_DATA_HI            29
+#define        MIPS_CP0_DESAVE         31
+
+#define MIPS_CP0_CONFIG_SEL        0
+#define MIPS_CP0_CONFIG1_SEL    1
+#define MIPS_CP0_CONFIG2_SEL    2
+#define MIPS_CP0_CONFIG3_SEL    3
+
+/* Config0 register bits */
+#define CP0C0_M    31
+#define CP0C0_K23  28
+#define CP0C0_KU   25
+#define CP0C0_MDU  20
+#define CP0C0_MM   17
+#define CP0C0_BM   16
+#define CP0C0_BE   15
+#define CP0C0_AT   13
+#define CP0C0_AR   10
+#define CP0C0_MT   7
+#define CP0C0_VI   3
+#define CP0C0_K0   0
+
+/* Config1 register bits */
+#define CP0C1_M    31
+#define CP0C1_MMU  25
+#define CP0C1_IS   22
+#define CP0C1_IL   19
+#define CP0C1_IA   16
+#define CP0C1_DS   13
+#define CP0C1_DL   10
+#define CP0C1_DA   7
+#define CP0C1_C2   6
+#define CP0C1_MD   5
+#define CP0C1_PC   4
+#define CP0C1_WR   3
+#define CP0C1_CA   2
+#define CP0C1_EP   1
+#define CP0C1_FP   0
+
+/* Config2 Register bits */
+#define CP0C2_M    31
+#define CP0C2_TU   28
+#define CP0C2_TS   24
+#define CP0C2_TL   20
+#define CP0C2_TA   16
+#define CP0C2_SU   12
+#define CP0C2_SS   8
+#define CP0C2_SL   4
+#define CP0C2_SA   0
+
+/* Config3 Register bits */
+#define CP0C3_M    31
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ULRI  13
+#define CP0C3_DSPP 10
+#define CP0C3_LPA  7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP   4
+#define CP0C3_MT   2
+#define CP0C3_SM   1
+#define CP0C3_TL   0
+
+/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
+#define MIPS_CONFIG0                                              \
+  ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
+
+/* Have config2, no coprocessor2 attached, no MDMX support attached,
+   no performance counters, no watch registers,
+   no code compression, EJTAG present, no FPU */
+#define MIPS_CONFIG1                                              \
+((1 << CP0C1_M) |                                                 \
+ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |            \
+ (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |            \
+ (0 << CP0C1_FP))
+
+/* Have config3, no tertiary/secondary caches implemented */
+#define MIPS_CONFIG2                                              \
+((1 << CP0C2_M))
+
+/* No config4, no DSP ASE, no large physaddr (PABITS),
+   no external interrupt controller, no vectored interrupts,
+   no 1kb pages, no SmartMIPS ASE, no trace logic */
+#define MIPS_CONFIG3                                              \
+((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |          \
+ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |        \
+ (0 << CP0C3_SM) | (0 << CP0C3_TL))
+
+/* MMU types, the first four entries have the same layout as the
+   CP0C0_MT field.  */
+enum mips_mmu_types {
+       MMU_TYPE_NONE,
+       MMU_TYPE_R4000,
+       MMU_TYPE_RESERVED,
+       MMU_TYPE_FMT,
+       MMU_TYPE_R3000,
+       MMU_TYPE_R6000,
+       MMU_TYPE_R8000
+};
+
+/*
+ * Trap codes
+ */
+#define T_INT           0      /* Interrupt pending */
+#define T_TLB_MOD       1      /* TLB modified fault */
+#define T_TLB_LD_MISS       2  /* TLB miss on load or ifetch */
+#define T_TLB_ST_MISS       3  /* TLB miss on a store */
+#define T_ADDR_ERR_LD       4  /* Address error on a load or ifetch */
+#define T_ADDR_ERR_ST       5  /* Address error on a store */
+#define T_BUS_ERR_IFETCH    6  /* Bus error on an ifetch */
+#define T_BUS_ERR_LD_ST     7  /* Bus error on a load or store */
+#define T_SYSCALL       8      /* System call */
+#define T_BREAK         9      /* Breakpoint */
+#define T_RES_INST      10     /* Reserved instruction exception */
+#define T_COP_UNUSABLE      11 /* Coprocessor unusable */
+#define T_OVFLOW        12     /* Arithmetic overflow */
+
+/*
+ * Trap definitions added for r4000 port.
+ */
+#define T_TRAP          13     /* Trap instruction */
+#define T_VCEI          14     /* Virtual coherency exception */
+#define T_FPE           15     /* Floating point exception */
+#define T_WATCH         23     /* Watch address reference */
+#define T_VCED          31     /* Virtual coherency data */
+
+/* Resume Flags */
+#define RESUME_FLAG_DR          (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST        (1<<1) /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_GUEST_DR         RESUME_FLAG_DR
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+enum emulation_result {
+       EMULATE_DONE,           /* no further processing */
+       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
+       EMULATE_FAIL,           /* can't emulate this instruction */
+       EMULATE_WAIT,           /* WAIT instruction */
+       EMULATE_PRIV_FAIL,
+};
+
+#define MIPS3_PG_G  0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
+#define MIPS3_PG_V  0x00000002 /* Valid */
+#define MIPS3_PG_NV 0x00000000
+#define MIPS3_PG_D  0x00000004 /* Dirty */
+
+#define mips3_paddr_to_tlbpfn(x) \
+    (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
+#define mips3_tlbpfn_to_paddr(x) \
+    ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
+
+#define MIPS3_PG_SHIFT      6
+#define MIPS3_PG_FRAME      0x3fffffc0
+
+#define VPN2_MASK           0xffffe000
+#define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
+#define TLB_ASID(x)         ((x).tlb_hi & ASID_MASK)
+#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
+
+struct kvm_mips_tlb {
+       long tlb_mask;
+       long tlb_hi;
+       long tlb_lo0;
+       long tlb_lo1;
+};
+
+#define KVM_MIPS_GUEST_TLB_SIZE     64
+struct kvm_vcpu_arch {
+       void *host_ebase, *guest_ebase;
+       unsigned long host_stack;
+       unsigned long host_gp;
+
+       /* Host CP0 registers used when handling exits from guest */
+       unsigned long host_cp0_badvaddr;
+       unsigned long host_cp0_cause;
+       unsigned long host_cp0_epc;
+       unsigned long host_cp0_entryhi;
+       uint32_t guest_inst;
+
+       /* GPRS */
+       unsigned long gprs[32];
+       unsigned long hi;
+       unsigned long lo;
+       unsigned long pc;
+
+       /* FPU State */
+       struct mips_fpu_struct fpu;
+
+       /* COP0 State */
+       struct mips_coproc *cop0;
+
+       /* Host KSEG0 address of the EI/DI offset */
+       void *kseg0_commpage;
+
+       u32 io_gpr;             /* GPR used as IO source/target */
+
+	/* Used to calibrate the virtual count register for the guest */
+       int32_t host_cp0_count;
+
+       /* Bitmask of exceptions that are pending */
+       unsigned long pending_exceptions;
+
+       /* Bitmask of pending exceptions to be cleared */
+       unsigned long pending_exceptions_clr;
+
+       unsigned long pending_load_cause;
+
+	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
+       unsigned long preempt_entryhi;
+
+       /* S/W Based TLB for guest */
+       struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
+
+       /* Cached guest kernel/user ASIDs */
+       uint32_t guest_user_asid[NR_CPUS];
+       uint32_t guest_kernel_asid[NR_CPUS];
+       struct mm_struct guest_kernel_mm, guest_user_mm;
+
+       struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
+
+
+       struct hrtimer comparecount_timer;
+
+       int last_sched_cpu;
+
+       /* WAIT executed */
+       int wait;
+};
+
+
+#define kvm_read_c0_guest_index(cop0)               (cop0->reg[MIPS_CP0_TLB_INDEX][0])
+#define kvm_write_c0_guest_index(cop0, val)         (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
+#define kvm_read_c0_guest_entrylo0(cop0)            (cop0->reg[MIPS_CP0_TLB_LO0][0])
+#define kvm_read_c0_guest_entrylo1(cop0)            (cop0->reg[MIPS_CP0_TLB_LO1][0])
+#define kvm_read_c0_guest_context(cop0)             (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
+#define kvm_write_c0_guest_context(cop0, val)       (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
+#define kvm_read_c0_guest_userlocal(cop0)           (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_read_c0_guest_pagemask(cop0)            (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
+#define kvm_write_c0_guest_pagemask(cop0, val)      (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
+#define kvm_read_c0_guest_wired(cop0)               (cop0->reg[MIPS_CP0_TLB_WIRED][0])
+#define kvm_write_c0_guest_wired(cop0, val)         (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
+#define kvm_read_c0_guest_badvaddr(cop0)            (cop0->reg[MIPS_CP0_BAD_VADDR][0])
+#define kvm_write_c0_guest_badvaddr(cop0, val)      (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
+#define kvm_read_c0_guest_count(cop0)               (cop0->reg[MIPS_CP0_COUNT][0])
+#define kvm_write_c0_guest_count(cop0, val)         (cop0->reg[MIPS_CP0_COUNT][0] = (val))
+#define kvm_read_c0_guest_entryhi(cop0)             (cop0->reg[MIPS_CP0_TLB_HI][0])
+#define kvm_write_c0_guest_entryhi(cop0, val)       (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
+#define kvm_read_c0_guest_compare(cop0)             (cop0->reg[MIPS_CP0_COMPARE][0])
+#define kvm_write_c0_guest_compare(cop0, val)       (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
+#define kvm_read_c0_guest_status(cop0)              (cop0->reg[MIPS_CP0_STATUS][0])
+#define kvm_write_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] = (val))
+#define kvm_read_c0_guest_intctl(cop0)              (cop0->reg[MIPS_CP0_STATUS][1])
+#define kvm_write_c0_guest_intctl(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][1] = (val))
+#define kvm_read_c0_guest_cause(cop0)               (cop0->reg[MIPS_CP0_CAUSE][0])
+#define kvm_write_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
+#define kvm_read_c0_guest_epc(cop0)                 (cop0->reg[MIPS_CP0_EXC_PC][0])
+#define kvm_write_c0_guest_epc(cop0, val)           (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
+#define kvm_read_c0_guest_prid(cop0)                (cop0->reg[MIPS_CP0_PRID][0])
+#define kvm_write_c0_guest_prid(cop0, val)          (cop0->reg[MIPS_CP0_PRID][0] = (val))
+#define kvm_read_c0_guest_ebase(cop0)               (cop0->reg[MIPS_CP0_PRID][1])
+#define kvm_write_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] = (val))
+#define kvm_read_c0_guest_config(cop0)              (cop0->reg[MIPS_CP0_CONFIG][0])
+#define kvm_read_c0_guest_config1(cop0)             (cop0->reg[MIPS_CP0_CONFIG][1])
+#define kvm_read_c0_guest_config2(cop0)             (cop0->reg[MIPS_CP0_CONFIG][2])
+#define kvm_read_c0_guest_config3(cop0)             (cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config7(cop0)             (cop0->reg[MIPS_CP0_CONFIG][7])
+#define kvm_write_c0_guest_config(cop0, val)        (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
+#define kvm_write_c0_guest_config1(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
+#define kvm_write_c0_guest_config2(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
+#define kvm_write_c0_guest_config3(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config7(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
+#define kvm_read_c0_guest_errorepc(cop0)            (cop0->reg[MIPS_CP0_ERROR_PC][0])
+#define kvm_write_c0_guest_errorepc(cop0, val)      (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+
+#define kvm_set_c0_guest_status(cop0, val)          (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
+#define kvm_clear_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+#define kvm_set_c0_guest_cause(cop0, val)           (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
+#define kvm_clear_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+#define kvm_change_c0_guest_cause(cop0, change, val)  \
+{                                                     \
+    kvm_clear_c0_guest_cause(cop0, change);           \
+    kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
+}
+#define kvm_set_c0_guest_ebase(cop0, val)           (cop0->reg[MIPS_CP0_PRID][1] |= (val))
+#define kvm_clear_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
+#define kvm_change_c0_guest_ebase(cop0, change, val)  \
+{                                                     \
+    kvm_clear_c0_guest_ebase(cop0, change);           \
+    kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
+}
+
+
+struct kvm_mips_callbacks {
+       int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
+       int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
+       int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
+       int (*handle_syscall) (struct kvm_vcpu *vcpu);
+       int (*handle_res_inst) (struct kvm_vcpu *vcpu);
+       int (*handle_break) (struct kvm_vcpu *vcpu);
+       int (*vm_init) (struct kvm *kvm);
+       int (*vcpu_init) (struct kvm_vcpu *vcpu);
+       int (*vcpu_setup) (struct kvm_vcpu *vcpu);
+        gpa_t(*gva_to_gpa) (gva_t gva);
+       void (*queue_timer_int) (struct kvm_vcpu *vcpu);
+       void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
+       void (*queue_io_int) (struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq);
+       void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq);
+       int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause);
+       int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause);
+};
+extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+/* Trampoline ASM routine to start running in "Guest" context */
+extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* TLB handling */
+uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
+                                          struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+                                             struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+                                               struct kvm_mips_tlb *tlb,
+                                               unsigned long *hpa0,
+                                               unsigned long *hpa1);
+
+extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
+                                                   uint32_t *opc,
+                                                   struct kvm_run *run,
+                                                   struct kvm_vcpu *vcpu);
+
+extern void kvm_mips_dump_host_tlbs(void);
+extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_flush_host_tlb(int skip_kseg0);
+extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
+extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
+
+extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
+                                    unsigned long entryhi);
+extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
+extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+                                                  unsigned long gva);
+extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+                                   struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
+extern void kvm_local_flush_tlb_all(void);
+extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
+extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
+extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
+
+/* Emulation */
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+
+extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
+                                                  uint32_t *opc,
+                                                  struct kvm_run *run,
+                                                  struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+                                                     uint32_t *opc,
+                                                     struct kvm_run *run,
+                                                     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+                                                        uint32_t *opc,
+                                                        struct kvm_run *run,
+                                                        struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+                                                       uint32_t *opc,
+                                                       struct kvm_run *run,
+                                                       struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+                                                        uint32_t *opc,
+                                                        struct kvm_run *run,
+                                                        struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+                                                       uint32_t *opc,
+                                                       struct kvm_run *run,
+                                                       struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+                                                     uint32_t *opc,
+                                                     struct kvm_run *run,
+                                                     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
+                                               uint32_t *opc,
+                                               struct kvm_run *run,
+                                               struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+                                                        struct kvm_run *run);
+
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+                                              uint32_t *opc,
+                                              struct kvm_run *run,
+                                              struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
+                                            uint32_t *opc,
+                                            uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
+                                          uint32_t *opc,
+                                          uint32_t cause,
+                                          struct kvm_run *run,
+                                          struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_store(uint32_t inst,
+                                            uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_load(uint32_t inst,
+                                           uint32_t cause,
+                                           struct kvm_run *run,
+                                           struct kvm_vcpu *vcpu);
+
+/* Dynamic binary translation */
+extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+                                     struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+                                  struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+                              struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+                              struct kvm_vcpu *vcpu);
+
+/* Misc */
+extern void mips32_SyncICache(unsigned long addr, unsigned long size);
+extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+
+
+#endif /* __MIPS_KVM_HOST_H__ */
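
The long run of kvm_read/write_c0_guest_*() macros above is plain indexed access into the cop0->reg[register][select] array. A standalone sketch of that accessor pattern, trimmed to the Status register and using an arbitrary demo value:

#include <stdio.h>

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8
#define MIPS_CP0_STATUS		12

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
};

#define kvm_read_c0_guest_status(cop0)	     ((cop0)->reg[MIPS_CP0_STATUS][0])
#define kvm_write_c0_guest_status(cop0, val) ((cop0)->reg[MIPS_CP0_STATUS][0] = (val))
#define kvm_set_c0_guest_status(cop0, val)   ((cop0)->reg[MIPS_CP0_STATUS][0] |= (val))

int main(void)
{
	struct mips_coproc cop0 = { { { 0 } } };

	kvm_write_c0_guest_status(&cop0, 0x00400000UL);	/* arbitrary demo value */
	kvm_set_c0_guest_status(&cop0, 0x1UL);
	printf("guest Status = %#lx\n", kvm_read_c0_guest_status(&cop0));
	return 0;
}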
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
deleted file mode 100644 (file)
index 8fcf8df..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef BCM63XX_CLK_H_
-#define BCM63XX_CLK_H_
-
-struct clk {
-       void            (*set)(struct clk *, int);
-       unsigned int    rate;
-       unsigned int    usage;
-       int             id;
-};
-
-#endif /* ! BCM63XX_CLK_H_ */
index cb922b9cb0e9b1e7cc96f36149a24bd8b97e70cb..336228990808e5c4aed717f21682ebd1c85045bc 100644 (file)
 #define BCM6345_CPU_ID         0x6345
 #define BCM6348_CPU_ID         0x6348
 #define BCM6358_CPU_ID         0x6358
+#define BCM6362_CPU_ID         0x6362
 #define BCM6368_CPU_ID         0x6368
 
 void __init bcm63xx_cpu_init(void);
 u16 __bcm63xx_get_cpu_id(void);
-u16 bcm63xx_get_cpu_rev(void);
+u8 bcm63xx_get_cpu_rev(void);
 unsigned int bcm63xx_get_cpu_freq(void);
 
 #ifdef CONFIG_BCM63XX_CPU_6328
@@ -86,6 +87,20 @@ unsigned int bcm63xx_get_cpu_freq(void);
 # define BCMCPU_IS_6358()      (0)
 #endif
 
+#ifdef CONFIG_BCM63XX_CPU_6362
+# ifdef bcm63xx_get_cpu_id
+#  undef bcm63xx_get_cpu_id
+#  define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id()
+#  define BCMCPU_RUNTIME_DETECT
+# else
+#  define bcm63xx_get_cpu_id() BCM6362_CPU_ID
+# endif
+# define BCMCPU_IS_6362()      (bcm63xx_get_cpu_id() == BCM6362_CPU_ID)
+#else
+# define BCMCPU_IS_6362()      (0)
+#endif
+
+
 #ifdef CONFIG_BCM63XX_CPU_6368
 # ifdef bcm63xx_get_cpu_id
 #  undef bcm63xx_get_cpu_id
@@ -405,6 +420,62 @@ enum bcm63xx_regs_set {
 #define BCM_6358_MISC_BASE             (0xdeadbeef)
 
 
+/*
+ * 6362 register sets base address
+ */
+#define BCM_6362_DSL_LMEM_BASE         (0xdeadbeef)
+#define BCM_6362_PERF_BASE             (0xb0000000)
+#define BCM_6362_TIMER_BASE            (0xb0000040)
+#define BCM_6362_WDT_BASE              (0xb000005c)
+#define BCM_6362_UART0_BASE             (0xb0000100)
+#define BCM_6362_UART1_BASE            (0xb0000120)
+#define BCM_6362_GPIO_BASE             (0xb0000080)
+#define BCM_6362_SPI_BASE              (0xb0000800)
+#define BCM_6362_HSSPI_BASE            (0xb0001000)
+#define BCM_6362_UDC0_BASE             (0xdeadbeef)
+#define BCM_6362_USBDMA_BASE           (0xb000c000)
+#define BCM_6362_OHCI0_BASE            (0xb0002600)
+#define BCM_6362_OHCI_PRIV_BASE                (0xdeadbeef)
+#define BCM_6362_USBH_PRIV_BASE                (0xb0002700)
+#define BCM_6362_USBD_BASE             (0xb0002400)
+#define BCM_6362_MPI_BASE              (0xdeadbeef)
+#define BCM_6362_PCMCIA_BASE           (0xdeadbeef)
+#define BCM_6362_PCIE_BASE             (0xb0e40000)
+#define BCM_6362_SDRAM_REGS_BASE       (0xdeadbeef)
+#define BCM_6362_DSL_BASE              (0xdeadbeef)
+#define BCM_6362_UBUS_BASE             (0xdeadbeef)
+#define BCM_6362_ENET0_BASE            (0xdeadbeef)
+#define BCM_6362_ENET1_BASE            (0xdeadbeef)
+#define BCM_6362_ENETDMA_BASE          (0xb000d800)
+#define BCM_6362_ENETDMAC_BASE         (0xb000da00)
+#define BCM_6362_ENETDMAS_BASE         (0xb000dc00)
+#define BCM_6362_ENETSW_BASE           (0xb0e00000)
+#define BCM_6362_EHCI0_BASE            (0xb0002500)
+#define BCM_6362_SDRAM_BASE            (0xdeadbeef)
+#define BCM_6362_MEMC_BASE             (0xdeadbeef)
+#define BCM_6362_DDR_BASE              (0xb0003000)
+#define BCM_6362_M2M_BASE              (0xdeadbeef)
+#define BCM_6362_ATM_BASE              (0xdeadbeef)
+#define BCM_6362_XTM_BASE              (0xb0007800)
+#define BCM_6362_XTMDMA_BASE           (0xb000b800)
+#define BCM_6362_XTMDMAC_BASE          (0xdeadbeef)
+#define BCM_6362_XTMDMAS_BASE          (0xdeadbeef)
+#define BCM_6362_PCM_BASE              (0xb000a800)
+#define BCM_6362_PCMDMA_BASE           (0xdeadbeef)
+#define BCM_6362_PCMDMAC_BASE          (0xdeadbeef)
+#define BCM_6362_PCMDMAS_BASE          (0xdeadbeef)
+#define BCM_6362_RNG_BASE              (0xdeadbeef)
+#define BCM_6362_MISC_BASE             (0xb0001800)
+
+#define BCM_6362_NAND_REG_BASE         (0xb0000200)
+#define BCM_6362_NAND_CACHE_BASE       (0xb0000600)
+#define BCM_6362_LED_BASE              (0xb0001900)
+#define BCM_6362_IPSEC_BASE            (0xb0002800)
+#define BCM_6362_IPSEC_DMA_BASE                (0xb000d000)
+#define BCM_6362_WLAN_CHIPCOMMON_BASE  (0xb0004000)
+#define BCM_6362_WLAN_D11_BASE         (0xb0005000)
+#define BCM_6362_WLAN_SHIM_BASE                (0xb0007000)
+
 /*
  * 6368 register sets base address
  */
@@ -564,6 +635,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
 #ifdef CONFIG_BCM63XX_CPU_6358
        __GEN_RSET(6358)
 #endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+       __GEN_RSET(6362)
+#endif
 #ifdef CONFIG_BCM63XX_CPU_6368
        __GEN_RSET(6368)
 #endif
@@ -819,6 +893,71 @@ enum bcm63xx_irq {
 #define BCM_6358_EXT_IRQ2              (IRQ_INTERNAL_BASE + 27)
 #define BCM_6358_EXT_IRQ3              (IRQ_INTERNAL_BASE + 28)
 
+/*
+ * 6362 irqs
+ */
+#define BCM_6362_HIGH_IRQ_BASE         (IRQ_INTERNAL_BASE + 32)
+
+#define BCM_6362_TIMER_IRQ             (IRQ_INTERNAL_BASE + 0)
+#define BCM_6362_SPI_IRQ               (IRQ_INTERNAL_BASE + 2)
+#define BCM_6362_UART0_IRQ             (IRQ_INTERNAL_BASE + 3)
+#define BCM_6362_UART1_IRQ             (IRQ_INTERNAL_BASE + 4)
+#define BCM_6362_DSL_IRQ               (IRQ_INTERNAL_BASE + 28)
+#define BCM_6362_UDC0_IRQ              0
+#define BCM_6362_ENET0_IRQ             0
+#define BCM_6362_ENET1_IRQ             0
+#define BCM_6362_ENET_PHY_IRQ          (IRQ_INTERNAL_BASE + 14)
+#define BCM_6362_HSSPI_IRQ             (IRQ_INTERNAL_BASE + 5)
+#define BCM_6362_OHCI0_IRQ             (IRQ_INTERNAL_BASE + 9)
+#define BCM_6362_EHCI0_IRQ             (IRQ_INTERNAL_BASE + 10)
+#define BCM_6362_USBD_IRQ              (IRQ_INTERNAL_BASE + 11)
+#define BCM_6362_USBD_RXDMA0_IRQ       (IRQ_INTERNAL_BASE + 20)
+#define BCM_6362_USBD_TXDMA0_IRQ       (IRQ_INTERNAL_BASE + 21)
+#define BCM_6362_USBD_RXDMA1_IRQ       (IRQ_INTERNAL_BASE + 22)
+#define BCM_6362_USBD_TXDMA1_IRQ       (IRQ_INTERNAL_BASE + 23)
+#define BCM_6362_USBD_RXDMA2_IRQ       (IRQ_INTERNAL_BASE + 24)
+#define BCM_6362_USBD_TXDMA2_IRQ       (IRQ_INTERNAL_BASE + 25)
+#define BCM_6362_PCMCIA_IRQ            0
+#define BCM_6362_ENET0_RXDMA_IRQ       0
+#define BCM_6362_ENET0_TXDMA_IRQ       0
+#define BCM_6362_ENET1_RXDMA_IRQ       0
+#define BCM_6362_ENET1_TXDMA_IRQ       0
+#define BCM_6362_PCI_IRQ               (IRQ_INTERNAL_BASE + 30)
+#define BCM_6362_ATM_IRQ               0
+#define BCM_6362_ENETSW_RXDMA0_IRQ     (BCM_6362_HIGH_IRQ_BASE + 0)
+#define BCM_6362_ENETSW_RXDMA1_IRQ     (BCM_6362_HIGH_IRQ_BASE + 1)
+#define BCM_6362_ENETSW_RXDMA2_IRQ     (BCM_6362_HIGH_IRQ_BASE + 2)
+#define BCM_6362_ENETSW_RXDMA3_IRQ     (BCM_6362_HIGH_IRQ_BASE + 3)
+#define BCM_6362_ENETSW_TXDMA0_IRQ     0
+#define BCM_6362_ENETSW_TXDMA1_IRQ     0
+#define BCM_6362_ENETSW_TXDMA2_IRQ     0
+#define BCM_6362_ENETSW_TXDMA3_IRQ     0
+#define BCM_6362_XTM_IRQ               0
+#define BCM_6362_XTM_DMA0_IRQ          (BCM_6362_HIGH_IRQ_BASE + 12)
+
+#define BCM_6362_RING_OSC_IRQ          (IRQ_INTERNAL_BASE + 1)
+#define BCM_6362_WLAN_GPIO_IRQ         (IRQ_INTERNAL_BASE + 6)
+#define BCM_6362_WLAN_IRQ              (IRQ_INTERNAL_BASE + 7)
+#define BCM_6362_IPSEC_IRQ             (IRQ_INTERNAL_BASE + 8)
+#define BCM_6362_NAND_IRQ              (IRQ_INTERNAL_BASE + 12)
+#define BCM_6362_PCM_IRQ               (IRQ_INTERNAL_BASE + 13)
+#define BCM_6362_DG_IRQ                        (IRQ_INTERNAL_BASE + 15)
+#define BCM_6362_EPHY_ENERGY0_IRQ      (IRQ_INTERNAL_BASE + 16)
+#define BCM_6362_EPHY_ENERGY1_IRQ      (IRQ_INTERNAL_BASE + 17)
+#define BCM_6362_EPHY_ENERGY2_IRQ      (IRQ_INTERNAL_BASE + 18)
+#define BCM_6362_EPHY_ENERGY3_IRQ      (IRQ_INTERNAL_BASE + 19)
+#define BCM_6362_IPSEC_DMA0_IRQ                (IRQ_INTERNAL_BASE + 26)
+#define BCM_6362_IPSEC_DMA1_IRQ                (IRQ_INTERNAL_BASE + 27)
+#define BCM_6362_FAP0_IRQ              (IRQ_INTERNAL_BASE + 29)
+#define BCM_6362_PCM_DMA0_IRQ          (BCM_6362_HIGH_IRQ_BASE + 4)
+#define BCM_6362_PCM_DMA1_IRQ          (BCM_6362_HIGH_IRQ_BASE + 5)
+#define BCM_6362_DECT0_IRQ             (BCM_6362_HIGH_IRQ_BASE + 6)
+#define BCM_6362_DECT1_IRQ             (BCM_6362_HIGH_IRQ_BASE + 7)
+#define BCM_6362_EXT_IRQ0              (BCM_6362_HIGH_IRQ_BASE + 8)
+#define BCM_6362_EXT_IRQ1              (BCM_6362_HIGH_IRQ_BASE + 9)
+#define BCM_6362_EXT_IRQ2              (BCM_6362_HIGH_IRQ_BASE + 10)
+#define BCM_6362_EXT_IRQ3              (BCM_6362_HIGH_IRQ_BASE + 11)
+
 /*
  * 6368 irqs
  */
index b0184cf025755a49d5fc7ca6614e82c9866b4fd8..c426cabc620a1df26338693f77e0485e07402cb1 100644 (file)
@@ -71,18 +71,13 @@ static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg)
 
        return bcm63xx_regs_spi[reg];
 #else
-#ifdef CONFIG_BCM63XX_CPU_6338
-       __GEN_SPI_RSET(6338)
-#endif
-#ifdef CONFIG_BCM63XX_CPU_6348
+#if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348)
        __GEN_SPI_RSET(6348)
 #endif
-#ifdef CONFIG_BCM63XX_CPU_6358
+#if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \
+       defined(CONFIG_BCM63XX_CPU_6368)
        __GEN_SPI_RSET(6358)
 #endif
-#ifdef CONFIG_BCM63XX_CPU_6368
-       __GEN_SPI_RSET(6368)
-#endif
 #endif
        return 0;
 }
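
The hunk above collapses the duplicated per-SoC cases: 6338 shares the 6348 SPI register layout and 6362/6368 share the 6358 layout, so only two __GEN_SPI_RSET() instantiations survive. The macro body itself is outside this diff; the sketch below only illustrates the general token-pasting idea with invented DEMO_* names and placeholder offsets, it is not the real definition.

#include <stdio.h>

#define DEMO_SPI_6348_MSG_CTL	0x40	/* placeholder offsets only */
#define DEMO_SPI_6358_MSG_CTL	0x00

#define __DEMO_GEN_SPI_RSET(__cpu)					\
	return DEMO_SPI_## __cpu ##_MSG_CTL;

static unsigned long demo_spireg(void)
{
#if defined(DEMO_CPU_6338) || defined(DEMO_CPU_6348)
	__DEMO_GEN_SPI_RSET(6348)
#else
	__DEMO_GEN_SPI_RSET(6358)
#endif
}

int main(void)
{
	printf("MSG_CTL offset: %#lx\n", demo_spireg());
	return 0;
}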
index 0a9891f7580de557e702e7c4708139705b27605e..35baa1a60a64547a7594aa30e91ee7fedcc1f101 100644 (file)
@@ -17,6 +17,8 @@ static inline unsigned long bcm63xx_gpio_count(void)
                return 8;
        case BCM6345_CPU_ID:
                return 16;
+       case BCM6362_CPU_ID:
+               return 48;
        case BCM6368_CPU_ID:
                return 38;
        case BCM6348_CPU_ID:
index 81b4702f792a81693dbfc49db980276296967cff..3203fe49b34d4d55e6808c0c50f4d55a4f9f91b0 100644 (file)
@@ -10,7 +10,7 @@
 #define REV_CHIPID_SHIFT               16
 #define REV_CHIPID_MASK                        (0xffff << REV_CHIPID_SHIFT)
 #define REV_REVID_SHIFT                        0
-#define REV_REVID_MASK                 (0xffff << REV_REVID_SHIFT)
+#define REV_REVID_MASK                 (0xff << REV_REVID_SHIFT)
 
 /* Clock Control register */
 #define PERF_CKCTL_REG                 0x4
                                        CKCTL_6358_USBSU_EN |           \
                                        CKCTL_6358_EPHY_EN)
 
+#define CKCTL_6362_ADSL_QPROC_EN       (1 << 1)
+#define CKCTL_6362_ADSL_AFE_EN         (1 << 2)
+#define CKCTL_6362_ADSL_EN             (1 << 3)
+#define CKCTL_6362_MIPS_EN             (1 << 4)
+#define CKCTL_6362_WLAN_OCP_EN         (1 << 5)
+#define CKCTL_6362_SWPKT_USB_EN                (1 << 7)
+#define CKCTL_6362_SWPKT_SAR_EN                (1 << 8)
+#define CKCTL_6362_SAR_EN              (1 << 9)
+#define CKCTL_6362_ROBOSW_EN           (1 << 10)
+#define CKCTL_6362_PCM_EN              (1 << 11)
+#define CKCTL_6362_USBD_EN             (1 << 12)
+#define CKCTL_6362_USBH_EN             (1 << 13)
+#define CKCTL_6362_IPSEC_EN            (1 << 14)
+#define CKCTL_6362_SPI_EN              (1 << 15)
+#define CKCTL_6362_HSSPI_EN            (1 << 16)
+#define CKCTL_6362_PCIE_EN             (1 << 17)
+#define CKCTL_6362_FAP_EN              (1 << 18)
+#define CKCTL_6362_PHYMIPS_EN          (1 << 19)
+#define CKCTL_6362_NAND_EN             (1 << 20)
+
+#define CKCTL_6362_ALL_SAFE_EN         (CKCTL_6362_PHYMIPS_EN |        \
+                                       CKCTL_6362_ADSL_QPROC_EN |      \
+                                       CKCTL_6362_ADSL_AFE_EN |        \
+                                       CKCTL_6362_ADSL_EN |            \
+                                       CKCTL_6362_SAR_EN  |            \
+                                       CKCTL_6362_PCM_EN  |            \
+                                       CKCTL_6362_IPSEC_EN |           \
+                                       CKCTL_6362_USBD_EN |            \
+                                       CKCTL_6362_USBH_EN |            \
+                                       CKCTL_6362_ROBOSW_EN |          \
+                                       CKCTL_6362_PCIE_EN)
+
+
 #define CKCTL_6368_VDSL_QPROC_EN       (1 << 2)
 #define CKCTL_6368_VDSL_AFE_EN         (1 << 3)
 #define CKCTL_6368_VDSL_BONDING_EN     (1 << 4)
 #define PERF_IRQMASK_6345_REG          0xc
 #define PERF_IRQMASK_6348_REG          0xc
 #define PERF_IRQMASK_6358_REG          0xc
+#define PERF_IRQMASK_6362_REG          0x20
 #define PERF_IRQMASK_6368_REG          0x20
 
 /* Interrupt Status register */
 #define PERF_IRQSTAT_6345_REG          0x10
 #define PERF_IRQSTAT_6348_REG          0x10
 #define PERF_IRQSTAT_6358_REG          0x10
+#define PERF_IRQSTAT_6362_REG          0x28
 #define PERF_IRQSTAT_6368_REG          0x28
 
 /* External Interrupt Configuration register */
 #define PERF_EXTIRQ_CFG_REG_6345       0x14
 #define PERF_EXTIRQ_CFG_REG_6348       0x14
 #define PERF_EXTIRQ_CFG_REG_6358       0x14
+#define PERF_EXTIRQ_CFG_REG_6362       0x18
 #define PERF_EXTIRQ_CFG_REG_6368       0x18
 
 #define PERF_EXTIRQ_CFG_REG2_6368      0x1c
 #define PERF_SOFTRESET_REG             0x28
 #define PERF_SOFTRESET_6328_REG                0x10
 #define PERF_SOFTRESET_6358_REG                0x34
+#define PERF_SOFTRESET_6362_REG                0x10
 #define PERF_SOFTRESET_6368_REG                0x10
 
 #define SOFTRESET_6328_SPI_MASK                (1 << 0)
 #define SOFTRESET_6358_PCM_MASK                (1 << 13)
 #define SOFTRESET_6358_ADSL_MASK       (1 << 14)
 
+#define SOFTRESET_6362_SPI_MASK                (1 << 0)
+#define SOFTRESET_6362_IPSEC_MASK      (1 << 1)
+#define SOFTRESET_6362_EPHY_MASK       (1 << 2)
+#define SOFTRESET_6362_SAR_MASK                (1 << 3)
+#define SOFTRESET_6362_ENETSW_MASK     (1 << 4)
+#define SOFTRESET_6362_USBS_MASK       (1 << 5)
+#define SOFTRESET_6362_USBH_MASK       (1 << 6)
+#define SOFTRESET_6362_PCM_MASK                (1 << 7)
+#define SOFTRESET_6362_PCIE_CORE_MASK  (1 << 8)
+#define SOFTRESET_6362_PCIE_MASK       (1 << 9)
+#define SOFTRESET_6362_PCIE_EXT_MASK   (1 << 10)
+#define SOFTRESET_6362_WLAN_SHIM_MASK  (1 << 11)
+#define SOFTRESET_6362_DDR_PHY_MASK    (1 << 12)
+#define SOFTRESET_6362_FAP_MASK                (1 << 13)
+#define SOFTRESET_6362_WLAN_UBUS_MASK  (1 << 14)
+
 #define SOFTRESET_6368_SPI_MASK                (1 << 0)
 #define SOFTRESET_6368_MPI_MASK                (1 << 3)
 #define SOFTRESET_6368_EPHY_MASK       (1 << 6)
  * _REG relative to RSET_SPI
  *************************************************************************/
 
-/* BCM 6338 SPI core */
-#define SPI_6338_CMD                   0x00    /* 16-bits register */
-#define SPI_6338_INT_STATUS            0x02
-#define SPI_6338_INT_MASK_ST           0x03
-#define SPI_6338_INT_MASK              0x04
-#define SPI_6338_ST                    0x05
-#define SPI_6338_CLK_CFG               0x06
-#define SPI_6338_FILL_BYTE             0x07
-#define SPI_6338_MSG_TAIL              0x09
-#define SPI_6338_RX_TAIL               0x0b
-#define SPI_6338_MSG_CTL               0x40    /* 8-bits register */
-#define SPI_6338_MSG_CTL_WIDTH         8
-#define SPI_6338_MSG_DATA              0x41
-#define SPI_6338_MSG_DATA_SIZE         0x3f
-#define SPI_6338_RX_DATA               0x80
-#define SPI_6338_RX_DATA_SIZE          0x3f
-
-/* BCM 6348 SPI core */
+/* BCM 6338/6348 SPI core */
 #define SPI_6348_CMD                   0x00    /* 16-bits register */
 #define SPI_6348_INT_STATUS            0x02
 #define SPI_6348_INT_MASK_ST           0x03
 #define SPI_6348_RX_DATA               0x80
 #define SPI_6348_RX_DATA_SIZE          0x3f
 
-/* BCM 6358 SPI core */
+/* BCM 6358/6362/6368 SPI core */
 #define SPI_6358_MSG_CTL               0x00    /* 16-bits register */
 #define SPI_6358_MSG_CTL_WIDTH         16
 #define SPI_6358_MSG_DATA              0x02
 #define SPI_6358_MSG_TAIL              0x709
 #define SPI_6358_RX_TAIL               0x70B
 
-/* BCM 6358 SPI core */
-#define SPI_6368_MSG_CTL               0x00    /* 16-bits register */
-#define SPI_6368_MSG_CTL_WIDTH         16
-#define SPI_6368_MSG_DATA              0x02
-#define SPI_6368_MSG_DATA_SIZE         0x21e
-#define SPI_6368_RX_DATA               0x400
-#define SPI_6368_RX_DATA_SIZE          0x220
-#define SPI_6368_CMD                   0x700   /* 16-bits register */
-#define SPI_6368_INT_STATUS            0x702
-#define SPI_6368_INT_MASK_ST           0x703
-#define SPI_6368_INT_MASK              0x704
-#define SPI_6368_ST                    0x705
-#define SPI_6368_CLK_CFG               0x706
-#define SPI_6368_FILL_BYTE             0x707
-#define SPI_6368_MSG_TAIL              0x709
-#define SPI_6368_RX_TAIL               0x70B
-
 /* Shared SPI definitions */
 
 /* Message configuration */
 #define SPI_HD_W                       0x01
 #define SPI_HD_R                       0x02
 #define SPI_BYTE_CNT_SHIFT             0
-#define SPI_6338_MSG_TYPE_SHIFT                6
 #define SPI_6348_MSG_TYPE_SHIFT                6
 #define SPI_6358_MSG_TYPE_SHIFT                14
-#define SPI_6368_MSG_TYPE_SHIFT                14
 
 /* Command */
 #define SPI_CMD_NOOP                   0x00
 /*************************************************************************
  * _REG relative to RSET_MISC
  *************************************************************************/
-#define MISC_SERDES_CTRL_REG           0x0
+#define MISC_SERDES_CTRL_6328_REG      0x0
+#define MISC_SERDES_CTRL_6362_REG      0x4
 #define SERDES_PCIE_EN                 (1 << 0)
 #define SERDES_PCIE_EXD_EN             (1 << 15)
 
+#define MISC_STRAPBUS_6362_REG         0x14
+#define STRAPBUS_6362_FCVO_SHIFT       1
+#define STRAPBUS_6362_HSSPI_CLK_FAST   (1 << 13)
+#define STRAPBUS_6362_FCVO_MASK                (0x1f << STRAPBUS_6362_FCVO_SHIFT)
+#define STRAPBUS_6362_BOOT_SEL_SERIAL  (1 << 15)
+#define STRAPBUS_6362_BOOT_SEL_NAND    (0 << 15)
+
 #define MISC_STRAPBUS_6328_REG         0x240
 #define STRAPBUS_6328_FCVO_SHIFT       7
 #define STRAPBUS_6328_FCVO_MASK                (0x1f << STRAPBUS_6328_FCVO_SHIFT)
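Aside, not part of the patch: a minimal sketch of how the new BCM6362 strap-bus fields above could be decoded. The helper names are hypothetical; only the STRAPBUS_6362_* macros come from bcm63xx_regs.h, and the strap-bus value is assumed to have been read from MISC_STRAPBUS_6362_REG elsewhere.

#include <linux/types.h>                /* u32, bool */
#include <bcm63xx_regs.h>               /* STRAPBUS_6362_* (patched above) */

/* Extract the FCVO (clock vector) field from a 6362 strap-bus value. */
static inline u32 bcm6362_strap_fcvo(u32 strapbus)
{
	return (strapbus & STRAPBUS_6362_FCVO_MASK) >> STRAPBUS_6362_FCVO_SHIFT;
}

/* True if the boot-select strap indicates serial (SPI) boot rather than NAND. */
static inline bool bcm6362_strap_boot_serial(u32 strapbus)
{
	return (strapbus & STRAPBUS_6362_BOOT_SEL_SERIAL) != 0;
}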
index 30931c42379d95841ff667bc2036e1f4939dfe51..94e3011ba7df99b55e5c6657e218a5cccc25e1e4 100644 (file)
@@ -19,6 +19,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset)
                        return 1;
                break;
        case BCM6328_CPU_ID:
+       case BCM6362_CPU_ID:
        case BCM6368_CPU_ID:
                if (offset >= 0xb0000000 && offset < 0xb1000000)
                        return 1;
index 9c95177f7a7e04bde7771cf5b7c8204753a57e9b..fe23034aaf721497af158ee2deec840038478dcf 100644 (file)
@@ -61,9 +61,8 @@ static inline int plat_device_is_coherent(struct device *dev)
 {
 #ifdef CONFIG_DMA_COHERENT
        return 1;
-#endif
-#ifdef CONFIG_DMA_NONCOHERENT
-       return 0;
+#else
+       return coherentio;
 #endif
 }
 
index 73d717a75cb0a1038e2f10009d67e657e23ab429..5b2f2e68e57f08210be7d4a98370cb3895111adf 100644 (file)
 #endif
 
 #ifdef CONFIG_32BIT
-
+#ifdef CONFIG_KVM_GUEST
+#define CAC_BASE               _AC(0x40000000, UL)
+#else
 #define CAC_BASE               _AC(0x80000000, UL)
+#endif
 #define IO_BASE                        _AC(0xa0000000, UL)
 #define UNCAC_BASE             _AC(0xa0000000, UL)
 
 #ifndef MAP_BASE
+#ifdef CONFIG_KVM_GUEST
+#define MAP_BASE               _AC(0x60000000, UL)
+#else
 #define MAP_BASE               _AC(0xc0000000, UL)
 #endif
+#endif
 
 /*
  * Memory above this physical address will be considered highmem.
index 75fd8c0f986eaff51a02ce2807a5c92442e657be..c0f3ef45c2c14f0e658feee183eabc6f8141ea9f 100644 (file)
@@ -57,5 +57,6 @@
 #define cpu_has_vint           0
 #define cpu_has_vtag_icache    0
 #define cpu_has_watch          1
+#define cpu_has_local_ebase    0
 
 #endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
new file mode 100644 (file)
index 0000000..9809972
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _MT7620_REGS_H_
+#define _MT7620_REGS_H_
+
+#define MT7620_SYSC_BASE               0x10000000
+
+#define SYSC_REG_CHIP_NAME0            0x00
+#define SYSC_REG_CHIP_NAME1            0x04
+#define SYSC_REG_CHIP_REV              0x0c
+#define SYSC_REG_SYSTEM_CONFIG0                0x10
+#define SYSC_REG_SYSTEM_CONFIG1                0x14
+#define SYSC_REG_CPLL_CONFIG0          0x54
+#define SYSC_REG_CPLL_CONFIG1          0x58
+
+#define MT7620N_CHIP_NAME0             0x33365452
+#define MT7620N_CHIP_NAME1             0x20203235
+
+#define MT7620A_CHIP_NAME0             0x3637544d
+#define MT7620A_CHIP_NAME1             0x20203032
+
+#define CHIP_REV_PKG_MASK              0x1
+#define CHIP_REV_PKG_SHIFT             16
+#define CHIP_REV_VER_MASK              0xf
+#define CHIP_REV_VER_SHIFT             8
+#define CHIP_REV_ECO_MASK              0xf
+
+#define CPLL_SW_CONFIG_SHIFT           31
+#define CPLL_SW_CONFIG_MASK            0x1
+#define CPLL_CPU_CLK_SHIFT             24
+#define CPLL_CPU_CLK_MASK              0x1
+#define CPLL_MULT_RATIO_SHIFT           16
+#define CPLL_MULT_RATIO                 0x7
+#define CPLL_DIV_RATIO_SHIFT            10
+#define CPLL_DIV_RATIO                  0x3
+
+#define SYSCFG0_DRAM_TYPE_MASK         0x3
+#define SYSCFG0_DRAM_TYPE_SHIFT                4
+#define SYSCFG0_DRAM_TYPE_SDRAM                0
+#define SYSCFG0_DRAM_TYPE_DDR1         1
+#define SYSCFG0_DRAM_TYPE_DDR2         2
+
+#define MT7620_DRAM_BASE               0x0
+#define MT7620_SDRAM_SIZE_MIN          2
+#define MT7620_SDRAM_SIZE_MAX          64
+#define MT7620_DDR1_SIZE_MIN           32
+#define MT7620_DDR1_SIZE_MAX           128
+#define MT7620_DDR2_SIZE_MIN           32
+#define MT7620_DDR2_SIZE_MAX           256
+
+#define MT7620_GPIO_MODE_I2C           BIT(0)
+#define MT7620_GPIO_MODE_UART0_SHIFT   2
+#define MT7620_GPIO_MODE_UART0_MASK    0x7
+#define MT7620_GPIO_MODE_UART0(x)      ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
+#define MT7620_GPIO_MODE_UARTF         0x0
+#define MT7620_GPIO_MODE_PCM_UARTF     0x1
+#define MT7620_GPIO_MODE_PCM_I2S       0x2
+#define MT7620_GPIO_MODE_I2S_UARTF     0x3
+#define MT7620_GPIO_MODE_PCM_GPIO      0x4
+#define MT7620_GPIO_MODE_GPIO_UARTF    0x5
+#define MT7620_GPIO_MODE_GPIO_I2S      0x6
+#define MT7620_GPIO_MODE_GPIO          0x7
+#define MT7620_GPIO_MODE_UART1         BIT(5)
+#define MT7620_GPIO_MODE_MDIO          BIT(8)
+#define MT7620_GPIO_MODE_RGMII1                BIT(9)
+#define MT7620_GPIO_MODE_RGMII2                BIT(10)
+#define MT7620_GPIO_MODE_SPI           BIT(11)
+#define MT7620_GPIO_MODE_SPI_REF_CLK   BIT(12)
+#define MT7620_GPIO_MODE_WLED          BIT(13)
+#define MT7620_GPIO_MODE_JTAG          BIT(15)
+#define MT7620_GPIO_MODE_EPHY          BIT(15)
+#define MT7620_GPIO_MODE_WDT           BIT(22)
+
+#endif
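As a usage illustration only (not part of the patch), the CHIP_NAME registers defined above can distinguish an MT7620A from an MT7620N. The function below is a hypothetical sketch: it assumes "sysc" is an ioremap() of MT7620_SYSC_BASE, and only the SYSC_REG_* and MT7620*_CHIP_NAME* macros come from this header.

#include <linux/types.h>
#include <asm/io.h>                     /* __raw_readl */
#include <asm/mach-ralink/mt7620.h>     /* added by this patch */

/* Return 1 for MT7620A, 0 for MT7620N, -1 if the chip is not recognised. */
static int mt7620_identify(void __iomem *sysc)
{
	u32 n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
	u32 n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);

	if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1)
		return 1;
	if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1)
		return 0;
	return -1;
}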
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
new file mode 100644 (file)
index 0000000..03ad716
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RT288X_REGS_H_
+#define _RT288X_REGS_H_
+
+#define RT2880_SYSC_BASE               0x00300000
+
+#define SYSC_REG_CHIP_NAME0            0x00
+#define SYSC_REG_CHIP_NAME1            0x04
+#define SYSC_REG_CHIP_ID               0x0c
+#define SYSC_REG_SYSTEM_CONFIG         0x10
+#define SYSC_REG_CLKCFG                        0x30
+
+#define RT2880_CHIP_NAME0              0x38325452
+#define RT2880_CHIP_NAME1              0x20203038
+
+#define CHIP_ID_ID_MASK                        0xff
+#define CHIP_ID_ID_SHIFT               8
+#define CHIP_ID_REV_MASK               0xff
+
+#define SYSTEM_CONFIG_CPUCLK_SHIFT     20
+#define SYSTEM_CONFIG_CPUCLK_MASK      0x3
+#define SYSTEM_CONFIG_CPUCLK_250       0x0
+#define SYSTEM_CONFIG_CPUCLK_266       0x1
+#define SYSTEM_CONFIG_CPUCLK_280       0x2
+#define SYSTEM_CONFIG_CPUCLK_300       0x3
+
+#define RT2880_GPIO_MODE_I2C           BIT(0)
+#define RT2880_GPIO_MODE_UART0         BIT(1)
+#define RT2880_GPIO_MODE_SPI           BIT(2)
+#define RT2880_GPIO_MODE_UART1         BIT(3)
+#define RT2880_GPIO_MODE_JTAG          BIT(4)
+#define RT2880_GPIO_MODE_MDIO          BIT(5)
+#define RT2880_GPIO_MODE_SDRAM         BIT(6)
+#define RT2880_GPIO_MODE_PCI           BIT(7)
+
+#define CLKCFG_SRAM_CS_N_WDT           BIT(9)
+
+#define RT2880_SDRAM_BASE              0x08000000
+#define RT2880_MEM_SIZE_MIN            2
+#define RT2880_MEM_SIZE_MAX            128
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..72fc106
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Ralink RT288x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *     Copyright (C) 2003, 2004 Ralf Baechle
+ *     Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT288X_CPU_FEATURE_OVERRIDES_H
+#define _RT288X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            0
+#define cpu_has_mipsmt         0
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 16
+#define cpu_icache_line_size() 16
+
+#endif /* _RT288X_CPU_FEATURE_OVERRIDES_H */
index 7d344f2d7d0addb88148b1be703aa25a9c676452..069bf37a6010635ec026b0eae0bcb1766813460f 100644 (file)
@@ -97,6 +97,14 @@ static inline int soc_is_rt5350(void)
 #define RT5350_SYSCFG0_CPUCLK_320      0x2
 #define RT5350_SYSCFG0_CPUCLK_300      0x3
 
+#define RT5350_SYSCFG0_DRAM_SIZE_SHIFT  12
+#define RT5350_SYSCFG0_DRAM_SIZE_MASK   7
+#define RT5350_SYSCFG0_DRAM_SIZE_2M     0
+#define RT5350_SYSCFG0_DRAM_SIZE_8M     1
+#define RT5350_SYSCFG0_DRAM_SIZE_16M    2
+#define RT5350_SYSCFG0_DRAM_SIZE_32M    3
+#define RT5350_SYSCFG0_DRAM_SIZE_64M    4
+
 /* multi function gpio pins */
 #define RT305X_GPIO_I2C_SD             1
 #define RT305X_GPIO_I2C_SCLK           2
@@ -136,4 +144,23 @@ static inline int soc_is_rt5350(void)
 #define RT305X_GPIO_MODE_SDRAM         BIT(8)
 #define RT305X_GPIO_MODE_RGMII         BIT(9)
 
+#define RT3352_SYSC_REG_SYSCFG0                0x010
+#define RT3352_SYSC_REG_SYSCFG1         0x014
+#define RT3352_SYSC_REG_CLKCFG1         0x030
+#define RT3352_SYSC_REG_RSTCTRL         0x034
+#define RT3352_SYSC_REG_USB_PS          0x05c
+
+#define RT3352_CLKCFG0_XTAL_SEL                BIT(20)
+#define RT3352_CLKCFG1_UPHY0_CLK_EN    BIT(18)
+#define RT3352_CLKCFG1_UPHY1_CLK_EN    BIT(20)
+#define RT3352_RSTCTRL_UHST            BIT(22)
+#define RT3352_RSTCTRL_UDEV            BIT(25)
+#define RT3352_SYSCFG1_USB0_HOST_MODE  BIT(10)
+
+#define RT305X_SDRAM_BASE              0x00000000
+#define RT305X_MEM_SIZE_MIN            2
+#define RT305X_MEM_SIZE_MAX            64
+#define RT3352_MEM_SIZE_MIN            2
+#define RT3352_MEM_SIZE_MAX            256
+
 #endif
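A hedged sketch (not in the patch) of how the RT5350 DRAM size field added above maps to a size in megabytes; the helper name is hypothetical and the SYSCFG0 value is assumed to have been read from the SoC's system-configuration register.

#include <linux/types.h>
#include <asm/mach-ralink/rt305x.h>     /* RT5350_SYSCFG0_* (patched above) */

static unsigned int rt5350_dram_mbytes(u32 syscfg0)
{
	switch ((syscfg0 >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
		RT5350_SYSCFG0_DRAM_SIZE_MASK) {
	case RT5350_SYSCFG0_DRAM_SIZE_2M:
		return 2;
	case RT5350_SYSCFG0_DRAM_SIZE_8M:
		return 8;
	case RT5350_SYSCFG0_DRAM_SIZE_16M:
		return 16;
	case RT5350_SYSCFG0_DRAM_SIZE_32M:
		return 32;
	case RT5350_SYSCFG0_DRAM_SIZE_64M:
		return 64;
	default:
		return 0;	/* unknown encoding */
	}
}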
diff --git a/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..917c286
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Ralink RT305x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *     Copyright (C) 2003, 2004 Ralf Baechle
+ *     Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT305X_CPU_FEATURE_OVERRIDES_H
+#define _RT305X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            1
+#define cpu_has_mipsmt         0
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _RT305X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
new file mode 100644 (file)
index 0000000..058382f
--- /dev/null
@@ -0,0 +1,252 @@
+/*
+ * Ralink RT3662/RT3883 SoC register definitions
+ *
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _RT3883_REGS_H_
+#define _RT3883_REGS_H_
+
+#include <linux/bitops.h>
+
+#define RT3883_SDRAM_BASE      0x00000000
+#define RT3883_SYSC_BASE       0x10000000
+#define RT3883_TIMER_BASE      0x10000100
+#define RT3883_INTC_BASE       0x10000200
+#define RT3883_MEMC_BASE       0x10000300
+#define RT3883_UART0_BASE      0x10000500
+#define RT3883_PIO_BASE                0x10000600
+#define RT3883_FSCC_BASE       0x10000700
+#define RT3883_NANDC_BASE      0x10000810
+#define RT3883_I2C_BASE                0x10000900
+#define RT3883_I2S_BASE                0x10000a00
+#define RT3883_SPI_BASE                0x10000b00
+#define RT3883_UART1_BASE      0x10000c00
+#define RT3883_PCM_BASE                0x10002000
+#define RT3883_GDMA_BASE       0x10002800
+#define RT3883_CODEC1_BASE     0x10003000
+#define RT3883_CODEC2_BASE     0x10003800
+#define RT3883_FE_BASE         0x10100000
+#define RT3883_ROM_BASE                0x10118000
+#define RT3883_USBDEV_BASE     0x10112000
+#define RT3883_PCI_BASE                0x10140000
+#define RT3883_WLAN_BASE       0x10180000
+#define RT3883_USBHOST_BASE    0x101c0000
+#define RT3883_BOOT_BASE       0x1c000000
+#define RT3883_SRAM_BASE       0x1e000000
+#define RT3883_PCIMEM_BASE     0x20000000
+
+#define RT3883_EHCI_BASE       (RT3883_USBHOST_BASE)
+#define RT3883_OHCI_BASE       (RT3883_USBHOST_BASE + 0x1000)
+
+#define RT3883_SYSC_SIZE       0x100
+#define RT3883_TIMER_SIZE      0x100
+#define RT3883_INTC_SIZE       0x100
+#define RT3883_MEMC_SIZE       0x100
+#define RT3883_UART0_SIZE      0x100
+#define RT3883_UART1_SIZE      0x100
+#define RT3883_PIO_SIZE                0x100
+#define RT3883_FSCC_SIZE       0x100
+#define RT3883_NANDC_SIZE      0x0f0
+#define RT3883_I2C_SIZE                0x100
+#define RT3883_I2S_SIZE                0x100
+#define RT3883_SPI_SIZE                0x100
+#define RT3883_PCM_SIZE                0x800
+#define RT3883_GDMA_SIZE       0x800
+#define RT3883_CODEC1_SIZE     0x800
+#define RT3883_CODEC2_SIZE     0x800
+#define RT3883_FE_SIZE         0x10000
+#define RT3883_ROM_SIZE                0x4000
+#define RT3883_USBDEV_SIZE     0x4000
+#define RT3883_PCI_SIZE                0x40000
+#define RT3883_WLAN_SIZE       0x40000
+#define RT3883_USBHOST_SIZE    0x40000
+#define RT3883_BOOT_SIZE       (32 * 1024 * 1024)
+#define RT3883_SRAM_SIZE       (32 * 1024 * 1024)
+
+/* SYSC registers */
+#define RT3883_SYSC_REG_CHIPID0_3      0x00    /* Chip ID 0 */
+#define RT3883_SYSC_REG_CHIPID4_7      0x04    /* Chip ID 1 */
+#define RT3883_SYSC_REG_REVID          0x0c    /* Chip Revision Identification */
+#define RT3883_SYSC_REG_SYSCFG0                0x10    /* System Configuration 0 */
+#define RT3883_SYSC_REG_SYSCFG1                0x14    /* System Configuration 1 */
+#define RT3883_SYSC_REG_CLKCFG0                0x2c    /* Clock Configuration 0 */
+#define RT3883_SYSC_REG_CLKCFG1                0x30    /* Clock Configuration 1 */
+#define RT3883_SYSC_REG_RSTCTRL                0x34    /* Reset Control */
+#define RT3883_SYSC_REG_RSTSTAT                0x38    /* Reset Status */
+#define RT3883_SYSC_REG_USB_PS         0x5c    /* USB Power saving control */
+#define RT3883_SYSC_REG_GPIO_MODE      0x60    /* GPIO Purpose Select */
+#define RT3883_SYSC_REG_PCIE_CLK_GEN0  0x7c
+#define RT3883_SYSC_REG_PCIE_CLK_GEN1  0x80
+#define RT3883_SYSC_REG_PCIE_CLK_GEN2  0x84
+#define RT3883_SYSC_REG_PMU            0x88
+#define RT3883_SYSC_REG_PMU1           0x8c
+
+#define RT3883_CHIP_NAME0              0x38335452
+#define RT3883_CHIP_NAME1              0x20203338
+
+#define RT3883_REVID_VER_ID_MASK       0x0f
+#define RT3883_REVID_VER_ID_SHIFT      8
+#define RT3883_REVID_ECO_ID_MASK       0x0f
+
+#define RT3883_SYSCFG0_DRAM_TYPE_DDR2  BIT(17)
+#define RT3883_SYSCFG0_CPUCLK_SHIFT    8
+#define RT3883_SYSCFG0_CPUCLK_MASK     0x3
+#define RT3883_SYSCFG0_CPUCLK_250      0x0
+#define RT3883_SYSCFG0_CPUCLK_384      0x1
+#define RT3883_SYSCFG0_CPUCLK_480      0x2
+#define RT3883_SYSCFG0_CPUCLK_500      0x3
+
+#define RT3883_SYSCFG1_USB0_HOST_MODE  BIT(10)
+#define RT3883_SYSCFG1_PCIE_RC_MODE    BIT(8)
+#define RT3883_SYSCFG1_PCI_HOST_MODE   BIT(7)
+#define RT3883_SYSCFG1_PCI_66M_MODE    BIT(6)
+#define RT3883_SYSCFG1_GPIO2_AS_WDT_OUT        BIT(2)
+
+#define RT3883_CLKCFG1_PCIE_CLK_EN     BIT(21)
+#define RT3883_CLKCFG1_UPHY1_CLK_EN    BIT(20)
+#define RT3883_CLKCFG1_PCI_CLK_EN      BIT(19)
+#define RT3883_CLKCFG1_UPHY0_CLK_EN    BIT(18)
+
+#define RT3883_GPIO_MODE_I2C           BIT(0)
+#define RT3883_GPIO_MODE_SPI           BIT(1)
+#define RT3883_GPIO_MODE_UART0_SHIFT   2
+#define RT3883_GPIO_MODE_UART0_MASK    0x7
+#define RT3883_GPIO_MODE_UART0(x)      ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
+#define RT3883_GPIO_MODE_UARTF         0x0
+#define RT3883_GPIO_MODE_PCM_UARTF     0x1
+#define RT3883_GPIO_MODE_PCM_I2S       0x2
+#define RT3883_GPIO_MODE_I2S_UARTF     0x3
+#define RT3883_GPIO_MODE_PCM_GPIO      0x4
+#define RT3883_GPIO_MODE_GPIO_UARTF    0x5
+#define RT3883_GPIO_MODE_GPIO_I2S      0x6
+#define RT3883_GPIO_MODE_GPIO          0x7
+#define RT3883_GPIO_MODE_UART1         BIT(5)
+#define RT3883_GPIO_MODE_JTAG          BIT(6)
+#define RT3883_GPIO_MODE_MDIO          BIT(7)
+#define RT3883_GPIO_MODE_GE1           BIT(9)
+#define RT3883_GPIO_MODE_GE2           BIT(10)
+#define RT3883_GPIO_MODE_PCI_SHIFT     11
+#define RT3883_GPIO_MODE_PCI_MASK      0x7
+#define RT3883_GPIO_MODE_PCI           (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_SHIFT   16
+#define RT3883_GPIO_MODE_LNA_A_MASK    0x3
+#define _RT3883_GPIO_MODE_LNA_A(_x)    ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_GPIO    0x3
+#define RT3883_GPIO_MODE_LNA_A         _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK)
+#define RT3883_GPIO_MODE_LNA_G_SHIFT   18
+#define RT3883_GPIO_MODE_LNA_G_MASK    0x3
+#define _RT3883_GPIO_MODE_LNA_G(_x)    ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT)
+#define RT3883_GPIO_MODE_LNA_G_GPIO    0x3
+#define RT3883_GPIO_MODE_LNA_G         _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
+
+#define RT3883_GPIO_I2C_SD             1
+#define RT3883_GPIO_I2C_SCLK           2
+#define RT3883_GPIO_SPI_CS0            3
+#define RT3883_GPIO_SPI_CLK            4
+#define RT3883_GPIO_SPI_MOSI           5
+#define RT3883_GPIO_SPI_MISO           6
+#define RT3883_GPIO_7                  7
+#define RT3883_GPIO_10                 10
+#define RT3883_GPIO_11                 11
+#define RT3883_GPIO_14                 14
+#define RT3883_GPIO_UART1_TXD          15
+#define RT3883_GPIO_UART1_RXD          16
+#define RT3883_GPIO_JTAG_TDO           17
+#define RT3883_GPIO_JTAG_TDI           18
+#define RT3883_GPIO_JTAG_TMS           19
+#define RT3883_GPIO_JTAG_TCLK          20
+#define RT3883_GPIO_JTAG_TRST_N                21
+#define RT3883_GPIO_MDIO_MDC           22
+#define RT3883_GPIO_MDIO_MDIO          23
+#define RT3883_GPIO_LNA_PE_A0          32
+#define RT3883_GPIO_LNA_PE_A1          33
+#define RT3883_GPIO_LNA_PE_A2          34
+#define RT3883_GPIO_LNA_PE_G0          35
+#define RT3883_GPIO_LNA_PE_G1          36
+#define RT3883_GPIO_LNA_PE_G2          37
+#define RT3883_GPIO_PCI_AD0            40
+#define RT3883_GPIO_PCI_AD31           71
+#define RT3883_GPIO_GE2_TXD0           72
+#define RT3883_GPIO_GE2_TXD1           73
+#define RT3883_GPIO_GE2_TXD2           74
+#define RT3883_GPIO_GE2_TXD3           75
+#define RT3883_GPIO_GE2_TXEN           76
+#define RT3883_GPIO_GE2_TXCLK          77
+#define RT3883_GPIO_GE2_RXD0           78
+#define RT3883_GPIO_GE2_RXD1           79
+#define RT3883_GPIO_GE2_RXD2           80
+#define RT3883_GPIO_GE2_RXD3           81
+#define RT3883_GPIO_GE2_RXDV           82
+#define RT3883_GPIO_GE2_RXCLK          83
+#define RT3883_GPIO_GE1_TXD0           84
+#define RT3883_GPIO_GE1_TXD1           85
+#define RT3883_GPIO_GE1_TXD2           86
+#define RT3883_GPIO_GE1_TXD3           87
+#define RT3883_GPIO_GE1_TXEN           88
+#define RT3883_GPIO_GE1_TXCLK          89
+#define RT3883_GPIO_GE1_RXD0           90
+#define RT3883_GPIO_GE1_RXD1           91
+#define RT3883_GPIO_GE1_RXD2           92
+#define RT3883_GPIO_GE1_RXD3           93
+#define RT3883_GPIO_GE1_RXDV           94
+#define RT3883_GPIO_GE1_RXCLK  95
+
+#define RT3883_RSTCTRL_PCIE_PCI_PDM    BIT(27)
+#define RT3883_RSTCTRL_FLASH           BIT(26)
+#define RT3883_RSTCTRL_UDEV            BIT(25)
+#define RT3883_RSTCTRL_PCI             BIT(24)
+#define RT3883_RSTCTRL_PCIE            BIT(23)
+#define RT3883_RSTCTRL_UHST            BIT(22)
+#define RT3883_RSTCTRL_FE              BIT(21)
+#define RT3883_RSTCTRL_WLAN            BIT(20)
+#define RT3883_RSTCTRL_UART1           BIT(29)
+#define RT3883_RSTCTRL_SPI             BIT(18)
+#define RT3883_RSTCTRL_I2S             BIT(17)
+#define RT3883_RSTCTRL_I2C             BIT(16)
+#define RT3883_RSTCTRL_NAND            BIT(15)
+#define RT3883_RSTCTRL_DMA             BIT(14)
+#define RT3883_RSTCTRL_PIO             BIT(13)
+#define RT3883_RSTCTRL_UART            BIT(12)
+#define RT3883_RSTCTRL_PCM             BIT(11)
+#define RT3883_RSTCTRL_MC              BIT(10)
+#define RT3883_RSTCTRL_INTC            BIT(9)
+#define RT3883_RSTCTRL_TIMER           BIT(8)
+#define RT3883_RSTCTRL_SYS             BIT(0)
+
+#define RT3883_INTC_INT_SYSCTL BIT(0)
+#define RT3883_INTC_INT_TIMER0 BIT(1)
+#define RT3883_INTC_INT_TIMER1 BIT(2)
+#define RT3883_INTC_INT_IA     BIT(3)
+#define RT3883_INTC_INT_PCM    BIT(4)
+#define RT3883_INTC_INT_UART0  BIT(5)
+#define RT3883_INTC_INT_PIO    BIT(6)
+#define RT3883_INTC_INT_DMA    BIT(7)
+#define RT3883_INTC_INT_NAND   BIT(8)
+#define RT3883_INTC_INT_PERFC  BIT(9)
+#define RT3883_INTC_INT_I2S    BIT(10)
+#define RT3883_INTC_INT_UART1  BIT(12)
+#define RT3883_INTC_INT_UHST   BIT(18)
+#define RT3883_INTC_INT_UDEV   BIT(19)
+
+/* FLASH/SRAM/Codec Controller registers */
+#define RT3883_FSCC_REG_FLASH_CFG0     0x00
+#define RT3883_FSCC_REG_FLASH_CFG1     0x04
+#define RT3883_FSCC_REG_CODEC_CFG0     0x40
+#define RT3883_FSCC_REG_CODEC_CFG1     0x44
+
+#define RT3883_FLASH_CFG_WIDTH_SHIFT   26
+#define RT3883_FLASH_CFG_WIDTH_MASK    0x3
+#define RT3883_FLASH_CFG_WIDTH_8BIT    0x0
+#define RT3883_FLASH_CFG_WIDTH_16BIT   0x1
+#define RT3883_FLASH_CFG_WIDTH_32BIT   0x2
+
+#define RT3883_SDRAM_BASE              0x00000000
+#define RT3883_MEM_SIZE_MIN            2
+#define RT3883_MEM_SIZE_MAX            256
+
+#endif /* _RT3883_REGS_H_ */
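For orientation only (not part of the patch): the SYSCFG0 CPUCLK field defined above selects one of four CPU rates. A hypothetical decoding sketch, assuming "syscfg0" was read from RT3883_SYSC_REG_SYSCFG0; only the RT3883_SYSCFG0_* macros come from the new header.

#include <linux/types.h>
#include <asm/mach-ralink/rt3883.h>     /* added by this patch */

static unsigned long rt3883_cpu_rate(u32 syscfg0)
{
	u32 sel = (syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) &
		  RT3883_SYSCFG0_CPUCLK_MASK;

	switch (sel) {
	case RT3883_SYSCFG0_CPUCLK_250:
		return 250000000;
	case RT3883_SYSCFG0_CPUCLK_384:
		return 384000000;
	case RT3883_SYSCFG0_CPUCLK_480:
		return 480000000;
	case RT3883_SYSCFG0_CPUCLK_500:
	default:
		return 500000000;
	}
}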
diff --git a/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..181fbf4
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Ralink RT3662/RT3883 specific CPU feature overrides
+ *
+ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *     Copyright (C) 2003, 2004 Ralf Baechle
+ *     Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT3883_CPU_FEATURE_OVERRIDES_H
+#define _RT3883_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            1
+#define cpu_has_mipsmt         0
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _RT3883_CPU_FEATURE_OVERRIDES_H */
index 193c0912d38e651519b8fb8b7bbc9fa31d7893e8..bfbd7035d4c54541787fb7de42cbafe7b1230edb 100644 (file)
 /* #define cpu_has_prefetch    ? */
 #define cpu_has_mcheck         1
 /* #define cpu_has_ejtag       ? */
+#ifdef CONFIG_CPU_MICROMIPS
+#define cpu_has_llsc           0
+#else
 #define cpu_has_llsc           1
+#endif
 /* #define cpu_has_vtag_icache ? */
 /* #define cpu_has_dc_aliases  ? */
 /* #define cpu_has_ic_fills_f_dc ? */
index 44a09a64160ac52fac8862beaaa8a9c9815f3fd2..bd9746fbe4af8a6b098df65c5f386cecec86f2e4 100644 (file)
@@ -83,4 +83,7 @@ extern void mips_pcibios_init(void);
 #define mips_pcibios_init() do { } while (0)
 #endif
 
+extern void mips_scroll_message(void);
+extern void mips_display_message(const char *str);
+
 #endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips-boards/prom.h b/arch/mips/include/asm/mips-boards/prom.h
deleted file mode 100644 (file)
index e7aed3e..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- * MIPS boards bootprom interface for the Linux kernel.
- *
- */
-
-#ifndef _MIPS_PROM_H
-#define _MIPS_PROM_H
-
-extern char *prom_getcmdline(void);
-extern char *prom_getenv(char *name);
-extern void prom_init_cmdline(void);
-extern void prom_meminit(void);
-extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
-extern void mips_display_message(const char *str);
-extern void mips_display_word(unsigned int num);
-extern void mips_scroll_message(void);
-extern int get_ethernet_addr(char *ethernet_addr);
-
-/* Memory descriptor management. */
-#define PROM_MAX_PMEMBLOCKS    32
-struct prom_pmemblock {
-       unsigned long base; /* Within KSEG0. */
-       unsigned int size;  /* In bytes. */
-       unsigned int type;  /* free or prom memory */
-};
-
-#endif /* !(_MIPS_PROM_H) */
index 363bb352c7f70b4da402ed8725ce383a0f22a6e1..9d00aebe98426c5ab93c44bb0df4b09c1d552b7b 100644 (file)
@@ -42,13 +42,9 @@ extern long __mips_machines_end;
 #ifdef CONFIG_MIPS_MACHINE
 int  mips_machtype_setup(char *id) __init;
 void mips_machine_setup(void) __init;
-void mips_set_machine_name(const char *name) __init;
-char *mips_get_machine_name(void);
 #else
 static inline int mips_machtype_setup(char *id) { return 1; }
 static inline void mips_machine_setup(void) { }
-static inline void mips_set_machine_name(const char *name) { }
-static inline char *mips_get_machine_name(void) { return NULL; }
 #endif /* CONFIG_MIPS_MACHINE */
 
 #endif /* __ASM_MIPS_MACHINE_H */
index 0da44d422f5b0242380bfc0dde4973b547c1900e..87e6207b05e4334f242cadf80b01305811edd3fd 100644 (file)
 #define MIPS_CONF3_RXI         (_ULCAST_(1) << 12)
 #define MIPS_CONF3_ULRI                (_ULCAST_(1) << 13)
 #define MIPS_CONF3_ISA         (_ULCAST_(3) << 14)
+#define MIPS_CONF3_ISA_OE      (_ULCAST_(3) << 16)
 #define MIPS_CONF3_VZ          (_ULCAST_(1) << 23)
 
 #define MIPS_CONF4_MMUSIZEEXT  (_ULCAST_(255) << 0)
 
 #ifndef __ASSEMBLY__
 
+/*
+ * Macros for handling the ISA mode bit for microMIPS.
+ */
+#define get_isa16_mode(x)              ((x) & 0x1)
+#define msk_isa16_mode(x)              ((x) & ~0x1)
+#define set_isa16_mode(x)              do { (x) |= 0x1; } while(0)
+
+/*
+ * microMIPS instructions can be 16-bit or 32-bit in length. This
+ * returns a 1 if the instruction is 16-bit and a 0 if 32-bit.
+ */
+static inline int mm_insn_16bit(u16 insn)
+{
+       u16 opcode = (insn >> 10) & 0x7;
+
+       return (opcode >= 1 && opcode <= 3) ? 1 : 0;
+}
+
 /*
  * Functions to access the R10000 performance counters.         These are basically
  * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
index e81d719efcd18e9e226d07e8d69e29b970c89280..820116067c101070c6a4c35727d1e4cfb24563ee 100644 (file)
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 
-#define TLBMISS_HANDLER_SETUP_PGD(pgd)                         \
-       tlbmiss_handler_setup_pgd((unsigned long)(pgd))
-
-extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
+#define TLBMISS_HANDLER_SETUP_PGD(pgd)                                 \
+do {                                                                   \
+       void (*tlbmiss_handler_setup_pgd)(unsigned long);               \
+       extern u32 tlbmiss_handler_setup_pgd_array[16];                 \
+                                                                       \
+       tlbmiss_handler_setup_pgd =                                     \
+               (__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \
+       tlbmiss_handler_setup_pgd((unsigned long)(pgd));                \
+} while (0)
 
 #define TLBMISS_HANDLER_SETUP()                                                \
        do {                                                            \
@@ -106,15 +111,21 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
+       extern void kvm_local_flush_tlb_all(void);
        unsigned long asid = asid_cache(cpu);
 
        if (! ((asid += ASID_INC) & ASID_MASK) ) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
+#ifdef CONFIG_VIRTUALIZATION
+               kvm_local_flush_tlb_all();      /* start new asid cycle */
+#else
                local_flush_tlb_all();  /* start new asid cycle */
+#endif
                if (!asid)              /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }
+
        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
@@ -133,7 +144,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
        int i;
 
-       for_each_online_cpu(i)
+       for_each_possible_cpu(i)
                cpu_context(i, mm) = 0;
 
        return 0;
index 419d8aef8569333418f68f61e88144d1e0b37285..79c7cccdc22ca5b688cfe6f5bdb125f17b37a0dd 100644 (file)
 #ifndef __NLM_HAL_HALDEFS_H__
 #define __NLM_HAL_HALDEFS_H__
 
+#include <linux/irqflags.h>    /* for local_irq_disable */
+
 /*
  * This file contains platform specific memory mapped IO implementation
  * and will provide a way to read 32/64 bit memory mapped registers in
  * all ABIs
  */
-#if !defined(CONFIG_64BIT) && defined(CONFIG_CPU_XLP)
-#error "o32 compile not supported on XLP yet"
-#endif
-/*
- * For o32 compilation, we have to disable interrupts and enable KX bit to
- * access 64 bit addresses or data.
- *
- * We need to disable interrupts because we save just the lower 32 bits of
- * registers in         interrupt handling. So if we get hit by an interrupt while
- * using the upper 32 bits of a register, we lose.
- */
-static inline uint32_t nlm_save_flags_kx(void)
-{
-       return change_c0_status(ST0_KX | ST0_IE, ST0_KX);
-}
-
-static inline uint32_t nlm_save_flags_cop2(void)
-{
-       return change_c0_status(ST0_CU2 | ST0_IE, ST0_CU2);
-}
-
-static inline void nlm_restore_flags(uint32_t sr)
-{
-       write_c0_status(sr);
-}
-
-/*
- * The n64 implementations are simple, the o32 implementations when they
- * are added, will have to disable interrupts and enable KX before doing
- * 64 bit ops.
- */
 static inline uint32_t
 nlm_read_reg(uint64_t base, uint32_t reg)
 {
@@ -87,13 +58,40 @@ nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val)
        *addr = val;
 }
 
+/*
+ * For o32 compilation, we have to disable interrupts to access 64-bit
+ * registers.
+ *
+ * We need to disable interrupts because we save just the lower 32 bits of
+ * registers in interrupt handling. So if we get hit by an interrupt while
+ * using the upper 32 bits of a register, we lose.
+ */
+
 static inline uint64_t
 nlm_read_reg64(uint64_t base, uint32_t reg)
 {
        uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
        volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
-
-       return *ptr;
+       uint64_t val;
+
+       if (sizeof(unsigned long) == 4) {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               __asm__ __volatile__(
+                       ".set   push"                   "\n\t"
+                       ".set   mips64"                 "\n\t"
+                       "ld     %L0, %1"                "\n\t"
+                       "dsra32 %M0, %L0, 0"            "\n\t"
+                       "sll    %L0, %L0, 0"            "\n\t"
+                       ".set   pop"                    "\n"
+                       : "=r" (val)
+                       : "m" (*ptr));
+               local_irq_restore(flags);
+       } else
+               val = *ptr;
+
+       return val;
 }
 
 static inline void
@@ -102,7 +100,25 @@ nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val)
        uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
        volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
 
-       *ptr = val;
+       if (sizeof(unsigned long) == 4) {
+               unsigned long flags;
+               uint64_t tmp;
+
+               local_irq_save(flags);
+               __asm__ __volatile__(
+                       ".set   push"                   "\n\t"
+                       ".set   mips64"                 "\n\t"
+                       "dsll32 %L0, %L0, 0"            "\n\t"
+                       "dsrl32 %L0, %L0, 0"            "\n\t"
+                       "dsll32 %M0, %M0, 0"            "\n\t"
+                       "or     %L0, %L0, %M0"          "\n\t"
+                       "sd     %L0, %2"                "\n\t"
+                       ".set   pop"                    "\n"
+                       : "=r" (tmp)
+                       : "0" (val), "m" (*ptr));
+               local_irq_restore(flags);
+       } else
+               *ptr = val;
 }
 
 /*
@@ -143,14 +159,6 @@ nlm_pcicfg_base(uint32_t devoffset)
        return nlm_io_base + devoffset;
 }
 
-static inline uint64_t
-nlm_xkphys_map_pcibar0(uint64_t pcibase)
-{
-       uint64_t paddr;
-
-       paddr = nlm_read_reg(pcibase, 0x4) & ~0xfu;
-       return (uint64_t)0x9000000000000000 | paddr;
-}
 #elif defined(CONFIG_CPU_XLR)
 
 static inline uint64_t
index 8ad2e0f81719814f077659d48c46a3b4f87c578d..f299d31d7c1a3faf7eeffd22f7409cce90954cd6 100644 (file)
 /*
  * XLR and XLP interrupt request and interrupt mask registers
  */
-#define read_c0_eirr()         __read_64bit_c0_register($9, 6)
-#define read_c0_eimr()         __read_64bit_c0_register($9, 7)
-#define write_c0_eirr(val)     __write_64bit_c0_register($9, 6, val)
-
 /*
- * Writing EIMR in 32 bit is a special case, the lower 8 bit of the
- * EIMR is shadowed in the status register, so we cannot save and
- * restore status register for split read.
+ * NOTE: Do not save/restore flags around write_c0_eimr().
+ * On non-R2 platforms the saved flags include the part of EIMR shadowed in the
+ * STATUS register; restoring flags would overwrite the lower 8 bits of EIMR.
+ *
+ * Call with interrupts disabled.
  */
 #define write_c0_eimr(val)                                             \
 do {                                                                   \
        if (sizeof(unsigned long) == 4) {                               \
-               unsigned long __flags;                                  \
-                                                                       \
-               local_irq_save(__flags);                                \
                __asm__ __volatile__(                                   \
                        ".set\tmips64\n\t"                              \
                        "dsll\t%L0, %L0, 32\n\t"                        \
@@ -62,8 +57,6 @@ do {                                                                  \
                        "dmtc0\t%L0, $9, 7\n\t"                         \
                        ".set\tmips0"                                   \
                        : : "r" (val));                                 \
-               __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\
-               local_irq_restore(__flags);                             \
        } else                                                          \
                __write_64bit_c0_register($9, 7, (val));                \
 } while (0)
@@ -128,7 +121,7 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
        uint64_t val;
 
 #ifdef CONFIG_64BIT
-       val = read_c0_eimr() & read_c0_eirr();
+       val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
 #else
        __asm__ __volatile__(
                ".set   push\n\t"
@@ -143,7 +136,6 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
                ".set   pop"
                : "=r" (val));
 #endif
-
        return val;
 }
 
index 3df53017fe513f29b8ee99a6ce2ff11ad78b198c..a981f4681a154a53fb6fed0bffc788be83ed24a3 100644 (file)
 #define PIC_IRT_PCIE_LINK_2_INDEX      80
 #define PIC_IRT_PCIE_LINK_3_INDEX      81
 #define PIC_IRT_PCIE_LINK_INDEX(num)   ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
-/* 78 to 81 */
-#define PIC_NUM_NA_IRTS                        32
-/* 82 to 113 */
-#define PIC_IRT_NA_0_INDEX             82
-#define PIC_IRT_NA_INDEX(num)          ((num) + PIC_IRT_NA_0_INDEX)
-#define PIC_IRT_POE_INDEX              114
-
-#define PIC_NUM_USB_IRTS               6
-#define PIC_IRT_USB_0_INDEX            115
-#define PIC_IRT_EHCI_0_INDEX           115
-#define PIC_IRT_OHCI_0_INDEX           116
-#define PIC_IRT_OHCI_1_INDEX           117
-#define PIC_IRT_EHCI_1_INDEX           118
-#define PIC_IRT_OHCI_2_INDEX           119
-#define PIC_IRT_OHCI_3_INDEX           120
-#define PIC_IRT_USB_INDEX(num)         ((num) + PIC_IRT_USB_0_INDEX)
-/* 115 to 120 */
-#define PIC_IRT_GDX_INDEX              121
-#define PIC_IRT_SEC_INDEX              122
-#define PIC_IRT_RSA_INDEX              123
-
-#define PIC_NUM_COMP_IRTS              4
-#define PIC_IRT_COMP_0_INDEX           124
-#define PIC_IRT_COMP_INDEX(num)                ((num) + PIC_IRT_COMP_0_INDEX)
-/* 124 to 127 */
-#define PIC_IRT_GBU_INDEX              128
-#define PIC_IRT_ICC_0_INDEX            129 /* ICC - Inter Chip Coherency */
-#define PIC_IRT_ICC_1_INDEX            130
-#define PIC_IRT_ICC_2_INDEX            131
-#define PIC_IRT_CAM_INDEX              132
-#define PIC_IRT_UART_0_INDEX           133
-#define PIC_IRT_UART_1_INDEX           134
-#define PIC_IRT_I2C_0_INDEX            135
-#define PIC_IRT_I2C_1_INDEX            136
-#define PIC_IRT_SYS_0_INDEX            137
-#define PIC_IRT_SYS_1_INDEX            138
-#define PIC_IRT_JTAG_INDEX             139
-#define PIC_IRT_PIC_INDEX              140
-#define PIC_IRT_NBU_INDEX              141
-#define PIC_IRT_TCU_INDEX              142
-#define PIC_IRT_GCU_INDEX              143 /* GBC - Global Coherency */
-#define PIC_IRT_DMC_0_INDEX            144
-#define PIC_IRT_DMC_1_INDEX            145
-
-#define PIC_NUM_GPIO_IRTS              4
-#define PIC_IRT_GPIO_0_INDEX           146
-#define PIC_IRT_GPIO_INDEX(num)                ((num) + PIC_IRT_GPIO_0_INDEX)
-
-/* 146 to 149 */
-#define PIC_IRT_NOR_INDEX              150
-#define PIC_IRT_NAND_INDEX             151
-#define PIC_IRT_SPI_INDEX              152
-#define PIC_IRT_MMC_INDEX              153
 
 #define PIC_CLOCK_TIMER                        7
 #define PIC_IRQ_BASE                   8
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/usb.h b/arch/mips/include/asm/netlogic/xlp-hal/usb.h
deleted file mode 100644 (file)
index a9cd350..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __NLM_HAL_USB_H__
-#define __NLM_HAL_USB_H__
-
-#define USB_CTL_0                      0x01
-#define USB_PHY_0                      0x0A
-#define USB_PHY_RESET                  0x01
-#define USB_PHY_PORT_RESET_0           0x10
-#define USB_PHY_PORT_RESET_1           0x20
-#define USB_CONTROLLER_RESET           0x01
-#define USB_INT_STATUS                 0x0E
-#define USB_INT_EN                     0x0F
-#define USB_PHY_INTERRUPT_EN           0x01
-#define USB_OHCI_INTERRUPT_EN          0x02
-#define USB_OHCI_INTERRUPT1_EN         0x04
-#define USB_OHCI_INTERRUPT2_EN         0x08
-#define USB_CTRL_INTERRUPT_EN          0x10
-
-#ifndef __ASSEMBLY__
-
-#define nlm_read_usb_reg(b, r)                 nlm_read_reg(b, r)
-#define nlm_write_usb_reg(b, r, v)             nlm_write_reg(b, r, v)
-#define nlm_get_usb_pcibase(node, inst)                \
-       nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
-#define nlm_get_usb_hcd_base(node, inst)       \
-       nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst))
-#define nlm_get_usb_regbase(node, inst)                \
-       (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
-
-#endif
-#endif /* __NLM_HAL_USB_H__ */
index eab99e536b5c9137e5cd1f117fa57ca1318c6c54..f59552fae9173264ab58ffd3fbe95e080ec5db93 100644 (file)
@@ -46,7 +46,6 @@
 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
 #include <linux/pfn.h>
-#include <asm/io.h>
 
 extern void build_clear_page(void);
 extern void build_copy_page(void);
@@ -151,6 +150,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
     ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 #define __va(x)                ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+#include <asm/io.h>
 
 /*
  * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
@@ -171,14 +171,13 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 
 #ifdef CONFIG_FLATMEM
 
-#define pfn_valid(pfn)                                                 \
-({                                                                     \
-       unsigned long __pfn = (pfn);                                    \
-       /* avoid <linux/bootmem.h> include hell */                      \
-       extern unsigned long min_low_pfn;                               \
-                                                                       \
-       __pfn >= min_low_pfn && __pfn < max_mapnr;                      \
-})
+static inline int pfn_valid(unsigned long pfn)
+{
+       /* avoid <linux/mm.h> include hell */
+       extern unsigned long max_mapnr;
+
+       return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
+}
 
 #elif defined(CONFIG_SPARSEMEM)
 
index fdc62fb5630da68bb746ed3dac53b45eb5bddc37..8b8f6b39336350b8c570b30d990f4b16332e29db 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef _ASM_PGTABLE_H
 #define _ASM_PGTABLE_H
 
+#include <linux/mm_types.h>
 #include <linux/mmzone.h>
 #ifdef CONFIG_32BIT
 #include <asm/pgtable-32.h>
index 2a5fa7abb346b8633ddda81ff40766c0e2e47aef..1470b7b68b0e98996d271b25b86584fb6f3261a5 100644 (file)
@@ -28,7 +28,6 @@
 /*
  * System setup and hardware flags..
  */
-extern void (*cpu_wait)(void);
 
 extern unsigned int vced_count, vcei_count;
 
@@ -44,11 +43,16 @@ extern unsigned int vced_count, vcei_count;
 #define SPECIAL_PAGES_SIZE PAGE_SIZE
 
 #ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+/* User space process size is limited to 1GB in KVM Guest Mode */
+#define TASK_SIZE      0x3fff8000UL
+#else
 /*
  * User space process size: 2GB. This is hardcoded into a few places,
  * so don't change it unless you know what you are doing.
  */
 #define TASK_SIZE      0x7fff8000UL
+#endif
 
 #ifdef __KERNEL__
 #define STACK_TOP_MAX  TASK_SIZE
index 8808bf548b99d2e0b4966b162f22ed4505d046e0..1e7e0961064bdcdbeba50f3fdce48b1c7cc9029d 100644 (file)
@@ -48,4 +48,7 @@ extern void __dt_setup_arch(struct boot_param_header *bph);
 static inline void device_tree_init(void) { }
 #endif /* CONFIG_OF */
 
+extern char *mips_get_machine_name(void);
+extern void mips_set_machine_name(const char *name);
+
 #endif /* __ASM_PROM_H */
index 1a2c3025bf2892ba98feb2a338ae2cf18c5c3e4c..fdfae43d8b99e49b4f6b20565f6e97178f00a216 100644 (file)
@@ -14,6 +14,6 @@ extern void install_cpu_nmi_handler(int slice);
 extern void install_ipi(void);
 extern void setup_replication_mask(void);
 extern void replicate_kernel_text(void);
-extern pfn_t node_getfirstfree(cnodeid_t);
+extern unsigned long node_getfirstfree(cnodeid_t);
 
 #endif /* __ASM_SN_SN_PRIVATE_H */
index c4813d67aec3f5543272a53e1d87e9a1b202e193..6d24d4e8b9ed855cb27a860133e1a541c91870ab 100644 (file)
@@ -19,7 +19,6 @@ typedef signed char   partid_t;       /* partition ID type */
 typedef signed short   moduleid_t;     /* user-visible module number type */
 typedef signed short   cmoduleid_t;    /* kernel compact module id type */
 typedef unsigned char  clusterid_t;    /* Clusterid of the cell */
-typedef unsigned long  pfn_t;
 
 typedef dev_t          vertex_hdl_t;   /* hardware graph vertex handle */
 
index 5130c88d64204b398be70c968823d408bb27f93c..78d201fb6c87c93608b8295327277a6e8804818e 100644 (file)
@@ -71,7 +71,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "        nop                                            \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
                "       andi    %[ticket], %[ticket], 0xffff            \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       bne     %[ticket], %[my_ticket], 4f             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
                "2:                                                     \n"
@@ -105,7 +104,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "       beqz    %[my_ticket], 1b                        \n"
                "        srl    %[my_ticket], %[ticket], 16             \n"
                "       andi    %[ticket], %[ticket], 0xffff            \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       bne     %[ticket], %[my_ticket], 4f             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
                "2:                                                     \n"
@@ -153,7 +151,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       andi    %[now_serving], %[ticket], 0xffff       \n"
                "       bne     %[my_ticket], %[now_serving], 3f        \n"
                "        addu   %[ticket], %[ticket], %[inc]            \n"
@@ -178,7 +175,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       andi    %[now_serving], %[ticket], 0xffff       \n"
                "       bne     %[my_ticket], %[now_serving], 3f        \n"
                "        addu   %[ticket], %[ticket], %[inc]            \n"
@@ -242,25 +238,16 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_read_lock        \n"
-               "1:     ll      %1, %2                                  \n"
-               "       bltz    %1, 3f                                  \n"
-               "        addu   %1, 1                                   \n"
-               "2:     sc      %1, %0                                  \n"
-               "       beqz    %1, 1b                                  \n"
-               "        nop                                            \n"
-               "       .subsection 2                                   \n"
-               "3:     ll      %1, %2                                  \n"
-               "       bltz    %1, 3b                                  \n"
-               "        addu   %1, 1                                   \n"
-               "       b       2b                                      \n"
-               "        nop                                            \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "1:     ll      %1, %2  # arch_read_lock        \n"
+                       "       bltz    %1, 1b                          \n"
+                       "        addu   %1, 1                           \n"
+                       "2:     sc      %1, %0                          \n"
+                       : "=m" (rw->lock), "=&r" (tmp)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
        }
 
        smp_llsc_mb();
@@ -285,21 +272,15 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_read_unlock      \n"
-               "1:     ll      %1, %2                                  \n"
-               "       sub     %1, 1                                   \n"
-               "       sc      %1, %0                                  \n"
-               "       beqz    %1, 2f                                  \n"
-               "        nop                                            \n"
-               "       .subsection 2                                   \n"
-               "2:     b       1b                                      \n"
-               "        nop                                            \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "1:     ll      %1, %2  # arch_read_unlock      \n"
+                       "       sub     %1, 1                           \n"
+                       "       sc      %1, %0                          \n"
+                       : "=m" (rw->lock), "=&r" (tmp)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
        }
 }
 
@@ -321,25 +302,16 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_write_lock       \n"
-               "1:     ll      %1, %2                                  \n"
-               "       bnez    %1, 3f                                  \n"
-               "        lui    %1, 0x8000                              \n"
-               "2:     sc      %1, %0                                  \n"
-               "       beqz    %1, 3f                                  \n"
-               "        nop                                            \n"
-               "       .subsection 2                                   \n"
-               "3:     ll      %1, %2                                  \n"
-               "       bnez    %1, 3b                                  \n"
-               "        lui    %1, 0x8000                              \n"
-               "       b       2b                                      \n"
-               "        nop                                            \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "1:     ll      %1, %2  # arch_write_lock       \n"
+                       "       bnez    %1, 1b                          \n"
+                       "        lui    %1, 0x8000                      \n"
+                       "2:     sc      %1, %0                          \n"
+                       : "=m" (rw->lock), "=&r" (tmp)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
        }
 
        smp_llsc_mb();
@@ -424,25 +396,21 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_write_trylock    \n"
-               "       li      %2, 0                                   \n"
-               "1:     ll      %1, %3                                  \n"
-               "       bnez    %1, 2f                                  \n"
-               "       lui     %1, 0x8000                              \n"
-               "       sc      %1, %0                                  \n"
-               "       beqz    %1, 3f                                  \n"
-               "        li     %2, 1                                   \n"
-               "2:                                                     \n"
-               __WEAK_LLSC_MB
-               "       .subsection 2                                   \n"
-               "3:     b       1b                                      \n"
-               "        li     %2, 0                                   \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "       ll      %1, %3  # arch_write_trylock    \n"
+                       "       li      %2, 0                           \n"
+                       "       bnez    %1, 2f                          \n"
+                       "       lui     %1, 0x8000                      \n"
+                       "       sc      %1, %0                          \n"
+                       "       li      %2, 1                           \n"
+                       "2:                                             \n"
+                       : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
+
+               smp_llsc_mb();
        }
 
        return ret;
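
The rewritten slow paths above drop the out-of-line ".subsection 2" retry branches: each inline-asm block now makes a single ll/sc attempt (spinning only while the lock is held), and a failed store-conditional (tmp == 0) is retried by the surrounding do { } while (unlikely(!tmp)) in C. A rough user-space analogy of that shape, using a portable compare-and-swap in place of ll/sc (the function and lock layout are illustrative, not kernel code):

#include <stdatomic.h>

/* Negative lock value = writer present; otherwise the value counts readers. */
static void read_lock_sketch(atomic_int *lock)
{
	int old;

	do {
		old = atomic_load_explicit(lock, memory_order_relaxed);
		while (old < 0)		/* writer holds the lock: wait, like "bltz ... 1b" */
			old = atomic_load_explicit(lock, memory_order_relaxed);
		/* One update attempt per iteration, like the single sc above. */
	} while (!atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
			memory_order_acquire, memory_order_relaxed));
}

int main(void)
{
	atomic_int lock = 0;

	read_lock_sketch(&lock);		/* lock now counts one reader */
	return (int)atomic_load(&lock) - 1;	/* exits 0 when the lock was taken */
}

The same structure appears in arch_read_unlock, arch_write_lock and arch_write_trylock above; only the update done inside the asm block differs.
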
index c99384018161ec77fe54e72f1ae851b2b04c8ac3..a89d1b10d027ce65d83eecb9dd57bfda7859400e 100644 (file)
 1:             move    ra, k0
                li      k0, 3
                mtc0    k0, $22
-#endif /* CONFIG_CPU_LOONGSON2F */
+#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
                lui     k1, %hi(kernelsp)
 #else
                LONG_S  $0, PT_R0(sp)
                mfc0    v1, CP0_STATUS
                LONG_S  $2, PT_R2(sp)
+               LONG_S  v1, PT_STATUS(sp)
 #ifdef CONFIG_MIPS_MT_SMTC
                /*
                 * Ideally, these instructions would be shuffled in
                LONG_S  k0, PT_TCSTATUS(sp)
 #endif /* CONFIG_MIPS_MT_SMTC */
                LONG_S  $4, PT_R4(sp)
-               LONG_S  $5, PT_R5(sp)
-               LONG_S  v1, PT_STATUS(sp)
                mfc0    v1, CP0_CAUSE
-               LONG_S  $6, PT_R6(sp)
-               LONG_S  $7, PT_R7(sp)
+               LONG_S  $5, PT_R5(sp)
                LONG_S  v1, PT_CAUSE(sp)
+               LONG_S  $6, PT_R6(sp)
                MFC0    v1, CP0_EPC
+               LONG_S  $7, PT_R7(sp)
 #ifdef CONFIG_64BIT
                LONG_S  $8, PT_R8(sp)
                LONG_S  $9, PT_R9(sp)
 #endif
+               LONG_S  v1, PT_EPC(sp)
                LONG_S  $25, PT_R25(sp)
                LONG_S  $28, PT_R28(sp)
                LONG_S  $31, PT_R31(sp)
-               LONG_S  v1, PT_EPC(sp)
                ori     $28, sp, _THREAD_MASK
                xori    $28, _THREAD_MASK
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
index 178f7924149ad4b5e06ff4aa1a1db868734a10d8..895320e25662cd98a673980c449ec916841920d7 100644 (file)
@@ -58,8 +58,12 @@ struct thread_info {
 #define init_stack             (init_thread_union.stack)
 
 /* How to get the thread information struct from C.  */
-register struct thread_info *__current_thread_info __asm__("$28");
-#define current_thread_info()  __current_thread_info
+static inline struct thread_info *current_thread_info(void)
+{
+       register struct thread_info *__current_thread_info __asm__("$28");
+
+       return __current_thread_info;
+}
 
 #endif /* !__ASSEMBLY__ */
 
index debc8009bd5814e1f22dda89c63fd2eda3497202..2d7b9df4542dd478d53f53d8151d3381d1c58aee 100644 (file)
@@ -52,13 +52,15 @@ extern int (*perf_irq)(void);
  */
 extern unsigned int __weak get_c0_compare_int(void);
 extern int r4k_clockevent_init(void);
+extern int smtc_clockevent_init(void);
+extern int gic_clockevent_init(void);
 
 static inline int mips_clockevent_init(void)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-       extern int smtc_clockevent_init(void);
-
        return smtc_clockevent_init();
+#elif defined(CONFIG_CEVT_GIC)
+       return (gic_clockevent_init() | r4k_clockevent_init());
 #elif defined(CONFIG_CEVT_R4K)
        return r4k_clockevent_init();
 #else
@@ -69,9 +71,7 @@ static inline int mips_clockevent_init(void)
 /*
  * Initialize the count register as a clocksource
  */
-#ifdef CONFIG_CSRC_R4K
 extern int init_r4k_clocksource(void);
-#endif
 
 static inline int init_mips_clocksource(void)
 {
index bd87e36bf26a3cc980c73804bbb8a5d009f4d2ef..f3fa3750f577c2414396943871a8f0bd6df6b928 100644 (file)
  */
 #ifdef CONFIG_32BIT
 
-#define __UA_LIMIT     0x80000000UL
+#ifdef CONFIG_KVM_GUEST
+#define __UA_LIMIT 0x40000000UL
+#else
+#define __UA_LIMIT 0x80000000UL
+#endif
 
 #define __UA_ADDR      ".word"
 #define __UA_LA                "la"
@@ -55,8 +59,13 @@ extern u64 __ua_limit;
  * address in this range it's the process's problem, not ours :-)
  */
 
+#ifdef CONFIG_KVM_GUEST
+#define KERNEL_DS      ((mm_segment_t) { 0x80000000UL })
+#define USER_DS                ((mm_segment_t) { 0xC0000000UL })
+#else
 #define KERNEL_DS      ((mm_segment_t) { 0UL })
 #define USER_DS                ((mm_segment_t) { __UA_LIMIT })
+#endif
 
 #define VERIFY_READ    0
 #define VERIFY_WRITE   1
@@ -261,6 +270,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -287,7 +297,9 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     lw      %1, (%3)                                \n"     \
        "2:     lw      %D1, 4(%3)                              \n"     \
-       "3:     .section        .fixup,\"ax\"                   \n"     \
+       "3:                                                     \n"     \
+       "       .insn                                           \n"     \
+       "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
@@ -355,6 +367,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_asm\n"     \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -373,6 +386,7 @@ do {                                                                        \
        "1:     sw      %2, (%3)        # __put_user_asm_ll32   \n"     \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
@@ -524,6 +538,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -549,7 +564,9 @@ do {                                                                        \
        "1:     ulw     %1, (%3)                                \n"     \
        "2:     ulw     %D1, 4(%3)                              \n"     \
        "       move    %0, $0                                  \n"     \
-       "3:     .section        .fixup,\"ax\"                   \n"     \
+       "3:                                                     \n"     \
+       "       .insn                                           \n"     \
+       "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
@@ -616,6 +633,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -634,6 +652,7 @@ do {                                                                        \
        "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
index 058e941626a6ab51e823fa352e8c1f63e1d06624..370d967725c28d0e59d26ad8726f2ad052d5d2fa 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
  * Copyright (C) 2005  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2012  MIPS Technologies, Inc.
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
  */
 
 #include <linux/types.h>
 #define UASM_EXPORT_SYMBOL(sym)
 #endif
 
+#define _UASM_ISA_CLASSIC      0
+#define _UASM_ISA_MICROMIPS    1
+
+#ifndef UASM_ISA
+#ifdef CONFIG_CPU_MICROMIPS
+#define UASM_ISA       _UASM_ISA_MICROMIPS
+#else
+#define UASM_ISA       _UASM_ISA_CLASSIC
+#endif
+#endif
+
+#if (UASM_ISA == _UASM_ISA_CLASSIC)
+#ifdef CONFIG_CPU_MICROMIPS
+#define ISAOPC(op)     CL_uasm_i##op
+#define ISAFUNC(x)     CL_##x
+#else
+#define ISAOPC(op)     uasm_i##op
+#define ISAFUNC(x)     x
+#endif
+#elif (UASM_ISA == _UASM_ISA_MICROMIPS)
+#ifdef CONFIG_CPU_MICROMIPS
+#define ISAOPC(op)     uasm_i##op
+#define ISAFUNC(x)     x
+#else
+#define ISAOPC(op)     MM_uasm_i##op
+#define ISAFUNC(x)     MM_##x
+#endif
+#else
+#error Unsupported micro-assembler ISA!!!
+#endif
+
 #define Ip_u1u2u3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u2u1u3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u3u1u2(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u1u2s3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u2s3u1(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
 
 #define Ip_u2u1s3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u2u1msbu3(op)                                               \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c,  \
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c,  \
           unsigned int d)
 
 #define Ip_u1u2(op)                                                    \
-void __uasminit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
 
 #define Ip_u1s2(op)                                                    \
-void __uasminit uasm_i##op(u32 **buf, unsigned int a, signed int b)
+void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
 
-#define Ip_u1(op) void __uasminit uasm_i##op(u32 **buf, unsigned int a)
+#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a)
 
-#define Ip_0(op) void __uasminit uasm_i##op(u32 **buf)
+#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf)
 
 Ip_u2u1s3(_addiu);
 Ip_u3u1u2(_addu);
@@ -132,19 +163,20 @@ struct uasm_label {
        int lab;
 };
 
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
+                       int lid);
 #ifdef CONFIG_64BIT
-int uasm_in_compat_space_p(long addr);
+int ISAFUNC(uasm_in_compat_space_p)(long addr);
 #endif
-int uasm_rel_hi(long val);
-int uasm_rel_lo(long val);
-void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+int ISAFUNC(uasm_rel_hi)(long val);
+int ISAFUNC(uasm_rel_lo)(long val);
+void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
+void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
 
 #define UASM_L_LA(lb)                                                  \
-static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
 {                                                                      \
-       uasm_build_label(lab, addr, label##lb);                         \
+       ISAFUNC(uasm_build_label)(lab, addr, label##lb);                \
 }
 
 /* convenience macros for instructions */
@@ -196,27 +228,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
                                     unsigned int a2, unsigned int a3)
 {
        if (a3 < 32)
-               uasm_i_drotr(p, a1, a2, a3);
+               ISAOPC(_drotr)(p, a1, a2, a3);
        else
-               uasm_i_drotr32(p, a1, a2, a3 - 32);
+               ISAOPC(_drotr32)(p, a1, a2, a3 - 32);
 }
 
 static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
                                    unsigned int a2, unsigned int a3)
 {
        if (a3 < 32)
-               uasm_i_dsll(p, a1, a2, a3);
+               ISAOPC(_dsll)(p, a1, a2, a3);
        else
-               uasm_i_dsll32(p, a1, a2, a3 - 32);
+               ISAOPC(_dsll32)(p, a1, a2, a3 - 32);
 }
 
 static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
                                    unsigned int a2, unsigned int a3)
 {
        if (a3 < 32)
-               uasm_i_dsrl(p, a1, a2, a3);
+               ISAOPC(_dsrl)(p, a1, a2, a3);
        else
-               uasm_i_dsrl32(p, a1, a2, a3 - 32);
+               ISAOPC(_dsrl32)(p, a1, a2, a3 - 32);
 }
 
 /* Handle relocations. */
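
The ISAOPC()/ISAFUNC() token pasting introduced above lets the same uasm sources be built twice: the flavour matching the kernel's native ISA keeps the plain uasm_i*/function names, while the other flavour is renamed with a CL_ or MM_ prefix. A small stand-alone sketch of the renaming trick (the configuration macros and the encoder body here are made up for illustration):

#include <stdio.h>

#define _ISA_CLASSIC	0
#define _ISA_MICROMIPS	1

/* Pretend we are building the classic-ISA copy inside a microMIPS build. */
#define ISA		_ISA_CLASSIC
#define CPU_MICROMIPS	1

#if (ISA == _ISA_CLASSIC)
# ifdef CPU_MICROMIPS
#  define ISAOPC(op)	CL_uasm_i##op
# else
#  define ISAOPC(op)	uasm_i##op
# endif
#else
# ifdef CPU_MICROMIPS
#  define ISAOPC(op)	uasm_i##op
# else
#  define ISAOPC(op)	MM_uasm_i##op
# endif
#endif

/* After expansion this definition is really named CL_uasm_i_addiu(). */
static void ISAOPC(_addiu)(unsigned rt, unsigned rs, int imm)
{
	printf("%s: rt=%u rs=%u imm=%d\n", __func__, rt, rs, imm);
}

int main(void)
{
	ISAOPC(_addiu)(4, 4, 16);	/* callers expand the same way */
	return 0;
}

With CPU_MICROMIPS defined, the classic-ISA copy compiles to CL_uasm_i_addiu(); without it, the same source yields uasm_i_addiu().
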
index 4d078815eaa5448d933ecce88e1ada2503f629ce..0f4aec2ad1e6e1c9f7d8b7a1b54cf49ebb58c392 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright (C) 1996, 2000 by Ralf Baechle
  * Copyright (C) 2006 by Thiemo Seufer
+ * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #ifndef _UAPI_ASM_INST_H
 #define _UAPI_ASM_INST_H
@@ -192,6 +193,282 @@ enum lx_func {
        lbx_op  = 0x16,
 };
 
+/*
+ * (microMIPS) Major opcodes.
+ */
+enum mm_major_op {
+       mm_pool32a_op, mm_pool16a_op, mm_lbu16_op, mm_move16_op,
+       mm_addi32_op, mm_lbu32_op, mm_sb32_op, mm_lb32_op,
+       mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
+       mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
+       mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
+       mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
+       mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
+       mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
+       mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
+       mm_slti32_op, mm_beq32_op, mm_swc132_op, mm_lwc132_op,
+       mm_reserved5_op, mm_reserved6_op, mm_sh16_op, mm_bnez16_op,
+       mm_sltiu32_op, mm_bne32_op, mm_sdc132_op, mm_ldc132_op,
+       mm_reserved7_op, mm_reserved8_op, mm_swsp16_op, mm_b16_op,
+       mm_andi32_op, mm_j32_op, mm_sd32_op, mm_ld32_op,
+       mm_reserved11_op, mm_reserved12_op, mm_sw16_op, mm_li16_op,
+       mm_jalx32_op, mm_jal32_op, mm_sw32_op, mm_lw32_op,
+};
+
+/*
+ * (microMIPS) POOL32I minor opcodes.
+ */
+enum mm_32i_minor_op {
+       mm_bltz_op, mm_bltzal_op, mm_bgez_op, mm_bgezal_op,
+       mm_blez_op, mm_bnezc_op, mm_bgtz_op, mm_beqzc_op,
+       mm_tlti_op, mm_tgei_op, mm_tltiu_op, mm_tgeiu_op,
+       mm_tnei_op, mm_lui_op, mm_teqi_op, mm_reserved13_op,
+       mm_synci_op, mm_bltzals_op, mm_reserved14_op, mm_bgezals_op,
+       mm_bc2f_op, mm_bc2t_op, mm_reserved15_op, mm_reserved16_op,
+       mm_reserved17_op, mm_reserved18_op, mm_bposge64_op, mm_bposge32_op,
+       mm_bc1f_op, mm_bc1t_op, mm_reserved19_op, mm_reserved20_op,
+       mm_bc1any2f_op, mm_bc1any2t_op, mm_bc1any4f_op, mm_bc1any4t_op,
+};
+
+/*
+ * (microMIPS) POOL32A minor opcodes.
+ */
+enum mm_32a_minor_op {
+       mm_sll32_op = 0x000,
+       mm_ins_op = 0x00c,
+       mm_ext_op = 0x02c,
+       mm_pool32axf_op = 0x03c,
+       mm_srl32_op = 0x040,
+       mm_sra_op = 0x080,
+       mm_rotr_op = 0x0c0,
+       mm_lwxs_op = 0x118,
+       mm_addu32_op = 0x150,
+       mm_subu32_op = 0x1d0,
+       mm_and_op = 0x250,
+       mm_or32_op = 0x290,
+       mm_xor32_op = 0x310,
+};
+
+/*
+ * (microMIPS) POOL32B functions.
+ */
+enum mm_32b_func {
+       mm_lwc2_func = 0x0,
+       mm_lwp_func = 0x1,
+       mm_ldc2_func = 0x2,
+       mm_ldp_func = 0x4,
+       mm_lwm32_func = 0x5,
+       mm_cache_func = 0x6,
+       mm_ldm_func = 0x7,
+       mm_swc2_func = 0x8,
+       mm_swp_func = 0x9,
+       mm_sdc2_func = 0xa,
+       mm_sdp_func = 0xc,
+       mm_swm32_func = 0xd,
+       mm_sdm_func = 0xf,
+};
+
+/*
+ * (microMIPS) POOL32C functions.
+ */
+enum mm_32c_func {
+       mm_pref_func = 0x2,
+       mm_ll_func = 0x3,
+       mm_swr_func = 0x9,
+       mm_sc_func = 0xb,
+       mm_lwu_func = 0xe,
+};
+
+/*
+ * (microMIPS) POOL32AXF minor opcodes.
+ */
+enum mm_32axf_minor_op {
+       mm_mfc0_op = 0x003,
+       mm_mtc0_op = 0x00b,
+       mm_tlbp_op = 0x00d,
+       mm_jalr_op = 0x03c,
+       mm_tlbr_op = 0x04d,
+       mm_jalrhb_op = 0x07c,
+       mm_tlbwi_op = 0x08d,
+       mm_tlbwr_op = 0x0cd,
+       mm_jalrs_op = 0x13c,
+       mm_jalrshb_op = 0x17c,
+       mm_syscall_op = 0x22d,
+       mm_eret_op = 0x3cd,
+};
+
+/*
+ * (microMIPS) POOL32F minor opcodes.
+ */
+enum mm_32f_minor_op {
+       mm_32f_00_op = 0x00,
+       mm_32f_01_op = 0x01,
+       mm_32f_02_op = 0x02,
+       mm_32f_10_op = 0x08,
+       mm_32f_11_op = 0x09,
+       mm_32f_12_op = 0x0a,
+       mm_32f_20_op = 0x10,
+       mm_32f_30_op = 0x18,
+       mm_32f_40_op = 0x20,
+       mm_32f_41_op = 0x21,
+       mm_32f_42_op = 0x22,
+       mm_32f_50_op = 0x28,
+       mm_32f_51_op = 0x29,
+       mm_32f_52_op = 0x2a,
+       mm_32f_60_op = 0x30,
+       mm_32f_70_op = 0x38,
+       mm_32f_73_op = 0x3b,
+       mm_32f_74_op = 0x3c,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_10_minor_op {
+       mm_lwxc1_op = 0x1,
+       mm_swxc1_op,
+       mm_ldxc1_op,
+       mm_sdxc1_op,
+       mm_luxc1_op,
+       mm_suxc1_op,
+};
+
+enum mm_32f_func {
+       mm_lwxc1_func = 0x048,
+       mm_swxc1_func = 0x088,
+       mm_ldxc1_func = 0x0c8,
+       mm_sdxc1_func = 0x108,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_40_minor_op {
+       mm_fmovf_op,
+       mm_fmovt_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_60_minor_op {
+       mm_fadd_op,
+       mm_fsub_op,
+       mm_fmul_op,
+       mm_fdiv_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_70_minor_op {
+       mm_fmovn_op,
+       mm_fmovz_op,
+};
+
+/*
+ * (microMIPS) POOL32FXF secondary minor opcodes for POOL32F.
+ */
+enum mm_32f_73_minor_op {
+       mm_fmov0_op = 0x01,
+       mm_fcvtl_op = 0x04,
+       mm_movf0_op = 0x05,
+       mm_frsqrt_op = 0x08,
+       mm_ffloorl_op = 0x0c,
+       mm_fabs0_op = 0x0d,
+       mm_fcvtw_op = 0x24,
+       mm_movt0_op = 0x25,
+       mm_fsqrt_op = 0x28,
+       mm_ffloorw_op = 0x2c,
+       mm_fneg0_op = 0x2d,
+       mm_cfc1_op = 0x40,
+       mm_frecip_op = 0x48,
+       mm_fceill_op = 0x4c,
+       mm_fcvtd0_op = 0x4d,
+       mm_ctc1_op = 0x60,
+       mm_fceilw_op = 0x6c,
+       mm_fcvts0_op = 0x6d,
+       mm_mfc1_op = 0x80,
+       mm_fmov1_op = 0x81,
+       mm_movf1_op = 0x85,
+       mm_ftruncl_op = 0x8c,
+       mm_fabs1_op = 0x8d,
+       mm_mtc1_op = 0xa0,
+       mm_movt1_op = 0xa5,
+       mm_ftruncw_op = 0xac,
+       mm_fneg1_op = 0xad,
+       mm_froundl_op = 0xcc,
+       mm_fcvtd1_op = 0xcd,
+       mm_froundw_op = 0xec,
+       mm_fcvts1_op = 0xed,
+};
+
+/*
+ * (microMIPS) POOL16C minor opcodes.
+ */
+enum mm_16c_minor_op {
+       mm_lwm16_op = 0x04,
+       mm_swm16_op = 0x05,
+       mm_jr16_op = 0x18,
+       mm_jrc_op = 0x1a,
+       mm_jalr16_op = 0x1c,
+       mm_jalrs16_op = 0x1e,
+};
+
+/*
+ * (microMIPS) POOL16D minor opcodes.
+ */
+enum mm_16d_minor_op {
+       mm_addius5_func,
+       mm_addiusp_func,
+};
+
+/*
+ * (MIPS16e) opcodes.
+ */
+enum MIPS16e_ops {
+       MIPS16e_jal_op = 003,
+       MIPS16e_ld_op = 007,
+       MIPS16e_i8_op = 014,
+       MIPS16e_sd_op = 017,
+       MIPS16e_lb_op = 020,
+       MIPS16e_lh_op = 021,
+       MIPS16e_lwsp_op = 022,
+       MIPS16e_lw_op = 023,
+       MIPS16e_lbu_op = 024,
+       MIPS16e_lhu_op = 025,
+       MIPS16e_lwpc_op = 026,
+       MIPS16e_lwu_op = 027,
+       MIPS16e_sb_op = 030,
+       MIPS16e_sh_op = 031,
+       MIPS16e_swsp_op = 032,
+       MIPS16e_sw_op = 033,
+       MIPS16e_rr_op = 035,
+       MIPS16e_extend_op = 036,
+       MIPS16e_i64_op = 037,
+};
+
+enum MIPS16e_i64_func {
+       MIPS16e_ldsp_func,
+       MIPS16e_sdsp_func,
+       MIPS16e_sdrasp_func,
+       MIPS16e_dadjsp_func,
+       MIPS16e_ldpc_func,
+};
+
+enum MIPS16e_rr_func {
+       MIPS16e_jr_func,
+};
+
+enum MIPS6e_i8_func {
+       MIPS16e_swrasp_func = 02,
+};
+
+/*
+ * (microMIPS & MIPS16e) NOP instruction.
+ */
+#define MM_NOP16       0x0c00
+
 /*
  * Damn ...  bitfields depend from byteorder :-(
  */
@@ -311,6 +588,262 @@ struct v_format {                         /* MDMX vector format */
        ;)))))))
 };
 
+/*
+ * microMIPS instruction formats (32-bit length)
+ *
+ * NOTE:
+ *     Parenthesis denote whether the format is a microMIPS instruction or
+ *     Parentheses denote whether the format is a microMIPS instruction or
+ *     if it is a MIPS32 instruction re-encoded for use in the microMIPS ASE.
+struct fb_format {             /* FPU branch format (MIPS32) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int bc : 5,
+       BITFIELD_FIELD(unsigned int cc : 3,
+       BITFIELD_FIELD(unsigned int flag : 2,
+       BITFIELD_FIELD(signed int simmediate : 16,
+       ;)))))
+};
+
+struct fp0_format {            /* FPU multiply and add format (MIPS32) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int fmt : 5,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp0_format {         /* FPU multiply and add format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int fmt : 3,
+       BITFIELD_FIELD(unsigned int op : 2,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;)))))))
+};
+
+struct fp1_format {            /* FPU mfc1 and cfc1 format (MIPS32) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int op : 5,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp1_format {         /* FPU mfc1 and cfc1 format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fmt : 2,
+       BITFIELD_FIELD(unsigned int op : 8,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp2_format {         /* FPU movt and movf format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int cc : 3,
+       BITFIELD_FIELD(unsigned int zero : 2,
+       BITFIELD_FIELD(unsigned int fmt : 2,
+       BITFIELD_FIELD(unsigned int op : 3,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))))
+};
+
+struct mm_fp3_format {         /* FPU abs and neg format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fmt : 3,
+       BITFIELD_FIELD(unsigned int op : 7,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp4_format {         /* FPU c.cond format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int cc : 3,
+       BITFIELD_FIELD(unsigned int fmt : 3,
+       BITFIELD_FIELD(unsigned int cond : 4,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;)))))))
+};
+
+struct mm_fp5_format {         /* FPU lwxc1 and swxc1 format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int index : 5,
+       BITFIELD_FIELD(unsigned int base : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int op : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct fp6_format {            /* FPU madd and msub format (MIPS IV) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int fr : 5,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp6_format {         /* FPU madd and msub format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int fr : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_i_format {           /* Immediate format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int rs : 5,
+       BITFIELD_FIELD(signed int simmediate : 16,
+       ;))))
+};
+
+struct mm_m_format {           /* Multi-word load/store format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rd : 5,
+       BITFIELD_FIELD(unsigned int base : 5,
+       BITFIELD_FIELD(unsigned int func : 4,
+       BITFIELD_FIELD(signed int simmediate : 12,
+       ;)))))
+};
+
+struct mm_x_format {           /* Scaled indexed load format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int index : 5,
+       BITFIELD_FIELD(unsigned int base : 5,
+       BITFIELD_FIELD(unsigned int rd : 5,
+       BITFIELD_FIELD(unsigned int func : 11,
+       ;)))))
+};
+
+/*
+ * microMIPS instruction formats (16-bit length)
+ */
+struct mm_b0_format {          /* Unconditional branch format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(signed int simmediate : 10,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;)))
+};
+
+struct mm_b1_format {          /* Conditional branch format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rs : 3,
+       BITFIELD_FIELD(signed int simmediate : 7,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;))))
+};
+
+struct mm16_m_format {         /* Multi-word load/store format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int func : 4,
+       BITFIELD_FIELD(unsigned int rlist : 2,
+       BITFIELD_FIELD(unsigned int imm : 4,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;)))))
+};
+
+struct mm16_rb_format {                /* Signed immediate format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 3,
+       BITFIELD_FIELD(unsigned int base : 3,
+       BITFIELD_FIELD(signed int simmediate : 4,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;)))))
+};
+
+struct mm16_r3_format {                /* Load from global pointer format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 3,
+       BITFIELD_FIELD(signed int simmediate : 7,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;))))
+};
+
+struct mm16_r5_format {                /* Load/store from stack pointer format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(signed int simmediate : 5,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;))))
+};
+
+/*
+ * MIPS16e instruction formats (16-bit length)
+ */
+struct m16e_rr {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int rx : 3,
+       BITFIELD_FIELD(unsigned int nd : 1,
+       BITFIELD_FIELD(unsigned int l : 1,
+       BITFIELD_FIELD(unsigned int ra : 1,
+       BITFIELD_FIELD(unsigned int func : 5,
+       ;))))))
+};
+
+struct m16e_jal {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int x : 1,
+       BITFIELD_FIELD(unsigned int imm20_16 : 5,
+       BITFIELD_FIELD(signed int imm25_21 : 5,
+       ;))))
+};
+
+struct m16e_i64 {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int func : 3,
+       BITFIELD_FIELD(unsigned int imm : 8,
+       ;)))
+};
+
+struct m16e_ri64 {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int func : 3,
+       BITFIELD_FIELD(unsigned int ry : 3,
+       BITFIELD_FIELD(unsigned int imm : 5,
+       ;))))
+};
+
+struct m16e_ri {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int rx : 3,
+       BITFIELD_FIELD(unsigned int imm : 8,
+       ;)))
+};
+
+struct m16e_rri {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int rx : 3,
+       BITFIELD_FIELD(unsigned int ry : 3,
+       BITFIELD_FIELD(unsigned int imm : 5,
+       ;))))
+};
+
+struct m16e_i8 {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int func : 3,
+       BITFIELD_FIELD(unsigned int imm : 8,
+       ;)))
+};
+
 union mips_instruction {
        unsigned int word;
        unsigned short halfword[2];
@@ -326,6 +859,37 @@ union mips_instruction {
        struct b_format b_format;
        struct ps_format ps_format;
        struct v_format v_format;
+       struct fb_format fb_format;
+       struct fp0_format fp0_format;
+       struct mm_fp0_format mm_fp0_format;
+       struct fp1_format fp1_format;
+       struct mm_fp1_format mm_fp1_format;
+       struct mm_fp2_format mm_fp2_format;
+       struct mm_fp3_format mm_fp3_format;
+       struct mm_fp4_format mm_fp4_format;
+       struct mm_fp5_format mm_fp5_format;
+       struct fp6_format fp6_format;
+       struct mm_fp6_format mm_fp6_format;
+       struct mm_i_format mm_i_format;
+       struct mm_m_format mm_m_format;
+       struct mm_x_format mm_x_format;
+       struct mm_b0_format mm_b0_format;
+       struct mm_b1_format mm_b1_format;
+       struct mm16_m_format mm16_m_format;
+       struct mm16_rb_format mm16_rb_format;
+       struct mm16_r3_format mm16_r3_format;
+       struct mm16_r5_format mm16_r5_format;
+};
+
+union mips16e_instruction {
+       unsigned int full : 16;
+       struct m16e_rr rr;
+       struct m16e_jal jal;
+       struct m16e_i64 i64;
+       struct m16e_ri64 ri64;
+       struct m16e_ri ri;
+       struct m16e_rri rri;
+       struct m16e_i8 i8;
 };
 
 #endif /* _UAPI_ASM_INST_H */
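
The BITFIELD_FIELD wrappers above exist only to cope with bitfield ordering across endiannesses; numerically the fields sit at fixed positions in the 32-bit instruction word. A user-space sketch that decodes the mm_i_format layout (opcode:6, rt:5, rs:5, simmediate:16) with plain shifts, assuming the word has already been fetched into host byte order:

#include <stdint.h>
#include <stdio.h>

struct mm_i_decoded {
	unsigned opcode, rt, rs;
	int simm;
};

static struct mm_i_decoded decode_mm_i(uint32_t word)
{
	struct mm_i_decoded d;

	d.opcode = (word >> 26) & 0x3f;
	d.rt     = (word >> 21) & 0x1f;
	d.rs     = (word >> 16) & 0x1f;
	d.simm   = (int16_t)(word & 0xffff);	/* sign-extend the immediate */
	return d;
}

int main(void)
{
	struct mm_i_decoded d = decode_mm_i(0x30630008);	/* example word */

	printf("opcode=%#x rt=%u rs=%u simm=%d\n", d.opcode, d.rt, d.rs, d.simm);
	return 0;
}

The example word should correspond to ADDIU32 (major opcode 0x0c in the mm_major_op enum above) with rt = rs = 3 and an immediate of 8.
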
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..3f424f5
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Cavium, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+/*
+ * KVM MIPS specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ *
+ * If Config[AT] is zero (32-bit CPU), the register contents are
+ * stored in the lower 32-bits of the struct kvm_regs fields and sign
+ * extended to 64-bits.
+ */
+struct kvm_regs {
+       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+       __u64 gpr[32];
+       __u64 hi;
+       __u64 lo;
+       __u64 pc;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ *
+ * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
+ * are zero filled.
+ */
+struct kvm_fpu {
+       __u64 fpr[32];
+       __u32 fir;
+       __u32 fccr;
+       __u32 fexr;
+       __u32 fenr;
+       __u32 fcsr;
+       __u32 pad;
+};
+
+
+/*
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
+ * registers.  The id field is broken down as follows:
+ *
+ *  bits[2..0]   - Register 'sel' index.
+ *  bits[7..3]   - Register 'rd'  index.
+ *  bits[15..8]  - Must be zero.
+ *  bits[63..16] - 1 -> CP0 registers.
+ *
+ * Other register sets may be added in the future.  Each set would
+ * have its own identifier in bits[63..16].
+ *
+ * The addr field of struct kvm_one_reg must point to an aligned
+ * 64-bit wide location.  For registers that are narrower than
+ * 64-bits, the value is stored in the low order bits of the location,
+ * and sign extended to 64-bits.
+ *
+ * The registers defined in struct kvm_regs are also accessible, the
+ * id values for these are below.
+ */
+
+#define KVM_REG_MIPS_R0 0
+#define KVM_REG_MIPS_R1 1
+#define KVM_REG_MIPS_R2 2
+#define KVM_REG_MIPS_R3 3
+#define KVM_REG_MIPS_R4 4
+#define KVM_REG_MIPS_R5 5
+#define KVM_REG_MIPS_R6 6
+#define KVM_REG_MIPS_R7 7
+#define KVM_REG_MIPS_R8 8
+#define KVM_REG_MIPS_R9 9
+#define KVM_REG_MIPS_R10 10
+#define KVM_REG_MIPS_R11 11
+#define KVM_REG_MIPS_R12 12
+#define KVM_REG_MIPS_R13 13
+#define KVM_REG_MIPS_R14 14
+#define KVM_REG_MIPS_R15 15
+#define KVM_REG_MIPS_R16 16
+#define KVM_REG_MIPS_R17 17
+#define KVM_REG_MIPS_R18 18
+#define KVM_REG_MIPS_R19 19
+#define KVM_REG_MIPS_R20 20
+#define KVM_REG_MIPS_R21 21
+#define KVM_REG_MIPS_R22 22
+#define KVM_REG_MIPS_R23 23
+#define KVM_REG_MIPS_R24 24
+#define KVM_REG_MIPS_R25 25
+#define KVM_REG_MIPS_R26 26
+#define KVM_REG_MIPS_R27 27
+#define KVM_REG_MIPS_R28 28
+#define KVM_REG_MIPS_R29 29
+#define KVM_REG_MIPS_R30 30
+#define KVM_REG_MIPS_R31 31
+
+#define KVM_REG_MIPS_HI 32
+#define KVM_REG_MIPS_LO 33
+#define KVM_REG_MIPS_PC 34
+
+/*
+ * KVM MIPS specific structures and definitions
+ *
+ */
+struct kvm_debug_exit_arch {
+       __u64 epc;
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+struct kvm_mips_interrupt {
+       /* in */
+       __u32 cpu;
+       __u32 irq;
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
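
Following the comment above, a CP0 register id for KVM_GET_ONE_REG/KVM_SET_ONE_REG packs the 'sel' index into bits 2..0, the 'rd' index into bits 7..3, and places 1 in bits 63..16. A small helper showing that packing (the helper name and example are mine, not part of this header):

#include <stdint.h>
#include <stdio.h>

static uint64_t mips_cp0_one_reg_id(unsigned rd, unsigned sel)
{
	/* bits[63..16] = 1 selects the CP0 set; bits[15..8] stay zero. */
	return ((uint64_t)1 << 16) | ((uint64_t)(rd & 0x1f) << 3) | (sel & 0x7);
}

int main(void)
{
	/* CP0 Status is register 12, select 0. */
	printf("one-reg id for CP0 Status: %#llx\n",
	       (unsigned long long)mips_cp0_one_reg_id(12, 0));
	return 0;
}

For CP0 Status (rd = 12, sel = 0) this yields 0x10060, alongside the fixed ids 0..34 defined above for the general-purpose registers, HI, LO and PC.
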
index 16338b84fa79d337c9d0f32c64f37de9a23a43e3..1dee279f96659c6ae2d2ac6d42af77483454076d 100644 (file)
 #define __NR_process_vm_writev         (__NR_Linux + 305)
 #define __NR_kcmp                      (__NR_Linux + 306)
 #define __NR_finit_module              (__NR_Linux + 307)
+#define __NR_getdents64                        (__NR_Linux + 308)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            307
+#define __NR_Linux_syscalls            308
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         307
+#define __NR_64_Linux_syscalls         308
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
index 520a908d45d62af6d3edb1ef83b2f0f990faa562..423d871a946ba15ae5b5ea70338530949fa8d166 100644 (file)
@@ -4,8 +4,8 @@
 
 extra-y                := head.o vmlinux.lds
 
-obj-y          += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
-                  ptrace.o reset.o setup.o signal.o syscall.o \
+obj-y          += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
+                  prom.o ptrace.o reset.o setup.o signal.o syscall.o \
                   time.o topology.o traps.o unaligned.o watch.o vdso.o
 
 ifdef CONFIG_FUNCTION_TRACER
@@ -19,15 +19,16 @@ obj-$(CONFIG_CEVT_BCM1480)  += cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)         += cevt-r4k.o
 obj-$(CONFIG_MIPS_MT_SMTC)     += cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)      += cevt-ds1287.o
+obj-$(CONFIG_CEVT_GIC)         += cevt-gic.o
 obj-$(CONFIG_CEVT_GT641XX)     += cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)      += cevt-sb1250.o
 obj-$(CONFIG_CEVT_TXX9)                += cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)     += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_GIC)         += csrc-gic.o
 obj-$(CONFIG_CSRC_IOASIC)      += csrc-ioasic.o
 obj-$(CONFIG_CSRC_POWERTV)     += csrc-powertv.o
 obj-$(CONFIG_CSRC_R4K)         += csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)      += csrc-sb1250.o
-obj-$(CONFIG_CSRC_GIC)         += csrc-gic.o
 obj-$(CONFIG_SYNC_R4K)         += sync-r4k.o
 
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
@@ -86,8 +87,6 @@ obj-$(CONFIG_EARLY_PRINTK)    += early_printk.o
 obj-$(CONFIG_SPINLOCK_TEST)    += spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)     += mips_machine.o
 
-obj-$(CONFIG_OF)               += prom.o
-
 CFLAGS_cpu-bugs64.o    = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT)  += 8250-platform.o
index 50285b2c7ffe30d5a54d877f7f0cac075dd59f8c..0845091ba480bba5313e32f7e12c2a0b79b83048 100644 (file)
@@ -17,6 +17,8 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
+#include <linux/kvm_host.h>
+
 void output_ptreg_defines(void)
 {
        COMMENT("MIPS pt_regs offsets.");
@@ -328,3 +330,67 @@ void output_pbe_defines(void)
        BLANK();
 }
 #endif
+
+void output_kvm_defines(void)
+{
+       COMMENT(" KVM/MIPS Specific offsets. ");
+       DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
+       OFFSET(VCPU_RUN, kvm_vcpu, run);
+       OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
+
+       OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
+       OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
+
+       OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
+       OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
+
+       OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
+       OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
+       OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
+       OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
+
+       OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
+
+       OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
+       OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
+       OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
+       OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
+       OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
+       OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
+       OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
+       OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
+       OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
+       OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
+       OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
+       OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
+       OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
+       OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
+       OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
+       OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
+       OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
+       OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
+       OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
+       OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
+       OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
+       OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
+       OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
+       OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
+       OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
+       OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
+       OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
+       OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
+       OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
+       OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
+       OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
+       OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
+       OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
+       OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
+       OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+       OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
+       OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
+       OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
+
+       OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
+       OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
+       BLANK();
+}
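
output_kvm_defines() relies on the usual asm-offsets technique: DEFINE()/OFFSET() emit structure sizes and member offsets into the generated assembly, from which the build produces a header that the hand-written KVM entry/exit assembly can include. A stand-alone sketch of the idea, with an invented structure and a printf stand-in for the real macros:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for kvm_vcpu_arch; field names are assumptions. */
struct vcpu_arch_sketch {
	unsigned long host_stack;
	unsigned long gprs[32];
	unsigned long pc;
};

/* The real asm-offsets.c emits these as assembler-visible constants;
 * printing them shows the same numbers the generated header would hold. */
#define OFFSET(sym, str, mem) \
	printf(#sym " = %zu\n", offsetof(struct str, mem))

int main(void)
{
	OFFSET(VCPU_HOST_STACK, vcpu_arch_sketch, host_stack);
	OFFSET(VCPU_R0,         vcpu_arch_sketch, gprs[0]);
	OFFSET(VCPU_PC,         vcpu_arch_sketch, pc);
	return 0;
}
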
index 556a4357d7fc69f79b024fd968ef409bcbe20fb8..97c5a1668e5347bb4a7668882931349cfdac08a6 100644 (file)
@@ -48,7 +48,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
        __res;                                                          \
 })
 
+#ifdef CONFIG_KVM_GUEST
+#define TASK32_SIZE            0x3fff8000UL
+#else
 #define TASK32_SIZE            0x7fff8000UL
+#endif
 #undef ELF_ET_DYN_BASE
 #define ELF_ET_DYN_BASE                (TASK32_SIZE / 3 * 2)
 
index 83ffe950f710f86c985d56e9febabb0aafe39d75..46c2ad0703a0b1040140b4a2041c179fdaa45b34 100644 (file)
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
 #include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
 #include <asm/inst.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
+/*
+ * Calculate and return exception PC in case of branch delay slot
+ * for microMIPS and MIPS16e. It does not clear the ISA mode bit.
+ */
+int __isa_exception_epc(struct pt_regs *regs)
+{
+       unsigned short inst;
+       long epc = regs->cp0_epc;
+
+       /* Calculate exception PC in branch delay slot. */
+       if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
+               /* This should never happen because delay slot was checked. */
+               force_sig(SIGSEGV, current);
+               return epc;
+       }
+       if (cpu_has_mips16) {
+               if (((union mips16e_instruction)inst).ri.opcode
+                               == MIPS16e_jal_op)
+                       epc += 4;
+               else
+                       epc += 2;
+       } else if (mm_insn_16bit(inst))
+               epc += 2;
+       else
+               epc += 4;
+
+       return epc;
+}
+
+/*
+ * Compute return address and emulate branch in microMIPS mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __microMIPS_compute_return_epc(struct pt_regs *regs)
+{
+       u16 __user *pc16;
+       u16 halfword;
+       unsigned int word;
+       unsigned long contpc;
+       struct mm_decoded_insn mminsn = { 0 };
+
+       mminsn.micro_mips_mode = 1;
+
+       /* This load never faults. */
+       pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+       __get_user(halfword, pc16);
+       pc16++;
+       contpc = regs->cp0_epc + 2;
+       word = ((unsigned int)halfword << 16);
+       mminsn.pc_inc = 2;
+
+       if (!mm_insn_16bit(halfword)) {
+               __get_user(halfword, pc16);
+               pc16++;
+               contpc = regs->cp0_epc + 4;
+               mminsn.pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.insn = word;
+
+       if (get_user(halfword, pc16))
+               goto sigsegv;
+       mminsn.next_pc_inc = 2;
+       word = ((unsigned int)halfword << 16);
+
+       if (!mm_insn_16bit(halfword)) {
+               pc16++;
+               if (get_user(halfword, pc16))
+                       goto sigsegv;
+               mminsn.next_pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.next_insn = word;
+
+       mm_isBranchInstr(regs, mminsn, &contpc);
+
+       regs->cp0_epc = contpc;
+
+       return 0;
+
+sigsegv:
+       force_sig(SIGSEGV, current);
+       return -EFAULT;
+}
+
+/*
+ * Compute return address and emulate branch in MIPS16e mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+{
+       u16 __user *addr;
+       union mips16e_instruction inst;
+       u16 inst2;
+       u32 fullinst;
+       long epc;
+
+       epc = regs->cp0_epc;
+
+       /* Read the instruction. */
+       addr = (u16 __user *)msk_isa16_mode(epc);
+       if (__get_user(inst.full, addr)) {
+               force_sig(SIGSEGV, current);
+               return -EFAULT;
+       }
+
+       switch (inst.ri.opcode) {
+       case MIPS16e_extend_op:
+               regs->cp0_epc += 4;
+               return 0;
+
+               /*
+                *  JAL and JALX in MIPS16e mode
+                */
+       case MIPS16e_jal_op:
+               addr += 1;
+               if (__get_user(inst2, addr)) {
+                       force_sig(SIGSEGV, current);
+                       return -EFAULT;
+               }
+               fullinst = ((unsigned)inst.full << 16) | inst2;
+               regs->regs[31] = epc + 6;
+               epc += 4;
+               epc >>= 28;
+               epc <<= 28;
+               /*
+                * JAL:5 X:1 TARGET[20-16]:5 TARGET[25:21]:5 TARGET[15:0]:16
+                *
+                * ......TARGET[15:0].................TARGET[20:16]...........
+                * ......TARGET[25:21]
+                */
+               epc |=
+                   ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
+                   ((fullinst & 0x1f0000) << 7);
+               if (!inst.jal.x)
+                       set_isa16_mode(epc);    /* Set ISA mode bit. */
+               regs->cp0_epc = epc;
+               return 0;
+
+               /*
+                *  J(AL)R(C)
+                */
+       case MIPS16e_rr_op:
+               if (inst.rr.func == MIPS16e_jr_func) {
+
+                       if (inst.rr.ra)
+                               regs->cp0_epc = regs->regs[31];
+                       else
+                               regs->cp0_epc =
+                                   regs->regs[reg16to32[inst.rr.rx]];
+
+                       if (inst.rr.l) {
+                               if (inst.rr.nd)
+                                       regs->regs[31] = epc + 2;
+                               else
+                                       regs->regs[31] = epc + 4;
+                       }
+                       return 0;
+               }
+               break;
+       }
+
+       /*
+        * All other cases have no branch delay slot and are 16-bits.
+        * Branches do not cause an exception.
+        */
+       regs->cp0_epc += 2;
+
+       return 0;
+}
+
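
In the MIPS16e JAL/JALX case above, the 26-bit jump target is stored with its fields out of order (TARGET[20:16] before TARGET[25:21]); the three OR terms reassemble it into a byte address within the current 256 MB region. A stand-alone sketch of that reconstruction, using the same masks (note the kernel first advances epc past the 4-byte JAL before masking):

#include <stdint.h>
#include <stdio.h>

/* fullinst = (first halfword << 16) | second halfword, as in the code above:
 * bits 31..27 opcode, bit 26 X, bits 25..21 TARGET[20:16],
 * bits 20..16 TARGET[25:21], bits 15..0 TARGET[15:0].
 */
static unsigned long mips16e_jal_target(uint32_t fullinst, unsigned long epc)
{
	unsigned long target = epc & ~0x0fffffffUL;	/* keep the 256 MB region */

	target |= ((fullinst & 0x0000ffffUL) << 2) |	/* TARGET[15:0]  -> bits 17..2  */
		  ((fullinst & 0x03e00000UL) >> 3) |	/* TARGET[20:16] -> bits 22..18 */
		  ((fullinst & 0x001f0000UL) << 7);	/* TARGET[25:21] -> bits 27..23 */
	return target;
}

int main(void)
{
	printf("target = %#lx\n", mips16e_jal_target(0x1a345678, 0x80000000UL));
	return 0;
}
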
 /**
  * __compute_return_epc_for_insn - Computes the return address and do emulate
  *                                 branch simulation, if required.
@@ -129,6 +305,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                epc <<= 28;
                epc |= (insn.j_format.target << 2);
                regs->cp0_epc = epc;
+               if (insn.i_format.opcode == jalx_op)
+                       set_isa16_mode(regs->cp0_epc);
                break;
 
        /*
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
new file mode 100644 (file)
index 0000000..730eaf9
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013  Imagination Technologies Ltd.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/gic.h>
+#include <asm/mips-boards/maltaint.h>
+
+DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
+int gic_timer_irq_installed;
+
+
+static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+       u64 cnt;
+       int res;
+
+       cnt = gic_read_count();
+       cnt += (u64)delta;
+       gic_write_compare(cnt);
+       res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
+       return res;
+}
+
+void gic_set_clock_mode(enum clock_event_mode mode,
+                               struct clock_event_device *evt)
+{
+       /* Nothing to do ...  */
+}
+
+irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *cd;
+       int cpu = smp_processor_id();
+
+       gic_write_compare(gic_read_compare());
+       cd = &per_cpu(gic_clockevent_device, cpu);
+       cd->event_handler(cd);
+       return IRQ_HANDLED;
+}
+
+struct irqaction gic_compare_irqaction = {
+       .handler = gic_compare_interrupt,
+       .flags = IRQF_PERCPU | IRQF_TIMER,
+       .name = "timer",
+};
+
+
+void gic_event_handler(struct clock_event_device *dev)
+{
+}
+
+int __cpuinit gic_clockevent_init(void)
+{
+       unsigned int cpu = smp_processor_id();
+       struct clock_event_device *cd;
+       unsigned int irq;
+
+       if (!cpu_has_counter || !gic_frequency)
+               return -ENXIO;
+
+       irq = MIPS_GIC_IRQ_BASE;
+
+       cd = &per_cpu(gic_clockevent_device, cpu);
+
+       cd->name                = "MIPS GIC";
+       cd->features            = CLOCK_EVT_FEAT_ONESHOT;
+
+       clockevent_set_clock(cd, gic_frequency);
+
+       /* Calculate the min / max delta */
+       cd->max_delta_ns        = clockevent_delta2ns(0x7fffffff, cd);
+       cd->min_delta_ns        = clockevent_delta2ns(0x300, cd);
+
+       cd->rating              = 300;
+       cd->irq                 = irq;
+       cd->cpumask             = cpumask_of(cpu);
+       cd->set_next_event      = gic_next_event;
+       cd->set_mode            = gic_set_clock_mode;
+       cd->event_handler       = gic_event_handler;
+
+       clockevents_register_device(cd);
+
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
+
+       if (gic_timer_irq_installed)
+               return 0;
+
+       gic_timer_irq_installed = 1;
+
+       setup_irq(irq, &gic_compare_irqaction);
+       irq_set_handler(irq, handle_percpu_irq);
+       return 0;
+}
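For a sense of scale: clockevent_delta2ns() converts the tick deltas programmed above using gic_frequency, which is board-specific. Assuming, purely for illustration, a 50 MHz GIC counter (gic_frequency = 50000000):

    max_delta: 0x7fffffff ticks / 50 MHz ~= 42.9 s
    min_delta: 0x300 (768) ticks / 50 MHz ~= 15.4 us

gic_next_event() returning -ETIME tells the clockevents core that the requested deadline had already passed by the time the compare register was written, so the core can retry with a larger delta.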
index 07b847d77f5da029dca6a6cbb53198807ff781a8..02033eaf8825420eea30105e92edcc795d8b5b70 100644 (file)
@@ -23,7 +23,6 @@
  */
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
 {
@@ -49,7 +48,6 @@ DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 int cp0_timer_irq_installed;
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
        const int r2 = cpu_has_mips_r2;
@@ -74,6 +72,9 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
                /* Clear Count/Compare Interrupt */
                write_c0_compare(read_c0_compare());
                cd = &per_cpu(mips_clockevent_device, cpu);
+#ifdef CONFIG_CEVT_GIC
+               if (!gic_present)
+#endif
                cd->event_handler(cd);
        }
 
@@ -118,6 +119,10 @@ int c0_compare_int_usable(void)
        unsigned int delta;
        unsigned int cnt;
 
+#ifdef CONFIG_KVM_GUEST
+    return 1;
+#endif
+
        /*
         * IP7 already pending?  Try to clear it by acking the timer.
         */
@@ -166,7 +171,6 @@ int c0_compare_int_usable(void)
 }
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 int __cpuinit r4k_clockevent_init(void)
 {
        unsigned int cpu = smp_processor_id();
@@ -206,6 +210,9 @@ int __cpuinit r4k_clockevent_init(void)
        cd->set_mode            = mips_set_clock_mode;
        cd->event_handler       = mips_event_handler;
 
+#ifdef CONFIG_CEVT_GIC
+       if (!gic_present)
+#endif
        clockevents_register_device(cd);
 
        if (cp0_timer_irq_installed)
index 5fe66a0c32245366bc56079eca0a160a7b9bc0cb..c6568bf4b1b05559b43bc18f65e33c84a85963ba 100644 (file)
 #include <asm/spram.h>
 #include <asm/uaccess.h>
 
-/*
- * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
- * the implementation of the "wait" feature differs between CPU families. This
- * points to the function that implements CPU specific wait.
- * The wait instruction stops the pipeline and reduces the power consumption of
- * the CPU very much.
- */
-void (*cpu_wait)(void);
-EXPORT_SYMBOL(cpu_wait);
-
-static void r3081_wait(void)
-{
-       unsigned long cfg = read_c0_conf();
-       write_c0_conf(cfg | R30XX_CONF_HALT);
-}
-
-static void r39xx_wait(void)
-{
-       local_irq_disable();
-       if (!need_resched())
-               write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
-       local_irq_enable();
-}
-
-extern void r4k_wait(void);
-
-/*
- * This variant is preferable as it allows testing need_resched and going to
- * sleep depending on the outcome atomically.  Unfortunately the "It is
- * implementation-dependent whether the pipeline restarts when a non-enabled
- * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
- * using this version a gamble.
- */
-void r4k_wait_irqoff(void)
-{
-       local_irq_disable();
-       if (!need_resched())
-               __asm__("       .set    push            \n"
-                       "       .set    mips3           \n"
-                       "       wait                    \n"
-                       "       .set    pop             \n");
-       local_irq_enable();
-       __asm__("       .globl __pastwait       \n"
-               "__pastwait:                    \n");
-}
-
-/*
- * The RM7000 variant has to handle erratum 38.         The workaround is to not
- * have any pending stores when the WAIT instruction is executed.
- */
-static void rm7k_wait_irqoff(void)
-{
-       local_irq_disable();
-       if (!need_resched())
-               __asm__(
-               "       .set    push                                    \n"
-               "       .set    mips3                                   \n"
-               "       .set    noat                                    \n"
-               "       mfc0    $1, $12                                 \n"
-               "       sync                                            \n"
-               "       mtc0    $1, $12         # stalls until W stage  \n"
-               "       wait                                            \n"
-               "       mtc0    $1, $12         # stalls until W stage  \n"
-               "       .set    pop                                     \n");
-       local_irq_enable();
-}
-
-/*
- * The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter.
- * alchemy/common/time.c may override cpu_wait!
- */
-static void au1k_wait(void)
-{
-       __asm__("       .set    mips3                   \n"
-               "       cache   0x14, 0(%0)             \n"
-               "       cache   0x14, 32(%0)            \n"
-               "       sync                            \n"
-               "       nop                             \n"
-               "       wait                            \n"
-               "       nop                             \n"
-               "       nop                             \n"
-               "       nop                             \n"
-               "       nop                             \n"
-               "       .set    mips0                   \n"
-               : : "r" (au1k_wait));
-}
-
-static int __initdata nowait;
-
-static int __init wait_disable(char *s)
-{
-       nowait = 1;
-
-       return 1;
-}
-
-__setup("nowait", wait_disable);
-
 static int __cpuinitdata mips_fpu_disabled;
 
 static int __init fpu_disable(char *s)
@@ -150,105 +51,6 @@ static int __init dsp_disable(char *s)
 
 __setup("nodsp", dsp_disable);
 
-void __init check_wait(void)
-{
-       struct cpuinfo_mips *c = &current_cpu_data;
-
-       if (nowait) {
-               printk("Wait instruction disabled.\n");
-               return;
-       }
-
-       switch (c->cputype) {
-       case CPU_R3081:
-       case CPU_R3081E:
-               cpu_wait = r3081_wait;
-               break;
-       case CPU_TX3927:
-               cpu_wait = r39xx_wait;
-               break;
-       case CPU_R4200:
-/*     case CPU_R4300: */
-       case CPU_R4600:
-       case CPU_R4640:
-       case CPU_R4650:
-       case CPU_R4700:
-       case CPU_R5000:
-       case CPU_R5500:
-       case CPU_NEVADA:
-       case CPU_4KC:
-       case CPU_4KEC:
-       case CPU_4KSC:
-       case CPU_5KC:
-       case CPU_25KF:
-       case CPU_PR4450:
-       case CPU_BMIPS3300:
-       case CPU_BMIPS4350:
-       case CPU_BMIPS4380:
-       case CPU_BMIPS5000:
-       case CPU_CAVIUM_OCTEON:
-       case CPU_CAVIUM_OCTEON_PLUS:
-       case CPU_CAVIUM_OCTEON2:
-       case CPU_JZRISC:
-       case CPU_LOONGSON1:
-       case CPU_XLR:
-       case CPU_XLP:
-               cpu_wait = r4k_wait;
-               break;
-
-       case CPU_RM7000:
-               cpu_wait = rm7k_wait_irqoff;
-               break;
-
-       case CPU_M14KC:
-       case CPU_M14KEC:
-       case CPU_24K:
-       case CPU_34K:
-       case CPU_1004K:
-               cpu_wait = r4k_wait;
-               if (read_c0_config7() & MIPS_CONF7_WII)
-                       cpu_wait = r4k_wait_irqoff;
-               break;
-
-       case CPU_74K:
-               cpu_wait = r4k_wait;
-               if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
-                       cpu_wait = r4k_wait_irqoff;
-               break;
-
-       case CPU_TX49XX:
-               cpu_wait = r4k_wait_irqoff;
-               break;
-       case CPU_ALCHEMY:
-               cpu_wait = au1k_wait;
-               break;
-       case CPU_20KC:
-               /*
-                * WAIT on Rev1.0 has E1, E2, E3 and E16.
-                * WAIT on Rev2.0 and Rev3.0 has E16.
-                * Rev3.1 WAIT is nop, why bother
-                */
-               if ((c->processor_id & 0xff) <= 0x64)
-                       break;
-
-               /*
-                * Another rev is incremeting c0_count at a reduced clock
-                * rate while in WAIT mode.  So we basically have the choice
-                * between using the cp0 timer as clocksource or avoiding
-                * the WAIT instruction.  Until more details are known,
-                * disable the use of WAIT for 20Kc entirely.
-                  cpu_wait = r4k_wait;
-                */
-               break;
-       case CPU_RM9000:
-               if ((c->processor_id & 0x00ff) >= 0x40)
-                       cpu_wait = r4k_wait;
-               break;
-       default:
-               break;
-       }
-}
-
 static inline void check_errata(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
@@ -470,6 +272,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
                c->options |= MIPS_CPU_ULRI;
        if (config3 & MIPS_CONF3_ISA)
                c->options |= MIPS_CPU_MICROMIPS;
+#ifdef CONFIG_CPU_MICROMIPS
+       write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
+#endif
        if (config3 & MIPS_CONF3_VZ)
                c->ases |= MIPS_ASE_VZ;
 
index 35bed0d2342c9a779ef5d0c12a3a8080938484f7..3be9e7bb30ff05dbfa1c4866d3decba0a69a8715 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/bootmem.h>
 #include <linux/crash_dump.h>
 #include <asm/uaccess.h>
+#include <linux/slab.h>
 
 static int __init parse_savemaxmem(char *p)
 {
index 5dca24bce51bef67320417767e7df8419a983dd7..e026209011178342a6c98bbd709366fb8f3d06d8 100644 (file)
@@ -5,23 +5,14 @@
  *
  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/clocksource.h>
 #include <linux/init.h>
+#include <linux/time.h>
 
-#include <asm/time.h>
 #include <asm/gic.h>
 
 static cycle_t gic_hpt_read(struct clocksource *cs)
 {
-       unsigned int hi, hi2, lo;
-
-       do {
-               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
-               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
-               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
-       } while (hi2 != hi);
-
-       return (((cycle_t) hi) << 32) + lo;
+       return gic_read_count();
 }
 
 static struct clocksource gic_clocksource = {
index ecb347ce1b3d03edc9cb15c3d5dcaf9bb434da84..31fa856829cbf2620521317e5247d42b9e3fb087 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2001 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2007  Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/init.h>
 
 #include <asm/war.h>
 #include <asm/thread_info.h>
 
+#ifdef CONFIG_MIPS_MT_SMTC
 #define PANIC_PIC(msg)                                 \
-               .set push;                              \
+               .set    push;                           \
+               .set    nomicromips;                    \
                .set    reorder;                        \
                PTR_LA  a0,8f;                          \
                .set    noat;                           \
 9:             b       9b;                             \
                .set    pop;                            \
                TEXT(msg)
+#endif
 
        __INIT
 
-NESTED(except_vec0_generic, 0, sp)
-       PANIC_PIC("Exception vector 0 called")
-       END(except_vec0_generic)
-
-NESTED(except_vec1_generic, 0, sp)
-       PANIC_PIC("Exception vector 1 called")
-       END(except_vec1_generic)
-
 /*
  * General exception vector for all other CPUs.
  *
@@ -127,7 +122,7 @@ handle_vcei:
        __FINIT
 
        .align  5       /* 32 byte rollback region */
-LEAF(r4k_wait)
+LEAF(__r4k_wait)
        .set    push
        .set    noreorder
        /* start of rollback region */
@@ -138,20 +133,27 @@ LEAF(r4k_wait)
         nop
        nop
        nop
+#ifdef CONFIG_CPU_MICROMIPS
+       nop
+       nop
+       nop
+       nop
+#endif
        .set    mips3
        wait
        /* end of rollback region (the region size must be power of two) */
-       .set    pop
 1:
        jr      ra
-       END(r4k_wait)
+       nop
+       .set    pop
+       END(__r4k_wait)
 
        .macro  BUILD_ROLLBACK_PROLOGUE handler
        FEXPORT(rollback_\handler)
        .set    push
        .set    noat
        MFC0    k0, CP0_EPC
-       PTR_LA  k1, r4k_wait
+       PTR_LA  k1, __r4k_wait
        ori     k0, 0x1f        /* 32 byte rollback region */
        xori    k0, 0x1f
        bne     k0, k1, 9f
@@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp)
        LONG_L  s0, TI_REGS($28)
        LONG_S  sp, TI_REGS($28)
        PTR_LA  ra, ret_from_irq
-       j       plat_irq_dispatch
+       PTR_LA  v0, plat_irq_dispatch
+       jr      v0
+#ifdef CONFIG_CPU_MICROMIPS
+       nop
+#endif
        END(handle_int)
 
        __INIT
@@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp)
 /*
  * EJTAG debug exception handler.
  * The EJTAG debug exception entry point is 0xbfc00480, which
- * normally is in the boot PROM, so the boot PROM must do a
+ * normally is in the boot PROM, so the boot PROM must do an
  * unconditional jump to this vector.
  */
 NESTED(except_vec_ejtag_debug, 0, sp)
        j       ejtag_debug_handler
+#ifdef CONFIG_CPU_MICROMIPS
+        nop
+#endif
        END(except_vec_ejtag_debug)
 
        __FINIT
@@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp)
 FEXPORT(except_vec_vi_mori)
        ori     a0, $0, 0
 #endif /* CONFIG_MIPS_MT_SMTC */
+       PTR_LA  v1, except_vec_vi_handler
 FEXPORT(except_vec_vi_lui)
        lui     v0, 0           /* Patched */
-       j       except_vec_vi_handler
+       jr      v1
 FEXPORT(except_vec_vi_ori)
         ori    v0, 0           /* Patched */
        .set    pop
@@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer)
  */
 NESTED(except_vec_nmi, 0, sp)
        j       nmi_handler
+#ifdef CONFIG_CPU_MICROMIPS
+        nop
+#endif
        END(except_vec_nmi)
 
        __FINIT
@@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    push
        .set    noat
        .set    noreorder
-       /* 0x7c03e83b: rdhwr v1,$29 */
+       /* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
+       /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
        MFC0    k1, CP0_EPC
-       lui     k0, 0x7c03
-       lw      k1, (k1)
-       ori     k0, 0xe83b
-       .set    reorder
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
+       and     k0, k1, 1
+       beqz    k0, 1f
+       xor     k1, k0
+       lhu     k0, (k1)
+       lhu     k1, 2(k1)
+       ins     k1, k0, 16, 16
+       lui     k0, 0x007d
+       b       docheck
+       ori     k0, 0x6b3c
+1:
+       lui     k0, 0x7c03
+       lw      k1, (k1)
+       ori     k0, 0xe83b
+#else
+       andi    k0, k1, 1
+       bnez    k0, handle_ri
+       lui     k0, 0x7c03
+       lw      k1, (k1)
+       ori     k0, 0xe83b
+#endif
+       .set    reorder
+docheck:
        bne     k0, k1, handle_ri       /* if not ours */
+
+isrdhwr:
        /* The insn is rdhwr.  No need to check CAUSE.BD here. */
        get_saved_sp    /* k1 := current_thread_info */
        .set    noreorder
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
new file mode 100644 (file)
index 0000000..3b09b88
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * MIPS idle loop and WAIT instruction support.
+ *
+ * Copyright (C) xxxx  the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004  Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012         MIPS Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
+ * the implementation of the "wait" feature differs between CPU families. This
+ * points to the function that implements CPU specific wait.
+ * The wait instruction stops the pipeline and reduces the power consumption of
+ * the CPU very much.
+ */
+void (*cpu_wait)(void);
+EXPORT_SYMBOL(cpu_wait);
+
+static void r3081_wait(void)
+{
+       unsigned long cfg = read_c0_conf();
+       write_c0_conf(cfg | R30XX_CONF_HALT);
+       local_irq_enable();
+}
+
+static void r39xx_wait(void)
+{
+       if (!need_resched())
+               write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+       local_irq_enable();
+}
+
+void r4k_wait(void)
+{
+       local_irq_enable();
+       __r4k_wait();
+}
+
+/*
+ * This variant is preferable as it allows testing need_resched and going to
+ * sleep depending on the outcome atomically.  Unfortunately the "It is
+ * implementation-dependent whether the pipeline restarts when a non-enabled
+ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
+ * using this version a gamble.
+ */
+void r4k_wait_irqoff(void)
+{
+       if (!need_resched())
+               __asm__(
+               "       .set    push            \n"
+               "       .set    mips3           \n"
+               "       wait                    \n"
+               "       .set    pop             \n");
+       local_irq_enable();
+       __asm__(
+       "       .globl __pastwait       \n"
+       "__pastwait:                    \n");
+}
+
+/*
+ * The RM7000 variant has to handle erratum 38.         The workaround is to not
+ * have any pending stores when the WAIT instruction is executed.
+ */
+static void rm7k_wait_irqoff(void)
+{
+       if (!need_resched())
+               __asm__(
+               "       .set    push                                    \n"
+               "       .set    mips3                                   \n"
+               "       .set    noat                                    \n"
+               "       mfc0    $1, $12                                 \n"
+               "       sync                                            \n"
+               "       mtc0    $1, $12         # stalls until W stage  \n"
+               "       wait                                            \n"
+               "       mtc0    $1, $12         # stalls until W stage  \n"
+               "       .set    pop                                     \n");
+       local_irq_enable();
+}
+
+/*
+ * The Au1xxx wait is available only if using 32khz counter or
+ * external timer source, but specifically not CP0 Counter.
+ * alchemy/common/time.c may override cpu_wait!
+ */
+static void au1k_wait(void)
+{
+       __asm__(
+       "       .set    mips3                   \n"
+       "       cache   0x14, 0(%0)             \n"
+       "       cache   0x14, 32(%0)            \n"
+       "       sync                            \n"
+       "       nop                             \n"
+       "       wait                            \n"
+       "       nop                             \n"
+       "       nop                             \n"
+       "       nop                             \n"
+       "       nop                             \n"
+       "       .set    mips0                   \n"
+       : : "r" (au1k_wait));
+       local_irq_enable();
+}
+
+static int __initdata nowait;
+
+static int __init wait_disable(char *s)
+{
+       nowait = 1;
+
+       return 1;
+}
+
+__setup("nowait", wait_disable);
+
+void __init check_wait(void)
+{
+       struct cpuinfo_mips *c = &current_cpu_data;
+
+       if (nowait) {
+               printk("Wait instruction disabled.\n");
+               return;
+       }
+
+       switch (c->cputype) {
+       case CPU_R3081:
+       case CPU_R3081E:
+               cpu_wait = r3081_wait;
+               break;
+       case CPU_TX3927:
+               cpu_wait = r39xx_wait;
+               break;
+       case CPU_R4200:
+/*     case CPU_R4300: */
+       case CPU_R4600:
+       case CPU_R4640:
+       case CPU_R4650:
+       case CPU_R4700:
+       case CPU_R5000:
+       case CPU_R5500:
+       case CPU_NEVADA:
+       case CPU_4KC:
+       case CPU_4KEC:
+       case CPU_4KSC:
+       case CPU_5KC:
+       case CPU_25KF:
+       case CPU_PR4450:
+       case CPU_BMIPS3300:
+       case CPU_BMIPS4350:
+       case CPU_BMIPS4380:
+       case CPU_BMIPS5000:
+       case CPU_CAVIUM_OCTEON:
+       case CPU_CAVIUM_OCTEON_PLUS:
+       case CPU_CAVIUM_OCTEON2:
+       case CPU_JZRISC:
+       case CPU_LOONGSON1:
+       case CPU_XLR:
+       case CPU_XLP:
+               cpu_wait = r4k_wait;
+               break;
+
+       case CPU_RM7000:
+               cpu_wait = rm7k_wait_irqoff;
+               break;
+
+       case CPU_M14KC:
+       case CPU_M14KEC:
+       case CPU_24K:
+       case CPU_34K:
+       case CPU_1004K:
+               cpu_wait = r4k_wait;
+               if (read_c0_config7() & MIPS_CONF7_WII)
+                       cpu_wait = r4k_wait_irqoff;
+               break;
+
+       case CPU_74K:
+               cpu_wait = r4k_wait;
+               if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
+                       cpu_wait = r4k_wait_irqoff;
+               break;
+
+       case CPU_TX49XX:
+               cpu_wait = r4k_wait_irqoff;
+               break;
+       case CPU_ALCHEMY:
+               cpu_wait = au1k_wait;
+               break;
+       case CPU_20KC:
+               /*
+                * WAIT on Rev1.0 has E1, E2, E3 and E16.
+                * WAIT on Rev2.0 and Rev3.0 has E16.
+                * Rev3.1 WAIT is nop, why bother
+                */
+               if ((c->processor_id & 0xff) <= 0x64)
+                       break;
+
+               /*
+                * Another rev is incrementing c0_count at a reduced clock
+                * rate while in WAIT mode.  So we basically have the choice
+                * between using the cp0 timer as clocksource or avoiding
+                * the WAIT instruction.  Until more details are known,
+                * disable the use of WAIT for 20Kc entirely.
+                  cpu_wait = r4k_wait;
+                */
+               break;
+       case CPU_RM9000:
+               if ((c->processor_id & 0x00ff) >= 0x40)
+                       cpu_wait = r4k_wait;
+               break;
+       default:
+               break;
+       }
+}
+
+static void smtc_idle_hook(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       void smtc_idle_loop_hook(void);
+
+       smtc_idle_loop_hook();
+#endif
+}
+
+void arch_cpu_idle(void)
+{
+       smtc_idle_hook();
+       if (cpu_wait)
+               cpu_wait();
+       else
+               local_irq_enable();
+}
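Note the convention the new file keeps throughout: arch_cpu_idle() is entered with interrupts disabled, and every cpu_wait variant re-enables them itself (hence the local_irq_enable() in each function above). Anything that overrides cpu_wait, as alchemy/common/time.c may do, has to follow the same rule. A hypothetical override, names illustrative only and shown as a kernel-context sketch rather than a drop-in driver:

    #include <linux/init.h>
    #include <linux/irqflags.h>
    #include <linux/sched.h>
    #include <asm/idle.h>

    static void my_soc_wait(void)
    {
            if (!need_resched())
                    __asm__("       .set    mips3   \n"
                            "       wait            \n"
                            "       .set    mips0   \n");
            local_irq_enable();     /* required: arch_cpu_idle() relies on cpu_wait() to re-enable IRQs */
    }

    static void __init my_soc_idle_setup(void)
    {
            cpu_wait = my_soc_wait;         /* same hook alchemy/common/time.c uses */
    }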
index 485e6a961b317ea0f6778b04e57c8c08d3ad8c02..c01b307317a9635b88394d18164710262e206234 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/clocksource.h>
 
 #include <asm/io.h>
 #include <asm/gic.h>
@@ -19,6 +20,8 @@
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
 
+unsigned int gic_frequency;
+unsigned int gic_present;
 unsigned long _gic_base;
 unsigned int gic_irq_base;
 unsigned int gic_irq_flags[GIC_NUM_INTRS];
@@ -30,6 +33,39 @@ static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
 static struct gic_pending_regs pending_regs[NR_CPUS];
 static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
 
+#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
+cycle_t gic_read_count(void)
+{
+       unsigned int hi, hi2, lo;
+
+       do {
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
+       } while (hi2 != hi);
+
+       return (((cycle_t) hi) << 32) + lo;
+}
+
+void gic_write_compare(cycle_t cnt)
+{
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+                               (int)(cnt >> 32));
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+                               (int)(cnt & 0xffffffff));
+}
+
+cycle_t gic_read_compare(void)
+{
+       unsigned int hi, lo;
+
+       GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
+       GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
+
+       return (((cycle_t) hi) << 32) + lo;
+}
+#endif
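The loop in gic_read_count() exists because the two 32-bit halves cannot be read atomically: if the counter sits at 0x00000001ffffffff when the high word is sampled (hi = 1) and the low word wraps before it is read (lo = 0), the second high-word read returns hi2 = 2 != hi, so the function retries instead of returning the torn value 0x0000000100000000. gic_write_compare() needs no such loop because the compare registers only change under software control.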
+
 unsigned int gic_get_timer_pending(void)
 {
        unsigned int vpe_pending;
@@ -116,6 +152,17 @@ static void __init vpe_local_setup(unsigned int numvpes)
        }
 }
 
+unsigned int gic_compare_int(void)
+{
+       unsigned int pending;
+
+       GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
+       if (pending & GIC_VPE_PEND_CMP_MSK)
+               return 1;
+       else
+               return 0;
+}
+
 unsigned int gic_get_int(void)
 {
        unsigned int i;
index 12bc4ebdf55b6bf5e087532feafcaddca94b417e..1f8187ab0997be478f69b66eadb8ac4a6bf7e489 100644 (file)
@@ -207,7 +207,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-       free_insn_slot(p->ainsn.insn, 0);
+       if (p->ainsn.insn) {
+               free_insn_slot(p->ainsn.insn, 0);
+               p->ainsn.insn = NULL;
+       }
 }
 
 static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
index d1d576b765f5ecdd86e19c63ade4634d326687fe..0b29646bcee770533e4a0d25cf1a50b4cc167b7b 100644 (file)
@@ -165,10 +165,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
        return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
                             merge_64(len_a4, len_a5));
 }
-
-SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
-               u64, a3, u64, a4, int, dfd, const char  __user *, pathname)
-{
-       return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
-                                dfd, pathname);
-}
index 411a058d2c536c7709edfcb9476b4fa185034942..876097529697250254b9c3fd58492348703381e3 100644 (file)
@@ -11,9 +11,9 @@
 #include <linux/slab.h>
 
 #include <asm/mips_machine.h>
+#include <asm/prom.h>
 
 static struct mips_machine *mips_machine __initdata;
-static char *mips_machine_name = "Unknown";
 
 #define for_each_machine(mach) \
        for ((mach) = (struct mips_machine *)&__mips_machines_start; \
@@ -21,25 +21,6 @@ static char *mips_machine_name = "Unknown";
             (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
             (mach)++)
 
-__init void mips_set_machine_name(const char *name)
-{
-       char *p;
-
-       if (name == NULL)
-               return;
-
-       p = kstrdup(name, GFP_KERNEL);
-       if (!p)
-               pr_err("MIPS: no memory for machine_name\n");
-
-       mips_machine_name = p;
-}
-
-char *mips_get_machine_name(void)
-{
-       return mips_machine_name;
-}
-
 __init int mips_machtype_setup(char *id)
 {
        struct mips_machine *mach;
@@ -79,7 +60,6 @@ __init void mips_machine_setup(void)
                return;
 
        mips_set_machine_name(mips_machine->mach_name);
-       pr_info("MIPS: machine is %s\n", mips_machine_name);
 
        if (mips_machine->mach_setup)
                mips_machine->mach_setup();
index 7a54f74b7818ad402a70221eb2090dc91f5643f8..acb34373679e21f9940f62f251e64dc1e32e5f17 100644 (file)
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
+#include <asm/idle.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
-#include <asm/mips_machine.h>
+#include <asm/prom.h>
 
 unsigned int vced_count, vcei_count;
 
@@ -99,6 +100,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_vz)         seq_printf(m, "%s", " vz");
        seq_printf(m, "\n");
 
+       if (cpu_has_mmips) {
+               seq_printf(m, "micromips kernel\t: %s\n",
+                     (read_c0_config3() & MIPS_CONF3_ISA_OE) ?  "yes" : "no");
+       }
        seq_printf(m, "shadow register sets\t: %d\n",
                      cpu_data[n].srsets);
        seq_printf(m, "kscratch registers\t: %d\n",
index cfc742d75b7f3a74f1ace85269d55e0e0b85a48d..c6a041d9d05d57fcd71f91a228c86524e00efd08 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013  Imagination Technologies Ltd.
  */
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -50,19 +51,6 @@ void arch_cpu_idle_dead(void)
 }
 #endif
 
-void arch_cpu_idle(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-       extern void smtc_idle_loop_hook(void);
-
-       smtc_idle_loop_hook();
-#endif
-       if (cpu_wait)
-               (*cpu_wait)();
-       else
-               local_irq_enable();
-}
-
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
@@ -223,36 +211,122 @@ struct mips_frame_info {
        int             pc_offset;
 };
 
+#define J_TARGET(pc,target)    \
+               (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
+
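J_TARGET() mirrors the hardware's J/JAL target formation: the 26-bit target field, shifted left by two, supplies the low 28 bits of the address while the top four bits come from the location of the jump itself. For example, a j instruction at 0x80012340 with a target field of 0x0100000 resolves to 0x80000000 | (0x0100000 << 2) = 0x80400000. It is used further down to follow the tail jump from schedule() to __schedule() when CONFIG_KALLSYMS is not available.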
 static inline int is_ra_save_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       union mips_instruction mmi;
+
+       /*
+        * swsp ra,offset
+        * swm16 reglist,offset(sp)
+        * swm32 reglist,offset(sp)
+        * sw32 ra,offset(sp)
+        * jraddiusp - NOT SUPPORTED
+        *
+        * microMIPS is way more fun...
+        */
+       if (mm_insn_16bit(ip->halfword[0])) {
+               mmi.word = (ip->halfword[0] << 16);
+               return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
+                        mmi.mm16_r5_format.rt == 31) ||
+                       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
+                        mmi.mm16_m_format.func == mm_swm16_op));
+       }
+       else {
+               mmi.halfword[0] = ip->halfword[1];
+               mmi.halfword[1] = ip->halfword[0];
+               return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
+                        mmi.mm_m_format.rd > 9 &&
+                        mmi.mm_m_format.base == 29 &&
+                        mmi.mm_m_format.func == mm_swm32_func) ||
+                       (mmi.i_format.opcode == mm_sw32_op &&
+                        mmi.i_format.rs == 29 &&
+                        mmi.i_format.rt == 31));
+       }
+#else
        /* sw / sd $ra, offset($sp) */
        return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
                ip->i_format.rs == 29 &&
                ip->i_format.rt == 31;
+#endif
 }
 
-static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
+static inline int is_jump_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * jr16,jrc,jalr16,jalrs16
+        * jal
+        * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
+        * jraddiusp - NOT SUPPORTED
+        *
+        * microMIPS is kind of more fun...
+        */
+       union mips_instruction mmi;
+
+       mmi.word = (ip->halfword[0] << 16);
+
+       if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
+           (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
+           ip->j_format.opcode == mm_jal32_op)
+               return 1;
+       if (ip->r_format.opcode != mm_pool32a_op ||
+                       ip->r_format.func != mm_pool32axf_op)
+               return 0;
+       return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
+#else
+       if (ip->j_format.opcode == j_op)
+               return 1;
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+#endif
 }
 
 static inline int is_sp_move_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * addiusp -imm
+        * addius5 sp,-imm
+        * addiu32 sp,sp,-imm
+        * jraddiusp - NOT SUPPORTED
+        *
+        * microMIPS is not more fun...
+        */
+       if (mm_insn_16bit(ip->halfword[0])) {
+               union mips_instruction mmi;
+
+               mmi.word = (ip->halfword[0] << 16);
+               return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
+                        mmi.mm16_r3_format.simmediate & mm_addiusp_func) ||
+                       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
+                        mmi.mm16_r5_format.rt == 29));
+       }
+       return (ip->mm_i_format.opcode == mm_addiu32_op &&
+                ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
+#else
        /* addiu/daddiu sp,sp,-imm */
        if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
                return 0;
        if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
                return 1;
+#endif
        return 0;
 }
 
 static int get_frame_info(struct mips_frame_info *info)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       union mips_instruction *ip = (void *) (((char *) info->func) - 1);
+#else
        union mips_instruction *ip = info->func;
+#endif
        unsigned max_insns = info->func_size / sizeof(union mips_instruction);
        unsigned i;
 
@@ -268,11 +342,30 @@ static int get_frame_info(struct mips_frame_info *info)
 
        for (i = 0; i < max_insns; i++, ip++) {
 
-               if (is_jal_jalr_jr_ins(ip))
+               if (is_jump_ins(ip))
                        break;
                if (!info->frame_size) {
                        if (is_sp_move_ins(ip))
+                       {
+#ifdef CONFIG_CPU_MICROMIPS
+                               if (mm_insn_16bit(ip->halfword[0]))
+                               {
+                                       unsigned short tmp;
+
+                                       if (ip->halfword[0] & mm_addiusp_func)
+                                       {
+                                               tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
+                                               info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
+                                       } else {
+                                               tmp = (ip->halfword[0] >> 1);
+                                               info->frame_size = -(signed short)(tmp & 0xf);
+                                       }
+                                       ip = (void *) &ip->halfword[1];
+                                       ip--;
+                               } else
+#endif
                                info->frame_size = - ip->i_format.simmediate;
+                       }
                        continue;
                }
                if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
@@ -292,15 +385,42 @@ static int get_frame_info(struct mips_frame_info *info)
 
 static struct mips_frame_info schedule_mfi __read_mostly;
 
+#ifdef CONFIG_KALLSYMS
+static unsigned long get___schedule_addr(void)
+{
+       return kallsyms_lookup_name("__schedule");
+}
+#else
+static unsigned long get___schedule_addr(void)
+{
+       union mips_instruction *ip = (void *)schedule;
+       int max_insns = 8;
+       int i;
+
+       for (i = 0; i < max_insns; i++, ip++) {
+               if (ip->j_format.opcode == j_op)
+                       return J_TARGET(ip, ip->j_format.target);
+       }
+       return 0;
+}
+#endif
+
 static int __init frame_info_init(void)
 {
        unsigned long size = 0;
 #ifdef CONFIG_KALLSYMS
        unsigned long ofs;
+#endif
+       unsigned long addr;
+
+       addr = get___schedule_addr();
+       if (!addr)
+               addr = (unsigned long)schedule;
 
-       kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
+#ifdef CONFIG_KALLSYMS
+       kallsyms_lookup_size_offset(addr, &size, &ofs);
 #endif
-       schedule_mfi.func = schedule;
+       schedule_mfi.func = (void *)addr;
        schedule_mfi.func_size = size;
 
        get_frame_info(&schedule_mfi);
index 028f6f837ef9c975697cd763fa685126a5be6b48..5712bb5322454f1a98d169e58490e77b62e149be 100644 (file)
 #include <asm/page.h>
 #include <asm/prom.h>
 
+static char mips_machine_name[64] = "Unknown";
+
+__init void mips_set_machine_name(const char *name)
+{
+       if (name == NULL)
+               return;
+
+       strncpy(mips_machine_name, name, sizeof(mips_machine_name));
+       pr_info("MIPS: machine is %s\n", mips_get_machine_name());
+}
+
+char *mips_get_machine_name(void)
+{
+       return mips_machine_name;
+}
+
+#ifdef CONFIG_OF
 int __init early_init_dt_scan_memory_arch(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
@@ -50,6 +67,18 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start,
 }
 #endif
 
+int __init early_init_dt_scan_model(unsigned long node,        const char *uname,
+                                   int depth, void *data)
+{
+       if (!depth) {
+               char *model = of_get_flat_dt_prop(node, "model", NULL);
+
+               if (model)
+                       mips_set_machine_name(model);
+       }
+       return 0;
+}
+
 void __init early_init_devtree(void *params)
 {
        /* Setup flat device-tree pointer */
@@ -65,6 +94,9 @@ void __init early_init_devtree(void *params)
        /* Scan memory nodes */
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
+
+       /* try to load the mips machine name */
+       of_scan_flat_dt(early_init_dt_scan_model, NULL);
 }
 
 void __init __dt_setup_arch(struct boot_param_header *bph)
@@ -79,3 +111,4 @@ void __init __dt_setup_arch(struct boot_param_header *bph)
 
        early_init_devtree(initial_boot_params);
 }
+#endif
index 9ea29649fc28b6c119e965e750fe77efeaef77ec..9b36424b03c5f41aa48312c770f90e69a43f6fae 100644 (file)
@@ -138,9 +138,18 @@ stackargs:
 5:     jr      t1
         sw     t5, 16(sp)              # argument #5 to ksp
 
+#ifdef CONFIG_CPU_MICROMIPS
        sw      t8, 28(sp)              # argument #8 to ksp
+       nop
        sw      t7, 24(sp)              # argument #7 to ksp
+       nop
        sw      t6, 20(sp)              # argument #6 to ksp
+       nop
+#else
+       sw      t8, 28(sp)              # argument #8 to ksp
+       sw      t7, 24(sp)              # argument #7 to ksp
+       sw      t6, 20(sp)              # argument #6 to ksp
+#endif
 6:     j       stack_done              # go back
         nop
        .set    pop
index 36cfd4060e1f423eed05869a1adc9591f36ab662..97a5909a61cf0c623dfdf8284eaf11bf13e7adf7 100644 (file)
@@ -423,4 +423,5 @@ sys_call_table:
        PTR     sys_process_vm_writev           /* 5305 */
        PTR     sys_kcmp
        PTR     sys_finit_module
+       PTR     sys_getdents64
        .size   sys_call_table,.-sys_call_table
index 103bfe570fe8ee375d94732b074efee7723ec550..74f485d3c0ef41bb7f73204f06a7a801d0465f13 100644 (file)
@@ -529,7 +529,7 @@ sys_call_table:
        PTR     sys_accept4
        PTR     compat_sys_recvmmsg             /* 4335 */
        PTR     sys_fanotify_init
-       PTR     sys_32_fanotify_mark
+       PTR     compat_sys_fanotify_mark
        PTR     sys_prlimit64
        PTR     sys_name_to_handle_at
        PTR     compat_sys_open_by_handle_at    /* 4340 */
index 4c774d5d50874ab515e023809ffd48fcb611c053..c7f90519e58ce0ade98dd826752fbaa1d2287da0 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/pfn.h>
 #include <linux/debugfs.h>
 #include <linux/kexec.h>
+#include <linux/sizes.h>
 
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
@@ -77,6 +78,8 @@ EXPORT_SYMBOL(mips_io_port_base);
 static struct resource code_resource = { .name = "Kernel code", };
 static struct resource data_resource = { .name = "Kernel data", };
 
+static void *detect_magic __initdata = detect_memory_region;
+
 void __init add_memory_region(phys_t start, phys_t size, long type)
 {
        int x = boot_mem_map.nr_map;
@@ -122,6 +125,25 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
        boot_mem_map.nr_map++;
 }
 
+void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
+{
+       void *dm = &detect_magic;
+       phys_t size;
+
+       for (size = sz_min; size < sz_max; size <<= 1) {
+               if (!memcmp(dm, dm + size, sizeof(detect_magic)))
+                       break;
+       }
+
+       pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
+               ((unsigned long long) size) / SZ_1M,
+               (unsigned long long) start,
+               ((unsigned long long) sz_min) / SZ_1M,
+               ((unsigned long long) sz_max) / SZ_1M);
+
+       add_memory_region(start, size, BOOT_MEM_RAM);
+}
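detect_memory_region() leans on the fact that, on many of these SoCs, physical addresses beyond the fitted RAM alias back onto it: the probe keeps doubling the size from sz_min until the kernel's own detect_magic pointer reappears at that offset, then registers the result as one region. A hypothetical platform caller (the function body, headers and bounds shown here are assumptions for illustration) might look like:

    #include <linux/sizes.h>
    #include <asm/bootinfo.h>

    void __init plat_mem_setup(void)
    {
            /* probe for anywhere between 8 MB and 256 MB of RAM at physical 0 */
            detect_memory_region(0, SZ_8M, SZ_256M);
    }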
+
 static void __init print_memory_map(void)
 {
        int i;
index b5e88fd832775009a774b727bfcb489667e3cca8..fd3ef2c2afbc37732d9bedcb9fff59d1dda935ea 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/vdso.h>
 #include <asm/dsp.h>
+#include <asm/inst.h>
 
 #include "signal-common.h"
 
@@ -480,7 +481,15 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
        sigset_t *oldset = sigmask_to_save();
        int ret;
        struct mips_abi *abi = current->thread.abi;
+#ifdef CONFIG_CPU_MICROMIPS
+       void *vdso;
+       unsigned int tmp = (unsigned int)current->mm->context.vdso;
+
+       set_isa16_mode(tmp);
+       vdso = (void *)tmp;
+#else
        void *vdso = current->mm->context.vdso;
+#endif
 
        if (regs->regs[0]) {
                switch(regs->regs[2]) {
index bfede063d96a52e3dd8f12aedf67f15468d7c3aa..3e5164c11cacabe66ea021cb29c979b93d920e00 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/mips_mt.h>
+#include <asm/gic.h>
 
 static void __init smvp_copy_vpe_config(void)
 {
@@ -151,8 +152,6 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 static void __cpuinit vsmp_init_secondary(void)
 {
 #ifdef CONFIG_IRQ_GIC
-       extern int gic_present;
-
        /* This is Malta specific: IPI,performance and timer interrupts */
        if (gic_present)
                change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
index aee04af213c5895d5b273b4f17fe896621707705..6e7862ab46cc4a6fef3c31e1ade85e04b357824f 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/idle.h>
 #include <asm/r4k-timer.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
@@ -83,6 +84,7 @@ static inline void set_cpu_sibling_map(int cpu)
 }
 
 struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
 
 __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
 {
index 76016ac0a9c8818960e61f055ac47e4d55a9bdf2..2866863a39df269da6ba9710acf96808197e33ea 100644 (file)
@@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
        .text
        .align 5
 FEXPORT(__smtc_ipi_vector)
+#ifdef CONFIG_CPU_MICROMIPS
+       nop
+#endif
        .set    noat
        /* Disable thread scheduling to make Status update atomic */
        DMT     27                                      # dmt   k1
index 7186222dc5bb285a4ff74a23a5851dd03b9356c1..75a4fd709841a9df42d4eba3218e053f7bf8c351 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/hardirq.h>
 #include <asm/hazards.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 #include <asm/mmu_context.h>
 #include <asm/mipsregs.h>
 #include <asm/cacheflush.h>
@@ -858,7 +859,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
        unsigned long flags;
        int mtflags;
        unsigned long tcrestart;
-       extern void r4k_wait_irqoff(void), __pastwait(void);
        int set_resched_flag = (type == LINUX_SMP_IPI &&
                                action == SMP_RESCHEDULE_YOURSELF);
 
@@ -914,8 +914,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
                         */
                        if (cpu_wait == r4k_wait_irqoff) {
                                tcrestart = read_tc_c0_tcrestart();
-                               if (tcrestart >= (unsigned long)r4k_wait_irqoff
-                                   && tcrestart < (unsigned long)__pastwait) {
+                               if (address_is_in_r4k_wait_irqoff(tcrestart)) {
                                        write_tc_c0_tcrestart(__pastwait);
                                        tcstatus &= ~TCSTATUS_IXMT;
                                        write_tc_c0_tcstatus(tcstatus);
index 25225515451f8f348d4f2ffa91a4c64c9b004a82..e3be67012d7882d23d657841d4bc7d436839c648 100644 (file)
@@ -8,8 +8,8 @@
  * Copyright (C) 1998 Ulf Carlsson
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/bug.h>
 #include <linux/compiler.h>
@@ -41,6 +41,7 @@
 #include <asm/dsp.h>
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
+#include <asm/idle.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/module.h>
 #include <asm/uasm.h>
 
 extern void check_wait(void);
-extern asmlinkage void r4k_wait(void);
 extern asmlinkage void rollback_handle_int(void);
 extern asmlinkage void handle_int(void);
-extern asmlinkage void handle_tlbm(void);
-extern asmlinkage void handle_tlbl(void);
-extern asmlinkage void handle_tlbs(void);
+extern u32 handle_tlbl[];
+extern u32 handle_tlbs[];
+extern u32 handle_tlbm[];
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
 extern asmlinkage void handle_ibe(void);
@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void);
 extern asmlinkage void handle_mcheck(void);
 extern asmlinkage void handle_reserved(void);
 
-extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
-                                   struct mips_fpu_struct *ctx, int has_fpu,
-                                   void *__user *fault_addr);
-
 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 void (*board_nmi_handler_setup)(void);
@@ -482,6 +478,12 @@ asmlinkage void do_be(struct pt_regs *regs)
 #define SYNC   0x0000000f
 #define RDHWR  0x0000003b
 
+/*  microMIPS definitions   */
+#define MM_POOL32A_FUNC 0xfc00ffff
+#define MM_RDHWR        0x00006b3c
+#define MM_RS           0x001f0000
+#define MM_RT           0x03e00000
+
 /*
  * The ll_bit is cleared by r*_switch.S
  */
@@ -596,42 +598,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
  * Simulate trapping 'rdhwr' instructions to provide user accessible
  * registers not implemented in hardware.
  */
-static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 {
        struct thread_info *ti = task_thread_info(current);
 
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+                       1, regs, 0);
+       switch (rd) {
+       case 0:         /* CPU number */
+               regs->regs[rt] = smp_processor_id();
+               return 0;
+       case 1:         /* SYNCI length */
+               regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+                                    current_cpu_data.icache.linesz);
+               return 0;
+       case 2:         /* Read count register */
+               regs->regs[rt] = read_c0_count();
+               return 0;
+       case 3:         /* Count register resolution */
+               switch (current_cpu_data.cputype) {
+               case CPU_20KC:
+               case CPU_25KF:
+                       regs->regs[rt] = 1;
+                       break;
+               default:
+                       regs->regs[rt] = 2;
+               }
+               return 0;
+       case 29:
+               regs->regs[rt] = ti->tp_value;
+               return 0;
+       default:
+               return -1;
+       }
+}
+
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+{
        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;
-               perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                               1, regs, 0);
-               switch (rd) {
-               case 0:         /* CPU number */
-                       regs->regs[rt] = smp_processor_id();
-                       return 0;
-               case 1:         /* SYNCI length */
-                       regs->regs[rt] = min(current_cpu_data.dcache.linesz,
-                                            current_cpu_data.icache.linesz);
-                       return 0;
-               case 2:         /* Read count register */
-                       regs->regs[rt] = read_c0_count();
-                       return 0;
-               case 3:         /* Count register resolution */
-                       switch (current_cpu_data.cputype) {
-                       case CPU_20KC:
-                       case CPU_25KF:
-                               regs->regs[rt] = 1;
-                               break;
-                       default:
-                               regs->regs[rt] = 2;
-                       }
-                       return 0;
-               case 29:
-                       regs->regs[rt] = ti->tp_value;
-                       return 0;
-               default:
-                       return -1;
-               }
+
+               return simulate_rdhwr(regs, rd, rt);
+       }
+
+       /* Not ours.  */
+       return -1;
+}
+
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+{
+       if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+               int rd = (opcode & MM_RS) >> 16;
+               int rt = (opcode & MM_RT) >> 21;
+               return simulate_rdhwr(regs, rd, rt);
        }
 
        /* Not ours.  */
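The usual consumer of this emulation is the TLS access sequence: userland reads hardware register 29 (UserLocal) with rdhwr, and on cores that lack the instruction or the register the access traps as a Reserved Instruction and do_ri() (or do_cpu()) supplies the value through the helpers above, simulate_rdhwr_normal() for the classic encoding and simulate_rdhwr_mm() for microMIPS. A user-space sketch, for illustration only:

    #include <stdio.h>

    int main(void)
    {
            unsigned long tp;

            /* Read HW register 29 (UserLocal, the thread pointer).  Traps and is
             * emulated by the kernel when the CPU does not implement RDHWR. */
            __asm__ volatile(
                    "       .set    push            \n"
                    "       .set    mips32r2        \n"
                    "       rdhwr   %0, $29         \n"
                    "       .set    pop             \n"
                    : "=r" (tp));

            printf("thread pointer: %#lx\n", tp);
            return 0;
    }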
@@ -662,7 +684,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
        force_sig_info(SIGFPE, &info, current);
 }
 
-static int process_fpemu_return(int sig, void __user *fault_addr)
+int process_fpemu_return(int sig, void __user *fault_addr)
 {
        if (sig == SIGSEGV || sig == SIGBUS) {
                struct siginfo si = {0};
@@ -813,9 +835,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 asmlinkage void do_bp(struct pt_regs *regs)
 {
        unsigned int opcode, bcode;
-
-       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-               goto out_sigsegv;
+       unsigned long epc;
+       u16 instr[2];
+
+       if (get_isa16_mode(regs->cp0_epc)) {
+               /* Calculate EPC. */
+               epc = exception_epc(regs);
+               if (cpu_has_mmips) {
+                       if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
+                           (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
+                               goto out_sigsegv;
+                   opcode = (instr[0] << 16) | instr[1];
+               } else {
+                   /* MIPS16e mode */
+                   if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+                               goto out_sigsegv;
+                   bcode = (instr[0] >> 6) & 0x3f;
+                   do_trap_or_bp(regs, bcode, "Break");
+                   return;
+               }
+       } else {
+               if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+                       goto out_sigsegv;
+       }
 
        /*
         * There is the ancient bug in the MIPS assemblers that the break
@@ -856,13 +898,22 @@ asmlinkage void do_bp(struct pt_regs *regs)
 asmlinkage void do_tr(struct pt_regs *regs)
 {
        unsigned int opcode, tcode = 0;
+       u16 instr[2];
+       unsigned long epc = exception_epc(regs);
 
-       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-               goto out_sigsegv;
+       if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
+               (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+                       goto out_sigsegv;
+       opcode = (instr[0] << 16) | instr[1];
 
        /* Immediate versions don't provide a code.  */
-       if (!(opcode & OPCODE))
-               tcode = ((opcode >> 6) & ((1 << 10) - 1));
+       if (!(opcode & OPCODE)) {
+               if (get_isa16_mode(regs->cp0_epc))
+                       /* microMIPS */
+                       tcode = (opcode >> 12) & 0x1f;
+               else
+                       tcode = ((opcode >> 6) & ((1 << 10) - 1));
+       }
 
        do_trap_or_bp(regs, tcode, "Trap");
        return;
@@ -875,6 +926,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
 {
        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
        unsigned long old_epc = regs->cp0_epc;
+       unsigned long old31 = regs->regs[31];
        unsigned int opcode = 0;
        int status = -1;
 
@@ -887,23 +939,37 @@ asmlinkage void do_ri(struct pt_regs *regs)
        if (unlikely(compute_return_epc(regs) < 0))
                return;
 
-       if (unlikely(get_user(opcode, epc) < 0))
-               status = SIGSEGV;
+       if (get_isa16_mode(regs->cp0_epc)) {
+               unsigned short mmop[2] = { 0 };
+
+               if (unlikely(get_user(mmop[0], epc) < 0))
+                       status = SIGSEGV;
+               if (unlikely(get_user(mmop[1], epc) < 0))
+                       status = SIGSEGV;
+               opcode = (mmop[0] << 16) | mmop[1];
 
-       if (!cpu_has_llsc && status < 0)
-               status = simulate_llsc(regs, opcode);
+               if (status < 0)
+                       status = simulate_rdhwr_mm(regs, opcode);
+       } else {
+               if (unlikely(get_user(opcode, epc) < 0))
+                       status = SIGSEGV;
 
-       if (status < 0)
-               status = simulate_rdhwr(regs, opcode);
+               if (!cpu_has_llsc && status < 0)
+                       status = simulate_llsc(regs, opcode);
 
-       if (status < 0)
-               status = simulate_sync(regs, opcode);
+               if (status < 0)
+                       status = simulate_rdhwr_normal(regs, opcode);
+
+               if (status < 0)
+                       status = simulate_sync(regs, opcode);
+       }
 
        if (status < 0)
                status = SIGILL;
 
        if (unlikely(status > 0)) {
                regs->cp0_epc = old_epc;                /* Undo skip-over.  */
+               regs->regs[31] = old31;
                force_sig(status, current);
        }
 }
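Note: do_ri() snapshots both cp0_epc and $31 before compute_return_epc() because branch emulation advances the EPC and, for branch-and-link forms, also writes the link register; when the instruction cannot be simulated, both are rolled back before the signal is raised. A minimal sketch of the pattern (hypothetical types, illustration only):

    struct regs_snapshot { unsigned long cp0_epc; unsigned long regs[32]; };

    static void undo_skip_over(struct regs_snapshot *r,
                               unsigned long old_epc, unsigned long old31)
    {
            r->cp0_epc  = old_epc;   /* undo the advance past the instruction   */
            r->regs[31] = old31;     /* undo a speculative link-register update */
    }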
@@ -973,7 +1039,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
        unsigned int __user *epc;
-       unsigned long old_epc;
+       unsigned long old_epc, old31;
        unsigned int opcode;
        unsigned int cpid;
        int status;
@@ -987,26 +1053,41 @@ asmlinkage void do_cpu(struct pt_regs *regs)
        case 0:
                epc = (unsigned int __user *)exception_epc(regs);
                old_epc = regs->cp0_epc;
+               old31 = regs->regs[31];
                opcode = 0;
                status = -1;
 
                if (unlikely(compute_return_epc(regs) < 0))
                        return;
 
-               if (unlikely(get_user(opcode, epc) < 0))
-                       status = SIGSEGV;
+               if (get_isa16_mode(regs->cp0_epc)) {
+                       unsigned short mmop[2] = { 0 };
 
-               if (!cpu_has_llsc && status < 0)
-                       status = simulate_llsc(regs, opcode);
+                       if (unlikely(get_user(mmop[0], epc) < 0))
+                               status = SIGSEGV;
+                       if (unlikely(get_user(mmop[1], epc) < 0))
+                               status = SIGSEGV;
+                       opcode = (mmop[0] << 16) | mmop[1];
 
-               if (status < 0)
-                       status = simulate_rdhwr(regs, opcode);
+                       if (status < 0)
+                               status = simulate_rdhwr_mm(regs, opcode);
+               } else {
+                       if (unlikely(get_user(opcode, epc) < 0))
+                               status = SIGSEGV;
+
+                       if (!cpu_has_llsc && status < 0)
+                               status = simulate_llsc(regs, opcode);
+
+                       if (status < 0)
+                               status = simulate_rdhwr_normal(regs, opcode);
+               }
 
                if (status < 0)
                        status = SIGILL;
 
                if (unlikely(status > 0)) {
                        regs->cp0_epc = old_epc;        /* Undo skip-over.  */
+                       regs->regs[31] = old31;
                        force_sig(status, current);
                }
 
@@ -1320,7 +1401,7 @@ asmlinkage void cache_parity_error(void)
 void ejtag_exception_handler(struct pt_regs *regs)
 {
        const int field = 2 * sizeof(unsigned long);
-       unsigned long depc, old_epc;
+       unsigned long depc, old_epc, old_ra;
        unsigned int debug;
 
        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
@@ -1335,10 +1416,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
+               old_ra = regs->regs[31];
                regs->cp0_epc = depc;
-               __compute_return_epc(regs);
+               compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
+               regs->regs[31] = old_ra;
        } else
                depc += 4;
        write_c0_depc(depc);
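Note: the EJTAG hunk borrows the branch emulator to resolve where DEPC should resume: cp0_epc is temporarily set to DEPC, compute_return_epc() decodes the branch, and the interrupted context (including $31, which branch-and-link emulation may write) is then restored. A stand-alone sketch of that borrow-and-restore idiom (the emulate_branch() stub below merely stands in for compute_return_epc() and is not a real kernel API):

    struct debug_regs { unsigned long cp0_epc; unsigned long regs[32]; };

    /* Stub: the real code decodes the branch/jump at cp0_epc. */
    static void emulate_branch(struct debug_regs *r) { r->cp0_epc += 4; }

    static unsigned long resolve_depc(struct debug_regs *r, unsigned long depc)
    {
            unsigned long old_epc = r->cp0_epc;
            unsigned long old_ra  = r->regs[31];
            unsigned long target;

            r->cp0_epc = depc;
            emulate_branch(r);
            target = r->cp0_epc;

            r->cp0_epc  = old_epc;    /* put the interrupted context back */
            r->regs[31] = old_ra;
            return target;
    }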
@@ -1377,11 +1460,27 @@ unsigned long vi_handlers[64];
 void __init *set_except_vector(int n, void *addr)
 {
        unsigned long handler = (unsigned long) addr;
-       unsigned long old_handler = exception_handlers[n];
+       unsigned long old_handler;
+
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * Only the TLB handlers are cache aligned with an even
+        * address. All other handlers are on an odd address and
+        * require no modification. Otherwise, MIPS32 mode will
+        * be entered when handling any TLB exceptions. That
+        * would be bad...since we must stay in microMIPS mode.
+        */
+       if (!(handler & 0x1))
+               handler |= 1;
+#endif
+       old_handler = xchg(&exception_handlers[n], handler);
 
-       exception_handlers[n] = handler;
        if (n == 0 && cpu_has_divec) {
+#ifdef CONFIG_CPU_MICROMIPS
+               unsigned long jump_mask = ~((1 << 27) - 1);
+#else
                unsigned long jump_mask = ~((1 << 28) - 1);
+#endif
                u32 *buf = (u32 *)(ebase + 0x200);
                unsigned int k0 = 26;
                if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
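Note: on a microMIPS kernel every exception entry point must carry the ISA mode bit (bit 0) so dispatch never drops back to the classic ISA; only the cache-aligned TLB handlers arrive with an even address, which is why set_except_vector() ORs the bit in. The reach of a single j/jal also shrinks from a 2^28-byte region to 2^27 bytes, since the 26-bit target field counts halfwords rather than words. A stand-alone sketch of both checks (illustration only, not part of the patch):

    #define MM_ISA_BIT 0x1UL   /* bit 0 of a code address selects microMIPS mode */

    /* Tag a handler address so exception dispatch stays in microMIPS mode. */
    static inline unsigned long mm_tag(unsigned long handler)
    {
            return handler | MM_ISA_BIT;
    }

    /* True if 'a' and 'b' lie in the same region reachable by one j/jal. */
    static inline int same_jump_region(unsigned long a, unsigned long b, int micromips)
    {
            unsigned long mask = micromips ? ~((1UL << 27) - 1) : ~((1UL << 28) - 1);
            return (a & mask) == (b & mask);
    }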
@@ -1397,7 +1496,7 @@ void __init *set_except_vector(int n, void *addr)
        return (void *)old_handler;
 }
 
-static asmlinkage void do_default_vi(void)
+static void do_default_vi(void)
 {
        show_regs(get_irq_regs());
        panic("Caught unexpected vectored interrupt.");
@@ -1408,17 +1507,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];
        int srssets = current_cpu_data.srsets;
-       u32 *w;
+       u16 *h;
        unsigned char *b;
 
        BUG_ON(!cpu_has_veic && !cpu_has_vint);
+       BUG_ON((n < 0) && (n > 9));
 
        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
                srs = 0;
        } else
                handler = (unsigned long) addr;
-       vi_handlers[n] = (unsigned long) addr;
+       vi_handlers[n] = handler;
 
        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
 
@@ -1437,13 +1537,12 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
        if (srs == 0) {
                /*
                 * If no shadow set is selected then use the default handler
-                * that does normal register saving and standard interrupt exit
+                * that does normal register saving and standard interrupt exit
                 */
-
                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
                extern char rollback_except_vec_vi;
-               char *vec_start = (cpu_wait == r4k_wait) ?
+               char *vec_start = using_rollback_handler() ?
                        &rollback_except_vec_vi : &except_vec_vi;
 #ifdef CONFIG_MIPS_MT_SMTC
                /*
@@ -1452,11 +1551,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
                 * Status.IM bit to be masked before going there.
                 */
                extern char except_vec_vi_mori;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+               const int mori_offset = &except_vec_vi_mori - vec_start + 2;
+#else
                const int mori_offset = &except_vec_vi_mori - vec_start;
+#endif
 #endif /* CONFIG_MIPS_MT_SMTC */
-               const int handler_len = &except_vec_vi_end - vec_start;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+               const int lui_offset = &except_vec_vi_lui - vec_start + 2;
+               const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+#else
                const int lui_offset = &except_vec_vi_lui - vec_start;
                const int ori_offset = &except_vec_vi_ori - vec_start;
+#endif
+               const int handler_len = &except_vec_vi_end - vec_start;
 
                if (handler_len > VECTORSPACING) {
                        /*
@@ -1466,30 +1574,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
                        panic("VECTORSPACING too small");
                }
 
-               memcpy(b, vec_start, handler_len);
+               set_handler(((unsigned long)b - ebase), vec_start,
+#ifdef CONFIG_CPU_MICROMIPS
+                               (handler_len - 1));
+#else
+                               handler_len);
+#endif
 #ifdef CONFIG_MIPS_MT_SMTC
                BUG_ON(n > 7);  /* Vector index %d exceeds SMTC maximum. */
 
-               w = (u32 *)(b + mori_offset);
-               *w = (*w & 0xffff0000) | (0x100 << n);
+               h = (u16 *)(b + mori_offset);
+               *h = (0x100 << n);
 #endif /* CONFIG_MIPS_MT_SMTC */
-               w = (u32 *)(b + lui_offset);
-               *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
-               w = (u32 *)(b + ori_offset);
-               *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
+               h = (u16 *)(b + lui_offset);
+               *h = (handler >> 16) & 0xffff;
+               h = (u16 *)(b + ori_offset);
+               *h = (handler & 0xffff);
                local_flush_icache_range((unsigned long)b,
                                         (unsigned long)(b+handler_len));
        }
        else {
                /*
-                * In other cases jump directly to the interrupt handler
-                *
-                * It is the handlers responsibility to save registers if required
-                * (eg hi/lo) and return from the exception using "eret"
+                * In other cases jump directly to the interrupt handler. It
+                * is the handler's responsibility to save registers if required
+                * (eg hi/lo) and return from the exception using "eret".
                 */
-               w = (u32 *)b;
-               *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
-               *w = 0;
+               u32 insn;
+
+               h = (u16 *)b;
+               /* j handler */
+#ifdef CONFIG_CPU_MICROMIPS
+               insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+#else
+               insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+#endif
+               h[0] = (insn >> 16) & 0xffff;
+               h[1] = insn & 0xffff;
+               h[2] = 0;
+               h[3] = 0;
                local_flush_icache_range((unsigned long)b,
                                         (unsigned long)(b+8));
        }
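Note: set_vi_srs_handler() now patches the 16-bit immediates of the emitted lui/ori with halfword stores, which works for both classic and microMIPS encodings; on microMIPS or big-endian the immediate is the second halfword of the 32-bit instruction, hence the +2 offsets. The direct-jump case builds the jump word itself: the microMIPS j32 target field counts halfwords, the classic j field counts words. A stand-alone sketch of the two encodings used above (illustration only):

    #include <stdint.h>

    /* Patch the 16-bit immediate slot of an already-emitted lui/ori. */
    static inline void patch_imm16(uint16_t *slot, unsigned long value)
    {
            *slot = value & 0xffff;
    }

    /* Encode an absolute jump to 'handler' within the current jump region. */
    static inline uint32_t encode_jump(unsigned long handler, int micromips)
    {
            if (micromips)      /* microMIPS j32: 26-bit halfword target */
                    return 0xd4000000 | (((uint32_t)handler & 0x07ffffff) >> 1);
            return 0x08000000 | (((uint32_t)handler & 0x0fffffff) >> 2);    /* j */
    }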
@@ -1648,7 +1770,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 /* Install CPU exception handler */
 void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+#else
        memcpy((void *)(ebase + offset), addr, size);
+#endif
        local_flush_icache_range(ebase + offset, ebase + offset + size);
 }
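Note: microMIPS handler symbols already have the ISA bit set, so the symbol value is one byte past the true start of the code; set_handler() compensates by copying from addr - 1 when CONFIG_CPU_MICROMIPS is enabled. A one-line sketch of the adjustment (illustration only):

    /* Recover the real start of a microMIPS handler whose symbol is (start | 1). */
    static inline const void *mm_code_start(const void *sym)
    {
            return (const unsigned char *)sym - 1;
    }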
 
@@ -1682,13 +1808,12 @@ __setup("rdhwr_noopt", set_rdhwr_noopt);
 
 void __init trap_init(void)
 {
-       extern char except_vec3_generic, except_vec3_r4000;
+       extern char except_vec3_generic;
        extern char except_vec4;
+       extern char except_vec3_r4000;
        unsigned long i;
-       int rollback;
 
        check_wait();
-       rollback = (cpu_wait == r4k_wait);
 
 #if defined(CONFIG_KGDB)
        if (kgdb_early_setup)
@@ -1700,7 +1825,12 @@ void __init trap_init(void)
                ebase = (unsigned long)
                        __alloc_bootmem(size, 1 << fls(size), 0);
        } else {
-               ebase = CKSEG0;
+#ifdef CONFIG_KVM_GUEST
+#define KVM_GUEST_KSEG0     0x40000000
+               ebase = KVM_GUEST_KSEG0;
+#else
+               ebase = CKSEG0;
+#endif
                if (cpu_has_mips_r2)
                        ebase += (read_c0_ebase() & 0x3ffff000);
        }
@@ -1760,7 +1890,8 @@ void __init trap_init(void)
        if (board_be_init)
                board_be_init();
 
-       set_except_vector(0, rollback ? rollback_handle_int : handle_int);
+       set_except_vector(0, using_rollback_handler() ? rollback_handle_int
+                                                     : handle_int);
        set_except_vector(1, handle_tlbm);
        set_except_vector(2, handle_tlbl);
        set_except_vector(3, handle_tlbs);
@@ -1816,11 +1947,11 @@ void __init trap_init(void)
 
        if (cpu_has_vce)
                /* Special exception: R4[04]00 uses also the divec space. */
-               memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
+               set_handler(0x180, &except_vec3_r4000, 0x100);
        else if (cpu_has_4kex)
-               memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
+               set_handler(0x180, &except_vec3_generic, 0x80);
        else
-               memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
+               set_handler(0x080, &except_vec3_generic, 0x80);
 
        local_flush_icache_range(ebase, ebase + 0x400);
        flush_tlb_handlers();
index 6087a54c86a0dbc5e6e9a167659df9408a1dc3e1..203d8857070dd225f2d8fdea7bf986ad7a6560cc 100644 (file)
 #include <asm/branch.h>
 #include <asm/byteorder.h>
 #include <asm/cop2.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
 #include <asm/inst.h>
 #include <asm/uaccess.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
 
 #define STR(x) __STR(x)
 #define __STR(x)  #x
@@ -102,12 +106,332 @@ static u32 unaligned_action;
 #endif
 extern void show_registers(struct pt_regs *regs);
 
+#ifdef __BIG_ENDIAN
+#define     LoadHW(addr, value, res)  \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+                       "1:\tlb\t%0, 0(%2)\n"               \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\t.set\tat\n\t"                  \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadW(addr, value, res)   \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, (%2)\n"               \
+                       "2:\tlwr\t%0, 3(%2)\n\t"            \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadHWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 0(%2)\n"              \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".set\tat\n\t"                      \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadWU(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, (%2)\n"               \
+                       "2:\tlwr\t%0, 3(%2)\n\t"            \
+                       "dsll\t%0, %0, 32\n\t"              \
+                       "dsrl\t%0, %0, 32\n\t"              \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tldl\t%0, (%2)\n"               \
+                       "2:\tldr\t%0, 7(%2)\n\t"            \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     StoreHW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 1(%2)\n\t"             \
+                       "srl\t$1, %1, 0x8\n"                \
+                       "2:\tsb\t$1, 0(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=r" (res)                        \
+                       : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tswl\t%1,(%2)\n"                \
+                       "2:\tswr\t%1, 3(%2)\n\t"            \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       "1:\tsdl\t%1,(%2)\n"                \
+                       "2:\tsdr\t%1, 7(%2)\n\t"            \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#define     LoadHW(addr, value, res)  \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+                       "1:\tlb\t%0, 1(%2)\n"               \
+                       "2:\tlbu\t$1, 0(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\t.set\tat\n\t"                  \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadW(addr, value, res)   \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, 3(%2)\n"              \
+                       "2:\tlwr\t%0, (%2)\n\t"             \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadHWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 1(%2)\n"              \
+                       "2:\tlbu\t$1, 0(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".set\tat\n\t"                      \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadWU(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, 3(%2)\n"              \
+                       "2:\tlwr\t%0, (%2)\n\t"             \
+                       "dsll\t%0, %0, 32\n\t"              \
+                       "dsrl\t%0, %0, 32\n\t"              \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tldl\t%0, 7(%2)\n"              \
+                       "2:\tldr\t%0, (%2)\n\t"             \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     StoreHW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 0(%2)\n\t"             \
+                       "srl\t$1,%1, 0x8\n"                 \
+                       "2:\tsb\t$1, 1(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=r" (res)                        \
+                       : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tswl\t%1, 3(%2)\n"              \
+                       "2:\tswr\t%1, (%2)\n\t"             \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       "1:\tsdl\t%1, 7(%2)\n"              \
+                       "2:\tsdr\t%1, (%2)\n\t"             \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
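Note: each Load*/Store* macro above wraps a pair of partial-word accesses (lwl/lwr, sdl/sdr, ...) in an exception-table entry: labels 1 and 2 are the faulting accesses, label 4 is the fixup that writes -EFAULT into res, and label 3 is the common continuation; the big- and little-endian variants differ only in which byte offsets the two halves touch. A portable stand-in with the same calling contract, written without MIPS assembly (hypothetical name, illustration only):

    #include <stdint.h>
    #include <string.h>

    /* Read a possibly unaligned 32-bit word byte-wise; returns 0 on success.
     * The real LoadW() instead reports -EFAULT through 'res' when the user
     * address faults and the exception-table fixup runs. */
    static int load_w(const void *addr, uint32_t *value)
    {
            memcpy(value, addr, sizeof(*value));
            return 0;
    }

Usage mirrors the macros: perform the access, test the result, and take the fault path (roll back the PC and signal) when it is non-zero.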
+
 static void emulate_load_store_insn(struct pt_regs *regs,
        void __user *addr, unsigned int __user *pc)
 {
        union mips_instruction insn;
        unsigned long value;
        unsigned int res;
+       unsigned long origpc;
+       unsigned long orig31;
+       void __user *fault_addr = NULL;
+
+       origpc = (unsigned long)pc;
+       orig31 = regs->regs[31];
 
        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
@@ -117,22 +441,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        __get_user(insn.word, pc);
 
        switch (insn.i_format.opcode) {
-       /*
-        * These are instructions that a compiler doesn't generate.  We
-        * can assume therefore that the code is MIPS-aware and
-        * really buggy.  Emulating these instructions would break the
-        * semantics anyway.
-        */
+               /*
+                * These are instructions that a compiler doesn't generate.  We
+                * can assume therefore that the code is MIPS-aware and
+                * really buggy.  Emulating these instructions would break the
+                * semantics anyway.
+                */
        case ll_op:
        case lld_op:
        case sc_op:
        case scd_op:
 
-       /*
-        * For these instructions the only way to create an address
-        * error is an attempted access to kernel/supervisor address
-        * space.
-        */
+               /*
+                * For these instructions the only way to create an address
+                * error is an attempted access to kernel/supervisor address
+                * space.
+                */
        case ldl_op:
        case ldr_op:
        case lwl_op:
@@ -146,36 +470,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        case sb_op:
                goto sigbus;
 
-       /*
-        * The remaining opcodes are the ones that are really of interest.
-        */
+               /*
+                * The remaining opcodes are the ones that are really of
+                * interest.
+                */
        case lh_op:
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;
 
-               __asm__ __volatile__ (".set\tnoat\n"
-#ifdef __BIG_ENDIAN
-                       "1:\tlb\t%0, 0(%2)\n"
-                       "2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlb\t%0, 1(%2)\n"
-                       "2:\tlbu\t$1, 0(%2)\n\t"
-#endif
-                       "sll\t%0, 0x8\n\t"
-                       "or\t%0, $1\n\t"
-                       "li\t%1, 0\n"
-                       "3:\t.set\tat\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadHW(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -186,26 +489,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 4))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tlwl\t%0, (%2)\n"
-                       "2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlwl\t%0, 3(%2)\n"
-                       "2:\tlwr\t%0, (%2)\n\t"
-#endif
-                       "li\t%1, 0\n"
-                       "3:\t.section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadW(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -216,30 +500,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-                       ".set\tnoat\n"
-#ifdef __BIG_ENDIAN
-                       "1:\tlbu\t%0, 0(%2)\n"
-                       "2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlbu\t%0, 1(%2)\n"
-                       "2:\tlbu\t$1, 0(%2)\n\t"
-#endif
-                       "sll\t%0, 0x8\n\t"
-                       "or\t%0, $1\n\t"
-                       "li\t%1, 0\n"
-                       "3:\t.set\tat\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadHWU(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -258,28 +519,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 4))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tlwl\t%0, (%2)\n"
-                       "2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlwl\t%0, 3(%2)\n"
-                       "2:\tlwr\t%0, (%2)\n\t"
-#endif
-                       "dsll\t%0, %0, 32\n\t"
-                       "dsrl\t%0, %0, 32\n\t"
-                       "li\t%1, 0\n"
-                       "3:\t.section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadWU(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -302,26 +542,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 8))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tldl\t%0, (%2)\n"
-                       "2:\tldr\t%0, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tldl\t%0, 7(%2)\n"
-                       "2:\tldr\t%0, (%2)\n\t"
-#endif
-                       "li\t%1, 0\n"
-                       "3:\t.section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadDW(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -336,68 +557,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_WRITE, addr, 2))
                        goto sigbus;
 
+               compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       ".set\tnoat\n"
-                       "1:\tsb\t%1, 1(%2)\n\t"
-                       "srl\t$1, %1, 0x8\n"
-                       "2:\tsb\t$1, 0(%2)\n\t"
-                       ".set\tat\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       ".set\tnoat\n"
-                       "1:\tsb\t%1, 0(%2)\n\t"
-                       "srl\t$1,%1, 0x8\n"
-                       "2:\tsb\t$1, 1(%2)\n\t"
-                       ".set\tat\n\t"
-#endif
-                       "li\t%0, 0\n"
-                       "3:\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%0, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=r" (res)
-                       : "r" (value), "r" (addr), "i" (-EFAULT));
+               StoreHW(addr, value, res);
                if (res)
                        goto fault;
-               compute_return_epc(regs);
                break;
 
        case sw_op:
                if (!access_ok(VERIFY_WRITE, addr, 4))
                        goto sigbus;
 
+               compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tswl\t%1,(%2)\n"
-                       "2:\tswr\t%1, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tswl\t%1, 3(%2)\n"
-                       "2:\tswr\t%1, (%2)\n\t"
-#endif
-                       "li\t%0, 0\n"
-                       "3:\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%0, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-               : "=r" (res)
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               StoreW(addr, value, res);
                if (res)
                        goto fault;
-               compute_return_epc(regs);
                break;
 
        case sd_op:
@@ -412,31 +587,11 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_WRITE, addr, 8))
                        goto sigbus;
 
+               compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tsdl\t%1,(%2)\n"
-                       "2:\tsdr\t%1, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tsdl\t%1, 7(%2)\n"
-                       "2:\tsdr\t%1, (%2)\n\t"
-#endif
-                       "li\t%0, 0\n"
-                       "3:\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%0, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-               : "=r" (res)
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               StoreDW(addr, value, res);
                if (res)
                        goto fault;
-               compute_return_epc(regs);
                break;
 #endif /* CONFIG_64BIT */
 
@@ -447,10 +602,21 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        case ldc1_op:
        case swc1_op:
        case sdc1_op:
-               /*
-                * I herewith declare: this does not happen.  So send SIGBUS.
-                */
-               goto sigbus;
+               die_if_kernel("Unaligned FP access in kernel code", regs);
+               BUG_ON(!used_math());
+               BUG_ON(!is_fpu_owner());
+
+               lose_fpu(1);    /* Save FPU state for the emulator. */
+               res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+                                              &fault_addr);
+               own_fpu(1);     /* Restore FPU state. */
+
+               /* Signal if something went wrong. */
+               process_fpemu_return(res, fault_addr);
+
+               if (res == 0)
+                       break;
+               return;
 
        /*
         * COP2 is available to implementor for application specific use.
@@ -488,6 +654,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        return;
 
 fault:
+       /* roll back jump/branch */
+       regs->cp0_epc = origpc;
+       regs->regs[31] = orig31;
        /* Did we have an exception handler installed? */
        if (fixup_exception(regs))
                return;
@@ -504,10 +673,881 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        return;
 
 sigill:
-       die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
+       die_if_kernel
+           ("Unhandled kernel unaligned access or invalid instruction", regs);
        force_sig(SIGILL, current);
 }
 
+/* Recode table from 16-bit register notation to 32-bit GPR. */
+const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
+const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+
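Note: the two tables above decode the 3-bit register fields used by 16-bit microMIPS encodings: general operands map fields 0..7 onto $16, $17 and $2..$7, while the store-source variant maps field 0 to $0 instead of $16. A stand-alone sketch of the lookup (illustration only):

    /* 3-bit register field of a 16-bit encoding -> GPR number. */
    static const int mm16_regmap[8] = { 16, 17, 2, 3, 4, 5, 6, 7 };

    static inline int mm16_gpr(unsigned int field)
    {
            return mm16_regmap[field & 0x7];
    }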
+void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
+{
+       unsigned long value;
+       unsigned int res;
+       int i;
+       unsigned int reg = 0, rvar;
+       unsigned long orig31;
+       u16 __user *pc16;
+       u16 halfword;
+       unsigned int word;
+       unsigned long origpc, contpc;
+       union mips_instruction insn;
+       struct mm_decoded_insn mminsn;
+       void __user *fault_addr = NULL;
+
+       origpc = regs->cp0_epc;
+       orig31 = regs->regs[31];
+
+       mminsn.micro_mips_mode = 1;
+
+       /*
+        * This load never faults.
+        */
+       pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+       __get_user(halfword, pc16);
+       pc16++;
+       contpc = regs->cp0_epc + 2;
+       word = ((unsigned int)halfword << 16);
+       mminsn.pc_inc = 2;
+
+       if (!mm_insn_16bit(halfword)) {
+               __get_user(halfword, pc16);
+               pc16++;
+               contpc = regs->cp0_epc + 4;
+               mminsn.pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.insn = word;
+
+       if (get_user(halfword, pc16))
+               goto fault;
+       mminsn.next_pc_inc = 2;
+       word = ((unsigned int)halfword << 16);
+
+       if (!mm_insn_16bit(halfword)) {
+               pc16++;
+               if (get_user(halfword, pc16))
+                       goto fault;
+               mminsn.next_pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.next_insn = word;
+
+       insn = (union mips_instruction)(mminsn.insn);
+       if (mm_isBranchInstr(regs, mminsn, &contpc))
+               insn = (union mips_instruction)(mminsn.next_insn);
+
+       /*  Parse instruction to find what to do */
+
+       switch (insn.mm_i_format.opcode) {
+
+       case mm_pool32a_op:
+               switch (insn.mm_x_format.func) {
+               case mm_lwxs_op:
+                       reg = insn.mm_x_format.rd;
+                       goto loadW;
+               }
+
+               goto sigbus;
+
+       case mm_pool32b_op:
+               switch (insn.mm_m_format.func) {
+               case mm_lwp_func:
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_READ, addr, 8))
+                               goto sigbus;
+
+                       LoadW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg] = value;
+                       addr += 4;
+                       LoadW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg + 1] = value;
+                       goto success;
+
+               case mm_swp_func:
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_WRITE, addr, 8))
+                               goto sigbus;
+
+                       value = regs->regs[reg];
+                       StoreW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       addr += 4;
+                       value = regs->regs[reg + 1];
+                       StoreW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       goto success;
+
+               case mm_ldp_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_READ, addr, 16))
+                               goto sigbus;
+
+                       LoadDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg] = value;
+                       addr += 8;
+                       LoadDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg + 1] = value;
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+               case mm_sdp_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_WRITE, addr, 16))
+                               goto sigbus;
+
+                       value = regs->regs[reg];
+                       StoreDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       addr += 8;
+                       value = regs->regs[reg + 1];
+                       StoreDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+               case mm_lwm32_func:
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_READ, addr, 4 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+                       for (i = 16; rvar; rvar--, i++) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[i] = value;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[30] = value;
+                       }
+                       if (reg & 0x10) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               regs->regs[31] = value;
+                       }
+                       goto success;
+
+               case mm_swm32_func:
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_WRITE, addr, 4 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+                       for (i = 16; rvar; rvar--, i++) {
+                               value = regs->regs[i];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               value = regs->regs[30];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                       }
+                       if (reg & 0x10) {
+                               value = regs->regs[31];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                       }
+                       goto success;
+
+               case mm_ldm_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_READ, addr, 8 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_READ, addr, 8 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               LoadDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[i] = value;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               LoadDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 8;
+                               regs->regs[30] = value;
+                       }
+                       if (reg & 0x10) {
+                               LoadDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               regs->regs[31] = value;
+                       }
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+               case mm_sdm_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_WRITE, addr, 8 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               value = regs->regs[i];
+                               StoreDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 8;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               value = regs->regs[30];
+                               StoreDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 8;
+                       }
+                       if (reg & 0x10) {
+                               value = regs->regs[31];
+                               StoreDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                       }
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+                       /*  LWC2, SWC2, LDC2, SDC2 are not serviced */
+               }
+
+               goto sigbus;
+
+       case mm_pool32c_op:
+               switch (insn.mm_m_format.func) {
+               case mm_lwu_func:
+                       reg = insn.mm_m_format.rd;
+                       goto loadWU;
+               }
+
+               /*  LL,SC,LLD,SCD are not serviced */
+               goto sigbus;
+
+       case mm_pool32f_op:
+               switch (insn.mm_x_format.func) {
+               case mm_lwxc1_func:
+               case mm_swxc1_func:
+               case mm_ldxc1_func:
+               case mm_sdxc1_func:
+                       goto fpu_emul;
+               }
+
+               goto sigbus;
+
+       case mm_ldc132_op:
+       case mm_sdc132_op:
+       case mm_lwc132_op:
+       case mm_swc132_op:
+fpu_emul:
+               /* roll back jump/branch */
+               regs->cp0_epc = origpc;
+               regs->regs[31] = orig31;
+
+               die_if_kernel("Unaligned FP access in kernel code", regs);
+               BUG_ON(!used_math());
+               BUG_ON(!is_fpu_owner());
+
+               lose_fpu(1);    /* save the FPU state for the emulator */
+               res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+                                              &fault_addr);
+               own_fpu(1);     /* restore FPU state */
+
+               /* If something went wrong, signal */
+               process_fpemu_return(res, fault_addr);
+
+               if (res == 0)
+                       goto success;
+               return;
+
+       case mm_lh32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadHW;
+
+       case mm_lhu32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadHWU;
+
+       case mm_lw32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadW;
+
+       case mm_sh32_op:
+               reg = insn.mm_i_format.rt;
+               goto storeHW;
+
+       case mm_sw32_op:
+               reg = insn.mm_i_format.rt;
+               goto storeW;
+
+       case mm_ld32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadDW;
+
+       case mm_sd32_op:
+               reg = insn.mm_i_format.rt;
+               goto storeDW;
+
+       case mm_pool16c_op:
+               switch (insn.mm16_m_format.func) {
+               case mm_lwm16_op:
+                       reg = insn.mm16_m_format.rlist;
+                       rvar = reg + 1;
+                       if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+                               goto sigbus;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[i] = value;
+                       }
+                       LoadW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[31] = value;
+
+                       goto success;
+
+               case mm_swm16_op:
+                       reg = insn.mm16_m_format.rlist;
+                       rvar = reg + 1;
+                       if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+                               goto sigbus;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               value = regs->regs[i];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                       }
+                       value = regs->regs[31];
+                       StoreW(addr, value, res);
+                       if (res)
+                               goto fault;
+
+                       goto success;
+
+               }
+
+               goto sigbus;
+
+       case mm_lhu16_op:
+               reg = reg16to32[insn.mm16_rb_format.rt];
+               goto loadHWU;
+
+       case mm_lw16_op:
+               reg = reg16to32[insn.mm16_rb_format.rt];
+               goto loadW;
+
+       case mm_sh16_op:
+               reg = reg16to32st[insn.mm16_rb_format.rt];
+               goto storeHW;
+
+       case mm_sw16_op:
+               reg = reg16to32st[insn.mm16_rb_format.rt];
+               goto storeW;
+
+       case mm_lwsp16_op:
+               reg = insn.mm16_r5_format.rt;
+               goto loadW;
+
+       case mm_swsp16_op:
+               reg = insn.mm16_r5_format.rt;
+               goto storeW;
+
+       case mm_lwgp16_op:
+               reg = reg16to32[insn.mm16_r3_format.rt];
+               goto loadW;
+
+       default:
+               goto sigill;
+       }
+
+loadHW:
+       if (!access_ok(VERIFY_READ, addr, 2))
+               goto sigbus;
+
+       LoadHW(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+
+loadHWU:
+       if (!access_ok(VERIFY_READ, addr, 2))
+               goto sigbus;
+
+       LoadHWU(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+
+loadW:
+       if (!access_ok(VERIFY_READ, addr, 4))
+               goto sigbus;
+
+       LoadW(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+
+loadWU:
+#ifdef CONFIG_64BIT
+       /*
+        * A 32-bit kernel might be running on a 64-bit processor.  But
+        * if we're on a 32-bit processor and an i-cache incoherency
+        * or race makes us see a 64-bit instruction here the sdl/sdr
+        * would blow up, so for now we don't handle unaligned 64-bit
+        * instructions on 32-bit kernels.
+        */
+       if (!access_ok(VERIFY_READ, addr, 4))
+               goto sigbus;
+
+       LoadWU(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+#endif /* CONFIG_64BIT */
+
+       /* Cannot handle 64-bit instructions in 32-bit kernel */
+       goto sigill;
+
+loadDW:
+#ifdef CONFIG_64BIT
+       /*
+        * A 32-bit kernel might be running on a 64-bit processor.  But
+        * if we're on a 32-bit processor and an i-cache incoherency
+        * or race makes us see a 64-bit instruction here the sdl/sdr
+        * would blow up, so for now we don't handle unaligned 64-bit
+        * instructions on 32-bit kernels.
+        */
+       if (!access_ok(VERIFY_READ, addr, 8))
+               goto sigbus;
+
+       LoadDW(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+#endif /* CONFIG_64BIT */
+
+       /* Cannot handle 64-bit instructions in 32-bit kernel */
+       goto sigill;
+
+storeHW:
+       if (!access_ok(VERIFY_WRITE, addr, 2))
+               goto sigbus;
+
+       value = regs->regs[reg];
+       StoreHW(addr, value, res);
+       if (res)
+               goto fault;
+       goto success;
+
+storeW:
+       if (!access_ok(VERIFY_WRITE, addr, 4))
+               goto sigbus;
+
+       value = regs->regs[reg];
+       StoreW(addr, value, res);
+       if (res)
+               goto fault;
+       goto success;
+
+storeDW:
+#ifdef CONFIG_64BIT
+       /*
+        * A 32-bit kernel might be running on a 64-bit processor.  But
+        * if we're on a 32-bit processor and an i-cache incoherency
+        * or race makes us see a 64-bit instruction here the sdl/sdr
+        * would blow up, so for now we don't handle unaligned 64-bit
+        * instructions on 32-bit kernels.
+        */
+       if (!access_ok(VERIFY_WRITE, addr, 8))
+               goto sigbus;
+
+       value = regs->regs[reg];
+       StoreDW(addr, value, res);
+       if (res)
+               goto fault;
+       goto success;
+#endif /* CONFIG_64BIT */
+
+       /* Cannot handle 64-bit instructions in 32-bit kernel */
+       goto sigill;
+
+success:
+       regs->cp0_epc = contpc; /* advance or branch */
+
+#ifdef CONFIG_DEBUG_FS
+       unaligned_instructions++;
+#endif
+       return;
+
+fault:
+       /* roll back jump/branch */
+       regs->cp0_epc = origpc;
+       regs->regs[31] = orig31;
+       /* Did we have an exception handler installed? */
+       if (fixup_exception(regs))
+               return;
+
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGSEGV, current);
+
+       return;
+
+sigbus:
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGBUS, current);
+
+       return;
+
+sigill:
+       die_if_kernel
+           ("Unhandled kernel unaligned access or invalid instruction", regs);
+       force_sig(SIGILL, current);
+}
+
+static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
+{
+       unsigned long value;
+       unsigned int res;
+       int reg;
+       unsigned long orig31;
+       u16 __user *pc16;
+       unsigned long origpc;
+       union mips16e_instruction mips16inst, oldinst;
+
+       origpc = regs->cp0_epc;
+       orig31 = regs->regs[31];
+       pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
+       /*
+        * This load never faults.
+        */
+       __get_user(mips16inst.full, pc16);
+       oldinst = mips16inst;
+
+       /* skip EXTEND instruction */
+       if (mips16inst.ri.opcode == MIPS16e_extend_op) {
+               pc16++;
+               __get_user(mips16inst.full, pc16);
+       } else if (delay_slot(regs)) {
+               /*  skip jump instructions */
+               /*  JAL/JALX are 32 bits but have OPCODE in first short int */
+               if (mips16inst.ri.opcode == MIPS16e_jal_op)
+                       pc16++;
+               pc16++;
+               if (get_user(mips16inst.full, pc16))
+                       goto sigbus;
+       }
+
+       switch (mips16inst.ri.opcode) {
+       case MIPS16e_i64_op:    /* I64 or RI64 instruction */
+               switch (mips16inst.i64.func) {  /* I64/RI64 func field check */
+               case MIPS16e_ldpc_func:
+               case MIPS16e_ldsp_func:
+                       reg = reg16to32[mips16inst.ri64.ry];
+                       goto loadDW;
+
+               case MIPS16e_sdsp_func:
+                       reg = reg16to32[mips16inst.ri64.ry];
+                       goto writeDW;
+
+               case MIPS16e_sdrasp_func:
+                       reg = 29;       /* GPRSP */
+                       goto writeDW;
+               }
+
+               goto sigbus;
+
+       case MIPS16e_swsp_op:
+       case MIPS16e_lwpc_op:
+       case MIPS16e_lwsp_op:
+               reg = reg16to32[mips16inst.ri.rx];
+               break;
+
+       case MIPS16e_i8_op:
+               if (mips16inst.i8.func != MIPS16e_swrasp_func)
+                       goto sigbus;
+               reg = 29;       /* GPRSP */
+               break;
+
+       default:
+               reg = reg16to32[mips16inst.rri.ry];
+               break;
+       }
+
+       switch (mips16inst.ri.opcode) {
+
+       case MIPS16e_lb_op:
+       case MIPS16e_lbu_op:
+       case MIPS16e_sb_op:
+               goto sigbus;
+
+       case MIPS16e_lh_op:
+               if (!access_ok(VERIFY_READ, addr, 2))
+                       goto sigbus;
+
+               LoadHW(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+
+       case MIPS16e_lhu_op:
+               if (!access_ok(VERIFY_READ, addr, 2))
+                       goto sigbus;
+
+               LoadHWU(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+
+       case MIPS16e_lw_op:
+       case MIPS16e_lwpc_op:
+       case MIPS16e_lwsp_op:
+               if (!access_ok(VERIFY_READ, addr, 4))
+                       goto sigbus;
+
+               LoadW(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+
+       case MIPS16e_lwu_op:
+#ifdef CONFIG_64BIT
+               /*
+                * A 32-bit kernel might be running on a 64-bit processor.  But
+                * if we're on a 32-bit processor and an i-cache incoherency
+                * or race makes us see a 64-bit instruction here the sdl/sdr
+                * would blow up, so for now we don't handle unaligned 64-bit
+                * instructions on 32-bit kernels.
+                */
+               if (!access_ok(VERIFY_READ, addr, 4))
+                       goto sigbus;
+
+               LoadWU(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+#endif /* CONFIG_64BIT */
+
+               /* Cannot handle 64-bit instructions in 32-bit kernel */
+               goto sigill;
+
+       case MIPS16e_ld_op:
+loadDW:
+#ifdef CONFIG_64BIT
+               /*
+                * A 32-bit kernel might be running on a 64-bit processor.  But
+                * if we're on a 32-bit processor and an i-cache incoherency
+                * or race makes us see a 64-bit instruction here the sdl/sdr
+                * would blow up, so for now we don't handle unaligned 64-bit
+                * instructions on 32-bit kernels.
+                */
+               if (!access_ok(VERIFY_READ, addr, 8))
+                       goto sigbus;
+
+               LoadDW(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+#endif /* CONFIG_64BIT */
+
+               /* Cannot handle 64-bit instructions in 32-bit kernel */
+               goto sigill;
+
+       case MIPS16e_sh_op:
+               if (!access_ok(VERIFY_WRITE, addr, 2))
+                       goto sigbus;
+
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               value = regs->regs[reg];
+               StoreHW(addr, value, res);
+               if (res)
+                       goto fault;
+               break;
+
+       case MIPS16e_sw_op:
+       case MIPS16e_swsp_op:
+       case MIPS16e_i8_op:     /* actually - MIPS16e_swrasp_func */
+               if (!access_ok(VERIFY_WRITE, addr, 4))
+                       goto sigbus;
+
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               value = regs->regs[reg];
+               StoreW(addr, value, res);
+               if (res)
+                       goto fault;
+               break;
+
+       case MIPS16e_sd_op:
+writeDW:
+#ifdef CONFIG_64BIT
+               /*
+                * A 32-bit kernel might be running on a 64-bit processor.  But
+                * if we're on a 32-bit processor and an i-cache incoherency
+                * or race makes us see a 64-bit instruction here the sdl/sdr
+                * would blow up, so for now we don't handle unaligned 64-bit
+                * instructions on 32-bit kernels.
+                */
+               if (!access_ok(VERIFY_WRITE, addr, 8))
+                       goto sigbus;
+
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               value = regs->regs[reg];
+               StoreDW(addr, value, res);
+               if (res)
+                       goto fault;
+               break;
+#endif /* CONFIG_64BIT */
+
+               /* Cannot handle 64-bit instructions in 32-bit kernel */
+               goto sigill;
+
+       default:
+               /*
+                * Pheeee...  We encountered an as yet unknown instruction or
+                * cache coherence problem.  Die sucker, die ...
+                */
+               goto sigill;
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       unaligned_instructions++;
+#endif
+
+       return;
+
+fault:
+       /* roll back jump/branch */
+       regs->cp0_epc = origpc;
+       regs->regs[31] = orig31;
+       /* Did we have an exception handler installed? */
+       if (fixup_exception(regs))
+               return;
+
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGSEGV, current);
+
+       return;
+
+sigbus:
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGBUS, current);
+
+       return;
+
+sigill:
+       die_if_kernel
+           ("Unhandled kernel unaligned access or invalid instruction", regs);
+       force_sig(SIGILL, current);
+}
 asmlinkage void do_ade(struct pt_regs *regs)
 {
        unsigned int __user *pc;
@@ -517,23 +1557,62 @@ asmlinkage void do_ade(struct pt_regs *regs)
                        1, regs, regs->cp0_badvaddr);
        /*
         * Did we catch a fault trying to load an instruction?
-        * Or are we running in MIPS16 mode?
         */
-       if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
+       if (regs->cp0_badvaddr == regs->cp0_epc)
                goto sigbus;
 
-       pc = (unsigned int __user *) exception_epc(regs);
        if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
                goto sigbus;
        if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
                goto sigbus;
-       else if (unaligned_action == UNALIGNED_ACTION_SHOW)
-               show_registers(regs);
 
        /*
         * Do branch emulation only if we didn't forward the exception.
         * This is all so but ugly ...
         */
+
+       /*
+        * Are we running in microMIPS mode?
+        */
+       if (get_isa16_mode(regs->cp0_epc)) {
+               /*
+                * Did we catch a fault trying to load an instruction in
+                * 16-bit mode?
+                */
+               if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
+                       goto sigbus;
+               if (unaligned_action == UNALIGNED_ACTION_SHOW)
+                       show_registers(regs);
+
+               if (cpu_has_mmips) {
+                       seg = get_fs();
+                       if (!user_mode(regs))
+                               set_fs(KERNEL_DS);
+                       emulate_load_store_microMIPS(regs,
+                               (void __user *)regs->cp0_badvaddr);
+                       set_fs(seg);
+
+                       return;
+               }
+
+               if (cpu_has_mips16) {
+                       seg = get_fs();
+                       if (!user_mode(regs))
+                               set_fs(KERNEL_DS);
+                       emulate_load_store_MIPS16e(regs,
+                               (void __user *)regs->cp0_badvaddr);
+                       set_fs(seg);
+
+                       return;
+               }
+
+               goto sigbus;
+       }
+
+       if (unaligned_action == UNALIGNED_ACTION_SHOW)
+               show_registers(regs);
+       pc = (unsigned int __user *)exception_epc(regs);
+
        seg = get_fs();
        if (!user_mode(regs))
                set_fs(KERNEL_DS);
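For reference, the 5-bit register list (the rd field) handled by the LWM/SWM/LDM/SDM cases above packs up to eight saved registers plus optional fp and ra. A minimal C sketch of that decoding, with an invented helper name, following the same bit tests the emulation performs:

/* Illustrative only: decode the microMIPS LWM/SWM/LDM/SDM register list. */
static int decode_mm_reglist(unsigned int rd, unsigned int *nr_sregs,
			     int *want_fp, int *want_ra)
{
	unsigned int n = rd & 0xf;

	if (n > 9 || !rd)
		return -1;			/* reserved encoding -> SIGILL */
	*want_fp  = (n == 9);			/* 9 selects s0..s7 plus fp ($30) */
	*nr_sregs = *want_fp ? 8 : n;		/* otherwise s0..s(n-1) */
	*want_ra  = !!(rd & 0x10);		/* bit 4 adds ra ($31) */
	return 0;
}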
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644 (file)
index 0000000..51617e4
--- /dev/null
@@ -0,0 +1,31 @@
+KVM/MIPS Trap & Emulate Release Notes
+=====================================
+
+(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
+    Malta Board with FPGA based 34K
+    Sigma Designs TangoX board with a 24K based 8654 SoC.
+    Malta Board with 74K @ 1GHz
+
+(2) Both the Guest kernel and Guest Userspace execute in UM (user mode).
+    Guest User address space:   0x00000000 -> 0x40000000
+    Guest Kernel Unmapped:      0x40000000 -> 0x60000000
+    Guest Kernel Mapped:        0x60000000 -> 0x80000000
+
+    Guest Usermode virtual memory is limited to 1GB.
+
+(3) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
+    Note that due to cache aliasing issues, 4K page sizes are NOT supported.
+
+(4) No HugeTLB Support
+    Both the host kernel and Guest kernel should have the page size set to 16K.
+    This will be implemented in a future release.
+
+(5) KVM/MIPS does not support SMP Guests
+    Linux-3.7-rc2 based SMP guests hang due to the following code sequence in the generated TLB handlers:
+       LL/TLBP/SC.  Since the TLBP instruction causes a trap, the reservation gets cleared
+       when we ERET back to the guest. This causes the guest to hang in an infinite loop.
+       This will be fixed in a future release.
+
+(6) Use Host FPU
+    Currently KVM/MIPS emulates a 24K CPU without an FPU.
+    This will be fixed in a future release.
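The guest address-space split described in note (2) above amounts to a simple range check on guest virtual addresses. The enum and helper below are illustrative only; the constants come straight from that note.

enum guest_seg { GUEST_USER, GUEST_KSEG_UNMAPPED, GUEST_KSEG_MAPPED, GUEST_INVALID };

/* Classify a guest virtual address according to the layout in note (2). */
static enum guest_seg classify_gva(unsigned long gva)
{
	if (gva < 0x40000000UL)
		return GUEST_USER;		/* 1GB of guest user space */
	if (gva < 0x60000000UL)
		return GUEST_KSEG_UNMAPPED;	/* guest kernel unmapped */
	if (gva < 0x80000000UL)
		return GUEST_KSEG_MAPPED;	/* guest kernel mapped */
	return GUEST_INVALID;
}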
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644 (file)
index 0000000..2c15590
--- /dev/null
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       depends on HAVE_KVM
+       ---help---
+         Say Y here to get to see options for using your Linux host to run
+         other operating systems inside virtual machines (guests).
+         This option alone does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+       tristate "Kernel-based Virtual Machine (KVM) support"
+       depends on HAVE_KVM
+       select PREEMPT_NOTIFIERS
+       select ANON_INODES
+       select KVM_MMIO
+       ---help---
+         Support for hosting Guest kernels.
+         Currently supported on MIPS32 processors.
+
+config KVM_MIPS_DYN_TRANS
+       bool "KVM/MIPS: Dynamic binary translation to reduce traps"
+       depends on KVM
+       ---help---
+         When running in Trap & Emulate mode, patch privileged
+         instructions to reduce the number of traps.
+
+         If unsure, say Y.
+
+config KVM_MIPS_DEBUG_COP0_COUNTERS
+       bool "Maintain counters for COP0 accesses"
+       depends on KVM
+       ---help---
+         Maintain statistics for Guest COP0 accesses.
+         A histogram of COP0 accesses is printed when the VM is
+         shutdown.
+
+         If unsure, say N.
+
+source drivers/vhost/Kconfig
+
+endif # VIRTUALIZATION
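A minimal .config fragment selecting these options might look like the following (illustrative only; KVM additionally pulls in PREEMPT_NOTIFIERS, ANON_INODES and KVM_MMIO through the select statements above):

CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_KVM_MIPS_DYN_TRANS=y
# CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS is not set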
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644 (file)
index 0000000..78d87bb
--- /dev/null
@@ -0,0 +1,13 @@
+# Makefile for KVM support for MIPS
+#
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+
+kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
+           kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
+           kvm_mips_dyntrans.o kvm_trap_emul.o
+
+obj-$(CONFIG_KVM)      += kvm.o
+obj-y                  += kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644 (file)
index 0000000..313c2e3
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL(kvm_mips_callbacks);
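The exported pointer is the hook through which an implementation plugs itself in. A hypothetical backend (for instance the trap-and-emulate code built from kvm_trap_emul.o in the Makefile above) would point kvm_mips_callbacks at its own ops table during init; the sketch below uses invented names and leaves the members referenced elsewhere in this patch (vm_init, queue_io_int, dequeue_io_int, ...) unfilled.

#include <linux/kvm_host.h>

/* Illustrative backend registration, not the actual trap-and-emulate code. */
static struct kvm_mips_callbacks example_callbacks = {
	/* .vm_init = ..., .queue_io_int = ..., .dequeue_io_int = ..., */
};

int example_backend_init(void)
{
	kvm_mips_callbacks = &example_callbacks;
	return 0;
}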
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644 (file)
index 0000000..dca2aa6
--- /dev/null
@@ -0,0 +1,650 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Main entry point for the guest, exception handling.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+
+
+#define _C_LABEL(x)     x
+#define MIPSX(name)     mips32_ ## name
+#define CALLFRAME_SIZ   32
+
+/*
+ * VECTOR
+ *  exception vector entrypoint
+ */
+#define VECTOR(x, regmask)      \
+    .ent    _C_LABEL(x),0;      \
+    EXPORT(x);
+
+#define VECTOR_END(x)      \
+    EXPORT(x);
+
+/* Overload, Danger Will Robinson!! */
+#define PT_HOST_ASID        PT_BVADDR
+#define PT_HOST_USERLOCAL   PT_EPC
+
+#define CP0_DDATA_LO        $28,3
+#define CP0_EBASE           $15,1
+
+#define CP0_INTCTL          $12,1
+#define CP0_SRSCTL          $12,2
+#define CP0_SRSMAP          $12,3
+#define CP0_HWRENA          $7,0
+
+/* Resume Flags */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+/*
+ * __kvm_mips_vcpu_run: entry point to the guest
+ * a0: run
+ * a1: vcpu
+ */
+
+FEXPORT(__kvm_mips_vcpu_run)
+    .set    push
+    .set    noreorder
+    .set    noat
+
+    /* k0/k1 not being used in host kernel context */
+       addiu           k1,sp, -PT_SIZE
+    LONG_S         $0, PT_R0(k1)
+    LONG_S             $1, PT_R1(k1)
+    LONG_S             $2, PT_R2(k1)
+    LONG_S             $3, PT_R3(k1)
+
+    LONG_S             $4, PT_R4(k1)
+    LONG_S             $5, PT_R5(k1)
+    LONG_S             $6, PT_R6(k1)
+    LONG_S             $7, PT_R7(k1)
+
+    LONG_S             $8,  PT_R8(k1)
+    LONG_S             $9,  PT_R9(k1)
+    LONG_S             $10, PT_R10(k1)
+    LONG_S             $11, PT_R11(k1)
+    LONG_S             $12, PT_R12(k1)
+    LONG_S             $13, PT_R13(k1)
+    LONG_S             $14, PT_R14(k1)
+    LONG_S             $15, PT_R15(k1)
+    LONG_S             $16, PT_R16(k1)
+    LONG_S             $17, PT_R17(k1)
+
+    LONG_S             $18, PT_R18(k1)
+    LONG_S             $19, PT_R19(k1)
+    LONG_S             $20, PT_R20(k1)
+    LONG_S             $21, PT_R21(k1)
+    LONG_S             $22, PT_R22(k1)
+    LONG_S             $23, PT_R23(k1)
+    LONG_S             $24, PT_R24(k1)
+    LONG_S             $25, PT_R25(k1)
+
+       /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+
+    LONG_S             $28, PT_R28(k1)
+    LONG_S             $29, PT_R29(k1)
+    LONG_S             $30, PT_R30(k1)
+    LONG_S             $31, PT_R31(k1)
+
+    /* Save hi/lo */
+       mflo            v0
+       LONG_S          v0, PT_LO(k1)
+       mfhi            v1
+       LONG_S          v1, PT_HI(k1)
+
+       /* Save host status */
+       mfc0            v0, CP0_STATUS
+       LONG_S          v0, PT_STATUS(k1)
+
+       /* Save host ASID, shove it into the BVADDR location */
+       mfc0            v1,CP0_ENTRYHI
+       andi            v1, 0xff
+       LONG_S          v1, PT_HOST_ASID(k1)
+
+    /* Save DDATA_LO, will be used to store pointer to vcpu */
+    mfc0        v1, CP0_DDATA_LO
+    LONG_S      v1, PT_HOST_USERLOCAL(k1)
+
+    /* DDATA_LO has pointer to vcpu */
+    mtc0        a1,CP0_DDATA_LO
+
+    /* Offset into vcpu->arch */
+       addiu           k1, a1, VCPU_HOST_ARCH
+
+    /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
+    LONG_S      sp, VCPU_HOST_STACK(k1)
+
+    /* Save the kernel gp as well */
+    LONG_S      gp, VCPU_HOST_GP(k1)
+
+       /* Setup status register for running the guest in UM, interrupts are disabled */
+       li                      k0,(ST0_EXL | KSU_USER| ST0_BEV)
+       mtc0            k0,CP0_STATUS
+    ehb
+
+    /* load up the new EBASE */
+    LONG_L      k0, VCPU_GUEST_EBASE(k1)
+    mtc0        k0,CP0_EBASE
+
+    /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
+     * but make sure that timer interrupts are enabled
+     */
+    li          k0,(ST0_EXL | KSU_USER | ST0_IE)
+    andi        v0, v0, ST0_IM
+    or          k0, k0, v0
+    mtc0        k0,CP0_STATUS
+    ehb
+
+
+       /* Set Guest EPC */
+       LONG_L          t0, VCPU_PC(k1)
+       mtc0            t0, CP0_EPC
+
+FEXPORT(__kvm_mips_load_asid)
+    /* Set the ASID for the Guest Kernel */
+    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
+                                                /* addresses shift to 0x80000000 */
+    bltz        t0, 1f                          /* If kernel */
+       addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+    addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+    /* t1: contains the base of the ASID array, need to get the cpu id  */
+    LONG_L      t2, TI_CPU($28)             /* smp_processor_id */
+    sll         t2, t2, 2                   /* x4 */
+    addu        t3, t1, t2
+    LONG_L      k0, (t3)
+    andi        k0, k0, 0xff
+       mtc0            k0,CP0_ENTRYHI
+    ehb
+
+    /* Disable RDHWR access */
+    mtc0    zero,  CP0_HWRENA
+
+    /* Now load up the Guest Context from VCPU */
+    LONG_L             $1, VCPU_R1(k1)
+    LONG_L             $2, VCPU_R2(k1)
+    LONG_L             $3, VCPU_R3(k1)
+
+    LONG_L             $4, VCPU_R4(k1)
+    LONG_L             $5, VCPU_R5(k1)
+    LONG_L             $6, VCPU_R6(k1)
+    LONG_L             $7, VCPU_R7(k1)
+
+    LONG_L             $8,  VCPU_R8(k1)
+    LONG_L             $9,  VCPU_R9(k1)
+    LONG_L             $10, VCPU_R10(k1)
+    LONG_L             $11, VCPU_R11(k1)
+    LONG_L             $12, VCPU_R12(k1)
+    LONG_L             $13, VCPU_R13(k1)
+    LONG_L             $14, VCPU_R14(k1)
+    LONG_L             $15, VCPU_R15(k1)
+    LONG_L             $16, VCPU_R16(k1)
+    LONG_L             $17, VCPU_R17(k1)
+    LONG_L             $18, VCPU_R18(k1)
+    LONG_L             $19, VCPU_R19(k1)
+    LONG_L             $20, VCPU_R20(k1)
+    LONG_L             $21, VCPU_R21(k1)
+    LONG_L             $22, VCPU_R22(k1)
+    LONG_L             $23, VCPU_R23(k1)
+    LONG_L             $24, VCPU_R24(k1)
+    LONG_L             $25, VCPU_R25(k1)
+
+    /* k0/k1 loaded up later */
+
+    LONG_L             $28, VCPU_R28(k1)
+    LONG_L             $29, VCPU_R29(k1)
+    LONG_L             $30, VCPU_R30(k1)
+    LONG_L             $31, VCPU_R31(k1)
+
+    /* Restore hi/lo */
+       LONG_L          k0, VCPU_LO(k1)
+       mtlo            k0
+
+       LONG_L          k0, VCPU_HI(k1)
+       mthi            k0
+
+FEXPORT(__kvm_mips_load_k0k1)
+       /* Restore the guest's k0/k1 registers */
+    LONG_L             k0, VCPU_R26(k1)
+    LONG_L             k1, VCPU_R27(k1)
+
+    /* Jump to guest */
+       eret
+       .set    pop
+
+VECTOR(MIPSX(exception), unknown)
+/*
+ * Find out what mode we came from and jump to the proper handler.
+ */
+    .set    push
+       .set    noat
+    .set    noreorder
+    mtc0    k0, CP0_ERROREPC    #01: Save guest k0
+    ehb                         #02:
+
+    mfc0    k0, CP0_EBASE       #02: Get EBASE
+    srl     k0, k0, 10          #03: Get rid of CPUNum
+    sll     k0, k0, 10          #04
+    LONG_S  k1, 0x3000(k0)      #05: Save k1 @ offset 0x3000
+    addiu   k0, k0, 0x2000      #06: Exception handler is installed @ offset 0x2000
+       j       k0                                      #07: jump to the function
+       nop                                             #08: branch delay slot
+       .set    push
+VECTOR_END(MIPSX(exceptionEnd))
+.end MIPSX(exception)
+
+/*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ */
+NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
+    .set    push
+    .set    noat
+    .set    noreorder
+
+    /* Get the VCPU pointer from DDATA_LO */
+    mfc0        k1, CP0_DDATA_LO
+       addiu           k1, k1, VCPU_HOST_ARCH
+
+    /* Start saving Guest context to VCPU */
+    LONG_S  $0, VCPU_R0(k1)
+    LONG_S  $1, VCPU_R1(k1)
+    LONG_S  $2, VCPU_R2(k1)
+    LONG_S  $3, VCPU_R3(k1)
+    LONG_S  $4, VCPU_R4(k1)
+    LONG_S  $5, VCPU_R5(k1)
+    LONG_S  $6, VCPU_R6(k1)
+    LONG_S  $7, VCPU_R7(k1)
+    LONG_S  $8, VCPU_R8(k1)
+    LONG_S  $9, VCPU_R9(k1)
+    LONG_S  $10, VCPU_R10(k1)
+    LONG_S  $11, VCPU_R11(k1)
+    LONG_S  $12, VCPU_R12(k1)
+    LONG_S  $13, VCPU_R13(k1)
+    LONG_S  $14, VCPU_R14(k1)
+    LONG_S  $15, VCPU_R15(k1)
+    LONG_S  $16, VCPU_R16(k1)
+    LONG_S  $17,VCPU_R17(k1)
+    LONG_S  $18, VCPU_R18(k1)
+    LONG_S  $19, VCPU_R19(k1)
+    LONG_S  $20, VCPU_R20(k1)
+    LONG_S  $21, VCPU_R21(k1)
+    LONG_S  $22, VCPU_R22(k1)
+    LONG_S  $23, VCPU_R23(k1)
+    LONG_S  $24, VCPU_R24(k1)
+    LONG_S  $25, VCPU_R25(k1)
+
+    /* Guest k0/k1 saved later */
+
+    LONG_S  $28, VCPU_R28(k1)
+    LONG_S  $29, VCPU_R29(k1)
+    LONG_S  $30, VCPU_R30(k1)
+    LONG_S  $31, VCPU_R31(k1)
+
+    /* We need to save hi/lo and restore them on
+     * the way out
+     */
+    mfhi    t0
+    LONG_S  t0, VCPU_HI(k1)
+
+    mflo    t0
+    LONG_S  t0, VCPU_LO(k1)
+
+    /* Finally save guest k0/k1 to VCPU */
+    mfc0    t0, CP0_ERROREPC
+    LONG_S  t0, VCPU_R26(k1)
+
+    /* Get GUEST k1 and save it in VCPU */
+    la      t1, ~0x2ff
+    mfc0    t0, CP0_EBASE
+    and     t0, t0, t1
+    LONG_L  t0, 0x3000(t0)
+    LONG_S  t0, VCPU_R27(k1)
+
+    /* Now that context has been saved, we can use other registers */
+
+    /* Restore vcpu */
+    mfc0        a1, CP0_DDATA_LO
+    move        s1, a1
+
+   /* Restore run (vcpu->run) */
+    LONG_L      a0, VCPU_RUN(a1)
+    /* Save pointer to run in s0, will be saved by the compiler */
+    move        s0, a0
+
+
+    /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
+    mfc0    k0,CP0_EPC
+    LONG_S  k0, VCPU_PC(k1)
+
+    mfc0    k0, CP0_BADVADDR
+    LONG_S  k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+    mfc0    k0, CP0_CAUSE
+    LONG_S  k0, VCPU_HOST_CP0_CAUSE(k1)
+
+    mfc0    k0, CP0_ENTRYHI
+    LONG_S  k0, VCPU_HOST_ENTRYHI(k1)
+
+    /* Now restore the host state just enough to run the handlers */
+
+    /* Switch EBASE to the one used by Linux */
+    /* load up the host EBASE */
+    mfc0        v0, CP0_STATUS
+
+    .set at
+       or          k0, v0, ST0_BEV
+    .set noat
+
+    mtc0        k0, CP0_STATUS
+    ehb
+
+    LONG_L      k0, VCPU_HOST_EBASE(k1)
+    mtc0        k0,CP0_EBASE
+
+
+    /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+    .set at
+       and         v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+    or          v0, v0, ST0_CU0
+    .set noat
+    mtc0        v0, CP0_STATUS
+    ehb
+
+    /* Load up host GP */
+    LONG_L  gp, VCPU_HOST_GP(k1)
+
+    /* Need a stack before we can jump to "C" */
+    LONG_L  sp, VCPU_HOST_STACK(k1)
+
+    /* Saved host state */
+    addiu   sp,sp, -PT_SIZE
+
+    /* XXXKYMA do we need to load the host ASID, maybe not because the
+     * kernel entries are marked GLOBAL, need to verify
+     */
+
+    /* Restore host DDATA_LO */
+    LONG_L      k0, PT_HOST_USERLOCAL(sp)
+    mtc0        k0, CP0_DDATA_LO
+
+    /* Restore RDHWR access */
+    la      k0, 0x2000000F
+    mtc0    k0,  CP0_HWRENA
+
+    /* Jump to handler */
+FEXPORT(__kvm_mips_jump_to_handler)
+    /* XXXKYMA: not sure if this is safe, how large is the stack?? */
+    /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
+    la          t9,kvm_mips_handle_exit
+    jalr.hb     t9
+    addiu       sp,sp, -CALLFRAME_SIZ           /* BD Slot */
+
+    /* Returned from handler; make sure interrupts are disabled */
+    di
+    ehb
+
+    /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
+     * while we were handling the exception from the guest, reload k1
+     */
+    move        k1, s1
+       addiu           k1, k1, VCPU_HOST_ARCH
+
+    /* Check return value, should tell us if we are returning to the host (handle I/O etc)
+     * or resuming the guest
+     */
+    andi        t0, v0, RESUME_HOST
+    bnez        t0, __kvm_mips_return_to_host
+    nop
+
+__kvm_mips_return_to_guest:
+    /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+    mtc0        s1, CP0_DDATA_LO
+
+    /* Load up the Guest EBASE to minimize the window where BEV is set */
+    LONG_L      t0, VCPU_GUEST_EBASE(k1)
+
+    /* Switch EBASE back to the one used by KVM */
+    mfc0        v1, CP0_STATUS
+    .set at
+       or          k0, v1, ST0_BEV
+    .set noat
+    mtc0        k0, CP0_STATUS
+    ehb
+    mtc0        t0,CP0_EBASE
+
+    /* Setup status register for running guest in UM */
+    .set at
+    or     v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+    and     v1, v1, ~ST0_CU0
+    .set noat
+    mtc0    v1, CP0_STATUS
+    ehb
+
+
+       /* Set Guest EPC */
+       LONG_L          t0, VCPU_PC(k1)
+       mtc0            t0, CP0_EPC
+
+    /* Set the ASID for the Guest Kernel */
+    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
+                                                /* addresses shift to 0x80000000 */
+    bltz        t0, 1f                          /* If kernel */
+       addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+    addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+    /* t1: contains the base of the ASID array, need to get the cpu id  */
+    LONG_L      t2, TI_CPU($28)             /* smp_processor_id */
+    sll         t2, t2, 2                   /* x4 */
+    addu        t3, t1, t2
+    LONG_L      k0, (t3)
+    andi        k0, k0, 0xff
+       mtc0            k0,CP0_ENTRYHI
+    ehb
+
+    /* Disable RDHWR access */
+    mtc0    zero,  CP0_HWRENA
+
+    /* load the guest context from VCPU and return */
+    LONG_L  $0, VCPU_R0(k1)
+    LONG_L  $1, VCPU_R1(k1)
+    LONG_L  $2, VCPU_R2(k1)
+    LONG_L  $3, VCPU_R3(k1)
+    LONG_L  $4, VCPU_R4(k1)
+    LONG_L  $5, VCPU_R5(k1)
+    LONG_L  $6, VCPU_R6(k1)
+    LONG_L  $7, VCPU_R7(k1)
+    LONG_L  $8, VCPU_R8(k1)
+    LONG_L  $9, VCPU_R9(k1)
+    LONG_L  $10, VCPU_R10(k1)
+    LONG_L  $11, VCPU_R11(k1)
+    LONG_L  $12, VCPU_R12(k1)
+    LONG_L  $13, VCPU_R13(k1)
+    LONG_L  $14, VCPU_R14(k1)
+    LONG_L  $15, VCPU_R15(k1)
+    LONG_L  $16, VCPU_R16(k1)
+    LONG_L  $17, VCPU_R17(k1)
+    LONG_L  $18, VCPU_R18(k1)
+    LONG_L  $19, VCPU_R19(k1)
+    LONG_L  $20, VCPU_R20(k1)
+    LONG_L  $21, VCPU_R21(k1)
+    LONG_L  $22, VCPU_R22(k1)
+    LONG_L  $23, VCPU_R23(k1)
+    LONG_L  $24, VCPU_R24(k1)
+    LONG_L  $25, VCPU_R25(k1)
+
+    /* k0/k1 loaded later */
+    LONG_L  $28, VCPU_R28(k1)
+    LONG_L  $29, VCPU_R29(k1)
+    LONG_L  $30, VCPU_R30(k1)
+    LONG_L  $31, VCPU_R31(k1)
+
+FEXPORT(__kvm_mips_skip_guest_restore)
+    LONG_L  k0, VCPU_HI(k1)
+    mthi    k0
+
+    LONG_L  k0, VCPU_LO(k1)
+    mtlo    k0
+
+    LONG_L  k0, VCPU_R26(k1)
+    LONG_L  k1, VCPU_R27(k1)
+
+    eret
+
+__kvm_mips_return_to_host:
+    /* EBASE is already pointing to Linux */
+    LONG_L  k1, VCPU_HOST_STACK(k1)
+       addiu   k1,k1, -PT_SIZE
+
+    /* Restore host DDATA_LO */
+    LONG_L      k0, PT_HOST_USERLOCAL(k1)
+    mtc0        k0, CP0_DDATA_LO
+
+    /* Restore host ASID */
+    LONG_L      k0, PT_HOST_ASID(sp)
+    andi        k0, 0xff
+    mtc0        k0,CP0_ENTRYHI
+    ehb
+
+    /* Load context saved on the host stack */
+    LONG_L  $0, PT_R0(k1)
+    LONG_L  $1, PT_R1(k1)
+
+    /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code  */
+    sra     k0, v0, 2
+    move    $2, k0
+
+    LONG_L  $3, PT_R3(k1)
+    LONG_L  $4, PT_R4(k1)
+    LONG_L  $5, PT_R5(k1)
+    LONG_L  $6, PT_R6(k1)
+    LONG_L  $7, PT_R7(k1)
+    LONG_L  $8, PT_R8(k1)
+    LONG_L  $9, PT_R9(k1)
+    LONG_L  $10, PT_R10(k1)
+    LONG_L  $11, PT_R11(k1)
+    LONG_L  $12, PT_R12(k1)
+    LONG_L  $13, PT_R13(k1)
+    LONG_L  $14, PT_R14(k1)
+    LONG_L  $15, PT_R15(k1)
+    LONG_L  $16, PT_R16(k1)
+    LONG_L  $17, PT_R17(k1)
+    LONG_L  $18, PT_R18(k1)
+    LONG_L  $19, PT_R19(k1)
+    LONG_L  $20, PT_R20(k1)
+    LONG_L  $21, PT_R21(k1)
+    LONG_L  $22, PT_R22(k1)
+    LONG_L  $23, PT_R23(k1)
+    LONG_L  $24, PT_R24(k1)
+    LONG_L  $25, PT_R25(k1)
+
+    /* Host k0/k1 were not saved */
+
+    LONG_L  $28, PT_R28(k1)
+    LONG_L  $29, PT_R29(k1)
+    LONG_L  $30, PT_R30(k1)
+
+    LONG_L  k0, PT_HI(k1)
+    mthi    k0
+
+    LONG_L  k0, PT_LO(k1)
+    mtlo    k0
+
+    /* Restore RDHWR access */
+    la      k0, 0x2000000F
+    mtc0    k0,  CP0_HWRENA
+
+
+    /* Restore RA, which is the address we will return to */
+    LONG_L  ra, PT_R31(k1)
+    j       ra
+    nop
+
+    .set    pop
+VECTOR_END(MIPSX(GuestExceptionEnd))
+.end MIPSX(GuestException)
+
+MIPSX(exceptions):
+       ####
+       ##### The exception handlers.
+       #####
+       .word _C_LABEL(MIPSX(GuestException))   #  0
+       .word _C_LABEL(MIPSX(GuestException))   #  1
+       .word _C_LABEL(MIPSX(GuestException))   #  2
+       .word _C_LABEL(MIPSX(GuestException))   #  3
+       .word _C_LABEL(MIPSX(GuestException))   #  4
+       .word _C_LABEL(MIPSX(GuestException))   #  5
+       .word _C_LABEL(MIPSX(GuestException))   #  6
+       .word _C_LABEL(MIPSX(GuestException))   #  7
+       .word _C_LABEL(MIPSX(GuestException))   #  8
+       .word _C_LABEL(MIPSX(GuestException))   #  9
+       .word _C_LABEL(MIPSX(GuestException))   # 10
+       .word _C_LABEL(MIPSX(GuestException))   # 11
+       .word _C_LABEL(MIPSX(GuestException))   # 12
+       .word _C_LABEL(MIPSX(GuestException))   # 13
+       .word _C_LABEL(MIPSX(GuestException))   # 14
+       .word _C_LABEL(MIPSX(GuestException))   # 15
+       .word _C_LABEL(MIPSX(GuestException))   # 16
+       .word _C_LABEL(MIPSX(GuestException))   # 17
+       .word _C_LABEL(MIPSX(GuestException))   # 18
+       .word _C_LABEL(MIPSX(GuestException))   # 19
+       .word _C_LABEL(MIPSX(GuestException))   # 20
+       .word _C_LABEL(MIPSX(GuestException))   # 21
+       .word _C_LABEL(MIPSX(GuestException))   # 22
+       .word _C_LABEL(MIPSX(GuestException))   # 23
+       .word _C_LABEL(MIPSX(GuestException))   # 24
+       .word _C_LABEL(MIPSX(GuestException))   # 25
+       .word _C_LABEL(MIPSX(GuestException))   # 26
+       .word _C_LABEL(MIPSX(GuestException))   # 27
+       .word _C_LABEL(MIPSX(GuestException))   # 28
+       .word _C_LABEL(MIPSX(GuestException))   # 29
+       .word _C_LABEL(MIPSX(GuestException))   # 30
+       .word _C_LABEL(MIPSX(GuestException))   # 31
+
+
+/* This routine makes changes to the instruction stream effective to the hardware.
+ * It should be called after the instruction stream is written.
+ * On return, the new instructions are effective.
+ * Inputs:
+ * a0 = Start address of new instruction stream
+ * a1 = Size, in bytes, of new instruction stream
+ */
+
+#define HW_SYNCI_Step       $1
+LEAF(MIPSX(SyncICache))
+    .set    push
+       .set    mips32r2
+    beq     a1, zero, 20f
+    nop
+    addu    a1, a0, a1
+    rdhwr   v0, HW_SYNCI_Step
+    beq     v0, zero, 20f
+    nop
+
+10:
+    synci   0(a0)
+    addu    a0, a0, v0
+    sltu    v1, a0, a1
+    bne     v1, zero, 10b
+    nop
+    sync
+20:
+    jr.hb   ra
+    nop
+    .set pop
+END(MIPSX(SyncICache))
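Both guest-entry paths above pick the guest kernel or guest user ASID with the same two-instruction trick (sll t0, t0, 1 followed by bltz): shifting the guest PC left by one moves bit 30 into the sign bit, so any address at or above 0x40000000, i.e. guest kernel space, tests negative. An equivalent C sketch, for illustration only:

#include <linux/types.h>

static int guest_pc_is_kernel(u32 guest_pc)
{
	/* bit 30 set => guest kernel address; after the shift it is the sign bit */
	return (s32)(guest_pc << 1) < 0;
}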
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644 (file)
index 0000000..d934b01
--- /dev/null
@@ -0,0 +1,1196 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100    /* for EI/VI mode */
+#endif
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { "wait", VCPU_STAT(wait_exits) },
+       { "cache", VCPU_STAT(cache_exits) },
+       { "signal", VCPU_STAT(signal_exits) },
+       { "interrupt", VCPU_STAT(int_exits) },
+       { "cop_unusable", VCPU_STAT(cop_unusable_exits) },
+       { "tlbmod", VCPU_STAT(tlbmod_exits) },
+       { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
+       { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
+       { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
+       { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
+       { "syscall", VCPU_STAT(syscall_exits) },
+       { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
+       { "break_inst", VCPU_STAT(break_inst_exits) },
+       { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
+       { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+       {NULL}
+};
+
+static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+       int i;
+       for_each_possible_cpu(i) {
+               vcpu->arch.guest_kernel_asid[i] = 0;
+               vcpu->arch.guest_user_asid[i] = 0;
+       }
+       return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       return gfn;
+}
+
+/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we
+ * are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+       return !!(vcpu->arch.pending_exceptions);
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       int *r = (int *)rtn;
+       *r = 0;
+       return;
+}
+
+static void kvm_mips_init_tlbs(struct kvm *kvm)
+{
+       unsigned long wired;
+
+       /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
+       wired = read_c0_wired();
+       write_c0_wired(wired + 1);
+       mtc0_tlbw_hazard();
+       kvm->arch.commpage_tlb = wired;
+
+       kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+                 kvm->arch.commpage_tlb);
+}
+
+static void kvm_mips_init_vm_percpu(void *arg)
+{
+       struct kvm *kvm = (struct kvm *)arg;
+
+       kvm_mips_init_tlbs(kvm);
+       kvm_mips_callbacks->vm_init(kvm);
+
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+       if (atomic_inc_return(&kvm_mips_instance) == 1) {
+               kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
+                        __func__);
+               on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+       }
+
+
+       return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+       unsigned int i;
+       struct kvm_vcpu *vcpu;
+
+       /* Put the pages we reserved for the guest pmap */
+       for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+               if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+                       kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+       }
+
+       if (kvm->arch.guest_pmap)
+               kfree(kvm->arch.guest_pmap);
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_arch_vcpu_free(vcpu);
+       }
+
+       mutex_lock(&kvm->lock);
+
+       for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+               kvm->vcpus[i] = NULL;
+
+       atomic_set(&kvm->online_vcpus, 0);
+
+       mutex_unlock(&kvm->lock);
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+static void kvm_mips_uninit_tlbs(void *arg)
+{
+       /* Restore wired count */
+       write_c0_wired(0);
+       mtc0_tlbw_hazard();
+       /* Clear out all the TLBs */
+       kvm_local_flush_tlb_all();
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       kvm_mips_free_vcpus(kvm);
+
+       /* If this is the last instance, restore wired count */
+       if (atomic_dec_return(&kvm_mips_instance) == 0) {
+               kvm_info("%s: last KVM instance, restoring TLB parameters\n",
+                        __func__);
+               on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+       }
+}
+
+long
+kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       return -ENOIOCTLCMD;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                struct kvm_memory_slot *memslot,
+                                struct kvm_userspace_memory_region *mem,
+                                enum kvm_mr_change change)
+{
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                struct kvm_userspace_memory_region *mem,
+                                const struct kvm_memory_slot *old,
+                                enum kvm_mr_change change)
+{
+       unsigned long npages = 0;
+       int i, err = 0;
+
+       kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+                 __func__, kvm, mem->slot, mem->guest_phys_addr,
+                 mem->memory_size, mem->userspace_addr);
+
+       /* Setup Guest PMAP table */
+       if (!kvm->arch.guest_pmap) {
+               if (mem->slot == 0)
+                       npages = mem->memory_size >> PAGE_SHIFT;
+
+               if (npages) {
+                       kvm->arch.guest_pmap_npages = npages;
+                       kvm->arch.guest_pmap =
+                           kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+
+                       if (!kvm->arch.guest_pmap) {
+                               kvm_err("Failed to allocate guest PMAP");
+                               err = -ENOMEM;
+                               goto out;
+                       }
+
+                       kvm_info
+                           ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+                            npages, kvm->arch.guest_pmap);
+
+                       /* Now setup the page table */
+                       for (i = 0; i < npages; i++) {
+                               kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+                       }
+               }
+       }
+out:
+       return;
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       extern char mips32_exception[], mips32_exceptionEnd[];
+       extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+       int err, size, offset;
+       void *gebase;
+       int i;
+
+       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+
+       if (!vcpu) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+
+       if (err)
+               goto out_free_cpu;
+
+       kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+
+       /* Allocate space for host mode exception handlers that handle
+        * guest mode exits
+        */
+       if (cpu_has_veic || cpu_has_vint) {
+               size = 0x200 + VECTORSPACING * 64;
+       } else {
+               size = 0x200;
+       }
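+       /*
+        * 0x200 covers the TLB refill (offset 0) and general exception
+        * (offset 0x180) entries copied in below; with VEIC/VInt there is
+        * additionally room for 64 vectored slots of VECTORSPACING (0x100)
+        * bytes each, starting at offset 0x200.
+        */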
+
+       /* Save Linux EBASE */
+       vcpu->arch.host_ebase = (void *)read_c0_ebase();
+
+       gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+       if (!gebase) {
+               err = -ENOMEM;
+               goto out_free_cpu;
+       }
+       kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+                ALIGN(size, PAGE_SIZE), gebase);
+
+       /* Save new ebase */
+       vcpu->arch.guest_ebase = gebase;
+
+       /* Copy L1 Guest Exception handler to correct offset */
+
+       /* TLB Refill, EXL = 0 */
+       memcpy(gebase, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* General Exception Entry point */
+       memcpy(gebase + 0x180, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* For vectored interrupts poke the exception code @ all offsets 0-7 */
+       for (i = 0; i < 8; i++) {
+               kvm_debug("L1 Vectored handler @ %p\n",
+                         gebase + 0x200 + (i * VECTORSPACING));
+               memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+                      mips32_exceptionEnd - mips32_exception);
+       }
+
+       /* General handler, relocate to unmapped space for sanity's sake */
+       offset = 0x2000;
+       kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
+                gebase + offset,
+                mips32_GuestExceptionEnd - mips32_GuestException);
+
+       memcpy(gebase + offset, mips32_GuestException,
+              mips32_GuestExceptionEnd - mips32_GuestException);
+
+       /* Invalidate the icache for these ranges */
+       mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+
+       /* Allocate comm page for the guest kernel; a TLB entry will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+       vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+       if (!vcpu->arch.kseg0_commpage) {
+               err = -ENOMEM;
+               goto out_free_gebase;
+       }
+
+       kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+       kvm_mips_commpage_init(vcpu);
+
+       /* Init */
+       vcpu->arch.last_sched_cpu = -1;
+
+       /* Start off the timer */
+       kvm_mips_emulate_count(vcpu);
+
+       return vcpu;
+
+out_free_gebase:
+       kfree(gebase);
+
+out_free_cpu:
+       kfree(vcpu);
+
+out:
+       return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+       kvm_vcpu_uninit(vcpu);
+
+       kvm_mips_dump_stats(vcpu);
+
+       if (vcpu->arch.guest_ebase)
+               kfree(vcpu->arch.guest_ebase);
+
+       if (vcpu->arch.kseg0_commpage)
+               kfree(vcpu->arch.kseg0_commpage);
+
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_arch_vcpu_free(vcpu);
+}
+
+int
+kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                   struct kvm_guest_debug *dbg)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int r = 0;
+       sigset_t sigsaved;
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       if (vcpu->mmio_needed) {
+               if (!vcpu->mmio_is_write)
+                       kvm_mips_complete_mmio_load(vcpu, run);
+               vcpu->mmio_needed = 0;
+       }
+
+       /* Check if we have any exceptions/interrupts pending */
+       kvm_mips_deliver_interrupts(vcpu,
+                                   kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+       local_irq_disable();
+       kvm_guest_enter();
+
+       r = __kvm_mips_vcpu_run(run, vcpu);
+
+       kvm_guest_exit();
+       local_irq_enable();
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       return r;
+}
+
+int
+kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+       struct kvm_vcpu *dvcpu = NULL;
+
+       if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+               kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+                         (int)intr);
+
+       if (irq->cpu == -1)
+               dvcpu = vcpu;
+       else
+               dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
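+       /*
+        * Positive interrupt numbers (2-4) raise the corresponding guest I/O
+        * interrupt; negative values (-2..-4) clear it.
+        */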
+       if (intr == 2 || intr == 3 || intr == 4) {
+               kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+       } else if (intr == -2 || intr == -3 || intr == -4) {
+               kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+       } else {
+               kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+                       irq->cpu, irq->irq);
+               return -EINVAL;
+       }
+
+       dvcpu->arch.wait = 0;
+
+       if (waitqueue_active(&dvcpu->wq)) {
+               wake_up_interruptible(&dvcpu->wq);
+       }
+
+       return 0;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                               struct kvm_mp_state *mp_state)
+{
+       return -ENOIOCTLCMD;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                               struct kvm_mp_state *mp_state)
+{
+       return -ENOIOCTLCMD;
+}
+
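+/*
+ * CP0 register IDs for the ONE_REG interface: 0x10000 base, plus 8 times the
+ * CP0 register number, plus the select field.
+ */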
+#define KVM_REG_MIPS_CP0_INDEX (0x10000 + 8 * 0 + 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO0 (0x10000 + 8 * 2 + 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO1 (0x10000 + 8 * 3 + 0)
+#define KVM_REG_MIPS_CP0_CONTEXT (0x10000 + 8 * 4 + 0)
+#define KVM_REG_MIPS_CP0_USERLOCAL (0x10000 + 8 * 4 + 2)
+#define KVM_REG_MIPS_CP0_PAGEMASK (0x10000 + 8 * 5 + 0)
+#define KVM_REG_MIPS_CP0_PAGEGRAIN (0x10000 + 8 * 5 + 1)
+#define KVM_REG_MIPS_CP0_WIRED (0x10000 + 8 * 6 + 0)
+#define KVM_REG_MIPS_CP0_HWRENA (0x10000 + 8 * 7 + 0)
+#define KVM_REG_MIPS_CP0_BADVADDR (0x10000 + 8 * 8 + 0)
+#define KVM_REG_MIPS_CP0_COUNT (0x10000 + 8 * 9 + 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI (0x10000 + 8 * 10 + 0)
+#define KVM_REG_MIPS_CP0_COMPARE (0x10000 + 8 * 11 + 0)
+#define KVM_REG_MIPS_CP0_STATUS (0x10000 + 8 * 12 + 0)
+#define KVM_REG_MIPS_CP0_CAUSE (0x10000 + 8 * 13 + 0)
+#define KVM_REG_MIPS_CP0_EBASE (0x10000 + 8 * 15 + 1)
+#define KVM_REG_MIPS_CP0_CONFIG (0x10000 + 8 * 16 + 0)
+#define KVM_REG_MIPS_CP0_CONFIG1 (0x10000 + 8 * 16 + 1)
+#define KVM_REG_MIPS_CP0_CONFIG2 (0x10000 + 8 * 16 + 2)
+#define KVM_REG_MIPS_CP0_CONFIG3 (0x10000 + 8 * 16 + 3)
+#define KVM_REG_MIPS_CP0_CONFIG7 (0x10000 + 8 * 16 + 7)
+#define KVM_REG_MIPS_CP0_XCONTEXT (0x10000 + 8 * 20 + 0)
+#define KVM_REG_MIPS_CP0_ERROREPC (0x10000 + 8 * 30 + 0)
+
+static u64 kvm_mips_get_one_regs[] = {
+       KVM_REG_MIPS_R0,
+       KVM_REG_MIPS_R1,
+       KVM_REG_MIPS_R2,
+       KVM_REG_MIPS_R3,
+       KVM_REG_MIPS_R4,
+       KVM_REG_MIPS_R5,
+       KVM_REG_MIPS_R6,
+       KVM_REG_MIPS_R7,
+       KVM_REG_MIPS_R8,
+       KVM_REG_MIPS_R9,
+       KVM_REG_MIPS_R10,
+       KVM_REG_MIPS_R11,
+       KVM_REG_MIPS_R12,
+       KVM_REG_MIPS_R13,
+       KVM_REG_MIPS_R14,
+       KVM_REG_MIPS_R15,
+       KVM_REG_MIPS_R16,
+       KVM_REG_MIPS_R17,
+       KVM_REG_MIPS_R18,
+       KVM_REG_MIPS_R19,
+       KVM_REG_MIPS_R20,
+       KVM_REG_MIPS_R21,
+       KVM_REG_MIPS_R22,
+       KVM_REG_MIPS_R23,
+       KVM_REG_MIPS_R24,
+       KVM_REG_MIPS_R25,
+       KVM_REG_MIPS_R26,
+       KVM_REG_MIPS_R27,
+       KVM_REG_MIPS_R28,
+       KVM_REG_MIPS_R29,
+       KVM_REG_MIPS_R30,
+       KVM_REG_MIPS_R31,
+
+       KVM_REG_MIPS_HI,
+       KVM_REG_MIPS_LO,
+       KVM_REG_MIPS_PC,
+
+       KVM_REG_MIPS_CP0_INDEX,
+       KVM_REG_MIPS_CP0_CONTEXT,
+       KVM_REG_MIPS_CP0_PAGEMASK,
+       KVM_REG_MIPS_CP0_WIRED,
+       KVM_REG_MIPS_CP0_BADVADDR,
+       KVM_REG_MIPS_CP0_ENTRYHI,
+       KVM_REG_MIPS_CP0_STATUS,
+       KVM_REG_MIPS_CP0_CAUSE,
+       /* EPC set via kvm_regs, et al. */
+       KVM_REG_MIPS_CP0_CONFIG,
+       KVM_REG_MIPS_CP0_CONFIG1,
+       KVM_REG_MIPS_CP0_CONFIG2,
+       KVM_REG_MIPS_CP0_CONFIG3,
+       KVM_REG_MIPS_CP0_CONFIG7,
+       KVM_REG_MIPS_CP0_ERROREPC
+};
+
+static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+                           const struct kvm_one_reg *reg)
+{
+       u64 __user *uaddr = (u64 __user *)(long)reg->addr;
+
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       s64 v;
+
+       switch (reg->id) {
+       case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
+               v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
+               break;
+       case KVM_REG_MIPS_HI:
+               v = (long)vcpu->arch.hi;
+               break;
+       case KVM_REG_MIPS_LO:
+               v = (long)vcpu->arch.lo;
+               break;
+       case KVM_REG_MIPS_PC:
+               v = (long)vcpu->arch.pc;
+               break;
+
+       case KVM_REG_MIPS_CP0_INDEX:
+               v = (long)kvm_read_c0_guest_index(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONTEXT:
+               v = (long)kvm_read_c0_guest_context(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_PAGEMASK:
+               v = (long)kvm_read_c0_guest_pagemask(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_WIRED:
+               v = (long)kvm_read_c0_guest_wired(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_BADVADDR:
+               v = (long)kvm_read_c0_guest_badvaddr(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_ENTRYHI:
+               v = (long)kvm_read_c0_guest_entryhi(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_STATUS:
+               v = (long)kvm_read_c0_guest_status(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CAUSE:
+               v = (long)kvm_read_c0_guest_cause(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_ERROREPC:
+               v = (long)kvm_read_c0_guest_errorepc(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG:
+               v = (long)kvm_read_c0_guest_config(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG1:
+               v = (long)kvm_read_c0_guest_config1(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG2:
+               v = (long)kvm_read_c0_guest_config2(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG3:
+               v = (long)kvm_read_c0_guest_config3(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG7:
+               v = (long)kvm_read_c0_guest_config7(cop0);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return put_user(v, uaddr);
+}
+
+static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+                           const struct kvm_one_reg *reg)
+{
+       u64 __user *uaddr = (u64 __user *)(long)reg->addr;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       u64 v;
+
+       if (get_user(v, uaddr) != 0)
+               return -EFAULT;
+
+       switch (reg->id) {
+       case KVM_REG_MIPS_R0:
+               /* Silently ignore requests to set $0 */
+               break;
+       case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
+               vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
+               break;
+       case KVM_REG_MIPS_HI:
+               vcpu->arch.hi = v;
+               break;
+       case KVM_REG_MIPS_LO:
+               vcpu->arch.lo = v;
+               break;
+       case KVM_REG_MIPS_PC:
+               vcpu->arch.pc = v;
+               break;
+
+       case KVM_REG_MIPS_CP0_INDEX:
+               kvm_write_c0_guest_index(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_CONTEXT:
+               kvm_write_c0_guest_context(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_PAGEMASK:
+               kvm_write_c0_guest_pagemask(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_WIRED:
+               kvm_write_c0_guest_wired(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_BADVADDR:
+               kvm_write_c0_guest_badvaddr(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_ENTRYHI:
+               kvm_write_c0_guest_entryhi(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_STATUS:
+               kvm_write_c0_guest_status(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_CAUSE:
+               kvm_write_c0_guest_cause(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_ERROREPC:
+               kvm_write_c0_guest_errorepc(cop0, v);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+long
+kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
+
+       switch (ioctl) {
+       case KVM_SET_ONE_REG:
+       case KVM_GET_ONE_REG: {
+               struct kvm_one_reg reg;
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       return -EFAULT;
+               if (ioctl == KVM_SET_ONE_REG)
+                       return kvm_mips_set_reg(vcpu, &reg);
+               else
+                       return kvm_mips_get_reg(vcpu, &reg);
+       }
+       case KVM_GET_REG_LIST: {
+               struct kvm_reg_list __user *user_list = argp;
+               u64 __user *reg_dest;
+               struct kvm_reg_list reg_list;
+               unsigned n;
+
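+               /*
+                * Report the required number of registers back to userspace
+                * and only copy the list if its buffer (n) was large enough.
+                */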
+               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+                       return -EFAULT;
+               n = reg_list.n;
+               reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+                       return -EFAULT;
+               if (n < reg_list.n)
+                       return -E2BIG;
+               reg_dest = user_list->reg;
+               if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
+                                sizeof(kvm_mips_get_one_regs)))
+                       return -EFAULT;
+               return 0;
+       }
+       case KVM_NMI:
+               /* Treat the NMI as a CPU reset */
+               r = kvm_mips_reset_vcpu(vcpu);
+               break;
+       case KVM_INTERRUPT:
+               {
+                       struct kvm_mips_interrupt irq;
+                       r = -EFAULT;
+                       if (copy_from_user(&irq, argp, sizeof(irq)))
+                               goto out;
+
+                       kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+                                 irq.irq);
+
+                       r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+                       break;
+               }
+       default:
+               r = -ENOIOCTLCMD;
+       }
+
+out:
+       return r;
+}
+
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+       struct kvm_memory_slot *memslot;
+       unsigned long ga, ga_end;
+       int is_dirty = 0;
+       int r;
+       unsigned long n;
+
+       mutex_lock(&kvm->slots_lock);
+
+       r = kvm_get_dirty_log(kvm, log, &is_dirty);
+       if (r)
+               goto out;
+
+       /* If nothing is dirty, don't bother messing with page tables. */
+       if (is_dirty) {
+               memslot = &kvm->memslots->memslots[log->slot];
+
+               ga = memslot->base_gfn << PAGE_SHIFT;
+               ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+               printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+                      ga_end);
+
+               n = kvm_dirty_bitmap_bytes(memslot);
+               memset(memslot->dirty_bitmap, 0, n);
+       }
+
+       r = 0;
+out:
+       mutex_unlock(&kvm->slots_lock);
+       return r;
+
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       long r;
+
+       switch (ioctl) {
+       default:
+               r = -ENOIOCTLCMD;
+       }
+
+       return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+       int ret;
+
+       if (kvm_mips_callbacks) {
+               kvm_err("kvm: module already exists\n");
+               return -EEXIST;
+       }
+
+       ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
+
+       return ret;
+}
+
+void kvm_arch_exit(void)
+{
+       kvm_mips_callbacks = NULL;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       return -ENOIOCTLCMD;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       int r;
+
+       switch (ext) {
+       case KVM_CAP_ONE_REG:
+               r = 1;
+               break;
+       case KVM_CAP_COALESCED_MMIO:
+               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+               break;
+       default:
+               r = 0;
+               break;
+       }
+       return r;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_pending_timer(vcpu);
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct mips_coproc *cop0;
+
+       if (!vcpu)
+               return -1;
+
+       printk("VCPU Register Dump:\n");
+       printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
+       printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+       for (i = 0; i < 32; i += 4) {
+               printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+                      vcpu->arch.gprs[i],
+                      vcpu->arch.gprs[i + 1],
+                      vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+       }
+       printk("\thi: 0x%08lx\n", vcpu->arch.hi);
+       printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+       cop0 = vcpu->arch.cop0;
+       printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+              kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
+
+       printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+               vcpu->arch.gprs[i] = regs->gpr[i];
+       vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
+       vcpu->arch.hi = regs->hi;
+       vcpu->arch.lo = regs->lo;
+       vcpu->arch.pc = regs->pc;
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+               regs->gpr[i] = vcpu->arch.gprs[i];
+
+       regs->hi = vcpu->arch.hi;
+       regs->lo = vcpu->arch.lo;
+       regs->pc = vcpu->arch.pc;
+
+       return 0;
+}
+
+void kvm_mips_comparecount_func(unsigned long data)
+{
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+       kvm_mips_callbacks->queue_timer_int(vcpu);
+
+       vcpu->arch.wait = 0;
+       if (waitqueue_active(&vcpu->wq)) {
+               wake_up_interruptible(&vcpu->wq);
+       }
+}
+
+/*
+ * low level hrtimer wake routine.
+ */
+enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+       kvm_mips_comparecount_func((unsigned long) vcpu);
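+       /* Re-arm to fire again in 10ms so the guest keeps receiving periodic timer ticks */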
+       hrtimer_forward_now(&vcpu->arch.comparecount_timer,
+                           ktime_set(0, MS_TO_NS(10)));
+       return HRTIMER_RESTART;
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       kvm_mips_callbacks->vcpu_init(vcpu);
+       hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_REL);
+       vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+       kvm_mips_init_shadow_tlb(vcpu);
+       return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       return;
+}
+
+int
+kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+{
+       return 0;
+}
+
+/* Initial guest state */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_callbacks->vcpu_setup(vcpu);
+}
+
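+/* Enable host FPU (CU1) and DSP (MX) access in CP0 Status if the host CPU supports them */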
+static
+void kvm_mips_set_c0_status(void)
+{
+       uint32_t status = read_c0_status();
+
+       if (cpu_has_fpu)
+               status |= (ST0_CU1);
+
+       if (cpu_has_dsp)
+               status |= (ST0_MX);
+
+       write_c0_status(status);
+       ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       uint32_t cause = vcpu->arch.host_cp0_cause;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       /* Set a default exit reason */
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       run->ready_for_interrupt_injection = 1;
+
+       /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+       kvm_mips_set_c0_status();
+
+       local_irq_enable();
+
+       kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+                       cause, opc, run, vcpu);
+
+       /* Do a privilege check; if we are in user mode, most of these exit
+        * conditions end up causing an exception to be delivered to the guest kernel.
+        */
+       er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+       if (er == EMULATE_PRIV_FAIL) {
+               goto skip_emul;
+       } else if (er == EMULATE_FAIL) {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               goto skip_emul;
+       }
+
+       switch (exccode) {
+       case T_INT:
+               kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+
+               ++vcpu->stat.int_exits;
+               trace_kvm_exit(vcpu, INT_EXITS);
+
+               if (need_resched()) {
+                       cond_resched();
+               }
+
+               ret = RESUME_GUEST;
+               break;
+
+       case T_COP_UNUSABLE:
+               kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+
+               ++vcpu->stat.cop_unusable_exits;
+               trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+               ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+               /* XXXKYMA: Might need to return to user space */
+               if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+                       ret = RESUME_HOST;
+               }
+               break;
+
+       case T_TLB_MOD:
+               ++vcpu->stat.tlbmod_exits;
+               trace_kvm_exit(vcpu, TLBMOD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+               break;
+
+       case T_TLB_ST_MISS:
+               kvm_debug
+                   ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+                    badvaddr);
+
+               ++vcpu->stat.tlbmiss_st_exits;
+               trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+               break;
+
+       case T_TLB_LD_MISS:
+               kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+
+               ++vcpu->stat.tlbmiss_ld_exits;
+               trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+               break;
+
+       case T_ADDR_ERR_ST:
+               ++vcpu->stat.addrerr_st_exits;
+               trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+               break;
+
+       case T_ADDR_ERR_LD:
+               ++vcpu->stat.addrerr_ld_exits;
+               trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+               break;
+
+       case T_SYSCALL:
+               ++vcpu->stat.syscall_exits;
+               trace_kvm_exit(vcpu, SYSCALL_EXITS);
+               ret = kvm_mips_callbacks->handle_syscall(vcpu);
+               break;
+
+       case T_RES_INST:
+               ++vcpu->stat.resvd_inst_exits;
+               trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+               break;
+
+       case T_BREAK:
+               ++vcpu->stat.break_inst_exits;
+               trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_break(vcpu);
+               break;
+
+       default:
+               kvm_err
+                   ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+                    exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+                    kvm_read_c0_guest_status(vcpu->arch.cop0));
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       }
+
+skip_emul:
+       local_irq_disable();
+
+       if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+               kvm_mips_deliver_interrupts(vcpu, cause);
+
+       if (!(ret & RESUME_HOST)) {
+               /* Only check for signals if not already exiting to userspace  */
+               if (signal_pending(current)) {
+                       run->exit_reason = KVM_EXIT_INTR;
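+                       /* Encode -EINTR in the errcode field of the return value (see format above) */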
+                       ret = (-EINTR << 2) | RESUME_HOST;
+                       ++vcpu->stat.signal_exits;
+                       trace_kvm_exit(vcpu, SIGNAL_EXITS);
+               }
+       }
+
+       return ret;
+}
+
+int __init kvm_mips_init(void)
+{
+       int ret;
+
+       ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+       if (ret)
+               return ret;
+
+       /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
+        * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
+        * to avoid the possibility of double faulting. The issue is that the TLB code
+        * references routines that are part of the KVM module,
+        * which are only available once the module is loaded.
+        */
+       kvm_mips_gfn_to_pfn = gfn_to_pfn;
+       kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+       kvm_mips_is_error_pfn = is_error_pfn;
+
+       pr_info("KVM/MIPS Initialized\n");
+       return 0;
+}
+
+void __exit kvm_mips_exit(void)
+{
+       kvm_exit();
+
+       kvm_mips_gfn_to_pfn = NULL;
+       kvm_mips_release_pfn_clean = NULL;
+       kvm_mips_is_error_pfn = NULL;
+
+       pr_info("KVM/MIPS unloaded\n");
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644 (file)
index 0000000..a4a8c85
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: commpage: mapped into guest kernel space
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+       struct mips_coproc cop0;        /* COP0 state is mapped into Guest kernel via commpage */
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644 (file)
index 0000000..3873b1e
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* commpage, currently used for Virtual COP0 registers.
+* Mapped into the guest kernel @ 0x0.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_comm.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+       memset(page, 0, sizeof(struct kvm_mips_commpage));
+
+       /* Specific init values for fields */
+       vcpu->arch.cop0 = &page->cop0;
+       memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
+
+       return;
+}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644 (file)
index 0000000..96528e2
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+
+#include "kvm_mips_comm.h"
+
+#define SYNCI_TEMPLATE  0x041f0000
+#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
+#define SYNCI_OFFSET(x) ((x) & 0xffff)
+
+#define LW_TEMPLATE     0x8c000000
+#define CLEAR_TEMPLATE  0x00000020
+#define SW_TEMPLATE     0xac000000
+
+int
+kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+                          struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = 0x0;
+
+       /* Replace the CACHE instruction with a NOP */
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       mips32_SyncICache(kseg0_opc, 32);
+
+       return result;
+}
+
+/*
+ * Address-based CACHE instructions are transformed into SYNCI instructions. This is
+ * a little heavy-handed for plain D-cache invalidates, but it avoids an expensive trap.
+ */
+int
+kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+                       struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
+
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       synci_inst |= (base << 21);
+       synci_inst |= offset;
+
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       mips32_SyncICache(kseg0_opc, 32);
+
+       return result;
+}
+
+int
+kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mfc0_inst;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+               mfc0_inst = CLEAR_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+       } else {
+               mfc0_inst = LW_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+               mfc0_inst |=
+                   offsetof(struct mips_coproc,
+                            reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
+                                                     cop0);
+       }
+
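+       /*
+        * Patch the instruction in place: via its KSEG0 host alias for guest
+        * KSEG0 addresses, or directly for guest KSEG23 addresses.
+        */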
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               mips32_SyncICache(kseg0_opc, 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               mips32_SyncICache((unsigned long) opc, 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int
+kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mtc0_inst = SW_TEMPLATE;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       mtc0_inst |= ((rt & 0x1f) << 16);
+       mtc0_inst |=
+           offsetof(struct mips_coproc,
+                    reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               mips32_SyncICache(kseg0_opc, 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               mips32_SyncICache((unsigned long) opc, 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644 (file)
index 0000000..4b6274b
--- /dev/null
@@ -0,0 +1,1829 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Instruction/Exception emulation
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
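+/* Include r4kcache.h with CONFIG_MIPS_MT temporarily undefined so the non-MT cache op variants are used */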
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address and emulate the branch, if required.
+ * This function should only be called when the faulting instruction is in a branch delay slot.
+ */
+unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
+       unsigned long instpc)
+{
+       unsigned int dspcontrol;
+       union mips_instruction insn;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       long epc = instpc;
+       long nextpc = KVM_INVALID_INST;
+
+       if (epc & 3)
+               goto unaligned;
+
+       /*
+        * Read the instruction
+        */
+       insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+
+       if (insn.word == KVM_INVALID_INST)
+               return KVM_INVALID_INST;
+
+       switch (insn.i_format.opcode) {
+               /*
+                * jr and jalr are in r_format format.
+                */
+       case spec_op:
+               switch (insn.r_format.func) {
+               case jalr_op:
+                       arch->gprs[insn.r_format.rd] = epc + 8;
+                       /* Fall through */
+               case jr_op:
+                       nextpc = arch->gprs[insn.r_format.rs];
+                       break;
+               }
+               break;
+
+               /*
+                * This group contains:
+                * bltz_op, bgez_op, bltzl_op, bgezl_op,
+                * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+                */
+       case bcond_op:
+               switch (insn.i_format.rt) {
+               case bltz_op:
+               case bltzl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgez_op:
+               case bgezl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bltzal_op:
+               case bltzall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgezal_op:
+               case bgezall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               case bposge32_op:
+                       if (!cpu_has_dsp)
+                               goto sigill;
+
+                       dspcontrol = rddsp(0x01);
+
+                       if (dspcontrol >= 32) {
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       } else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               }
+               break;
+
+               /*
+                * These are unconditional and in j_format.
+                */
+       case jal_op:
+               arch->gprs[31] = instpc + 8;
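+               /* fall through to compute the jump target */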
+       case j_op:
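+               /* Jump target replaces the low 28 bits of PC + 4, keeping the top 4 bits */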
+               epc += 4;
+               epc >>= 28;
+               epc <<= 28;
+               epc |= (insn.j_format.target << 2);
+               nextpc = epc;
+               break;
+
+               /*
+                * These are conditional and in i_format.
+                */
+       case beq_op:
+       case beql_op:
+               if (arch->gprs[insn.i_format.rs] ==
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bne_op:
+       case bnel_op:
+               if (arch->gprs[insn.i_format.rs] !=
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case blez_op:           /* not really i_format */
+       case blezl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] <= 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bgtz_op:
+       case bgtzl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] > 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+               /*
+                * And now the FPA/cp1 branch instructions.
+                */
+       case cop1_op:
+               printk("%s: unsupported cop1_op\n", __func__);
+               break;
+       }
+
+       return nextpc;
+
+unaligned:
+       printk("%s: unaligned epc\n", __func__);
+       return nextpc;
+
+sigill:
+       printk("%s: DSP branch but not DSP ASE\n", __func__);
+       return nextpc;
+}
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long branch_pc;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (cause & CAUSEF_BD) {
+               branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
+               if (branch_pc == KVM_INVALID_INST) {
+                       er = EMULATE_FAIL;
+               } else {
+                       vcpu->arch.pc = branch_pc;
+                       kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+               }
+       } else
+               vcpu->arch.pc += 4;
+
+       kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+       return er;
+}
+
+/* Every time the Compare register is written, we need to decide when to fire
+ * the timer that delivers timer ticks to the guest.
+ */
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+
+       /* If COUNT is enabled */
+       if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
+               hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+               hrtimer_start(&vcpu->arch.comparecount_timer,
+                             ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
+       } else {
+               hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+               kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+                         kvm_read_c0_guest_epc(cop0));
+               kvm_clear_c0_guest_status(cop0, ST0_EXL);
+               vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+       } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+               kvm_clear_c0_guest_status(cop0, ST0_ERL);
+               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+       } else {
+               printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+                      vcpu->arch.pc);
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+
+       kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+                 vcpu->arch.pending_exceptions);
+
+       ++vcpu->stat.wait_exits;
+       trace_kvm_exit(vcpu, WAIT_EXITS);
+       if (!vcpu->arch.pending_exceptions) {
+               vcpu->arch.wait = 1;
+               kvm_vcpu_block(vcpu);
+
+               /* If we are runnable, then definitely go off to user space to check if any
+                * I/O interrupts are pending.
+                */
+               if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+                       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+                       vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               }
+       }
+
+       return er;
+}
+
+/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
+ * this, if things ever change
+ */
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_FAIL;
+       uint32_t pc = vcpu->arch.pc;
+
+       printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+       return er;
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int index = kvm_read_c0_guest_index(cop0);
+       enum emulation_result er = EMULATE_DONE;
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               printk("%s: illegal index: %d\n", __func__, index);
+               printk
+                   ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+                    pc, index, kvm_read_c0_guest_entryhi(cop0),
+                    kvm_read_c0_guest_entrylo0(cop0),
+                    kvm_read_c0_guest_entrylo1(cop0),
+                    kvm_read_c0_guest_pagemask(cop0));
+               index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+#if 1
+       /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug
+           ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+            pc, index, kvm_read_c0_guest_entryhi(cop0),
+            kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
+            kvm_read_c0_guest_pagemask(cop0));
+
+       return er;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+       int index;
+
+#if 1
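+       /* Pick a pseudo-random victim index within the guest TLB */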
+       get_random_bytes(&index, sizeof(index));
+       index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
+#else
+       index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
+#endif
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               printk("%s: illegal index: %d\n", __func__, index);
+               return EMULATE_FAIL;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+
+#if 1
+       /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug
+           ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+            pc, index, kvm_read_c0_guest_entryhi(cop0),
+            kvm_read_c0_guest_entrylo0(cop0),
+            kvm_read_c0_guest_entrylo1(cop0));
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       long entryhi = kvm_read_c0_guest_entryhi(cop0);
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t pc = vcpu->arch.pc;
+       int index = -1;
+
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+       kvm_write_c0_guest_index(cop0, index);
+
+       kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+                 index);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
+                    struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       int32_t rt, rd, copz, sel, co_bit, op;
+       uint32_t pc = vcpu->arch.pc;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL) {
+               return er;
+       }
+
+       copz = (inst >> 21) & 0x1f;
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+       co_bit = (inst >> 25) & 1;
+
+       /* Verify that the register is valid */
+       if (rd > MIPS_CP0_DESAVE) {
+               printk("Invalid rd: %d\n", rd);
+               er = EMULATE_FAIL;
+               goto done;
+       }
+
+       if (co_bit) {
+               op = (inst) & 0xff;
+
+               switch (op) {
+               case tlbr_op:   /*  Read indexed TLB entry  */
+                       er = kvm_mips_emul_tlbr(vcpu);
+                       break;
+               case tlbwi_op:  /*  Write indexed  */
+                       er = kvm_mips_emul_tlbwi(vcpu);
+                       break;
+               case tlbwr_op:  /*  Write random  */
+                       er = kvm_mips_emul_tlbwr(vcpu);
+                       break;
+               case tlbp_op:   /* TLB Probe */
+                       er = kvm_mips_emul_tlbp(vcpu);
+                       break;
+               case rfe_op:
+                       printk("!!!COP0_RFE!!!\n");
+                       break;
+               case eret_op:
+                       er = kvm_mips_emul_eret(vcpu);
+                       goto dont_update_pc;
+                       break;
+               case wait_op:
+                       er = kvm_mips_emul_wait(vcpu);
+                       break;
+               }
+       } else {
+               switch (copz) {
+               case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       /* Get reg */
+                       if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
+                               vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
+                       } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+                               vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       }
+                       else {
+                               vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug
+                           ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
+                            pc, rd, sel, rt, vcpu->arch.gprs[rt]);
+
+                       break;
+
+               case dmfc_op:
+                       vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+                       break;
+
+               case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       if ((rd == MIPS_CP0_TLB_INDEX)
+                           && (vcpu->arch.gprs[rt] >=
+                               KVM_MIPS_GUEST_TLB_SIZE)) {
+                               printk("Invalid TLB Index: %ld",
+                                      vcpu->arch.gprs[rt]);
+                               er = EMULATE_FAIL;
+                               break;
+                       }
+#define C0_EBASE_CORE_MASK 0xff
+                       if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+                               /* Preserve CORE number */
+                               kvm_change_c0_guest_ebase(cop0,
+                                                         ~(C0_EBASE_CORE_MASK),
+                                                         vcpu->arch.gprs[rt]);
+                               printk("MTCz, cop0->reg[EBASE]: %#lx\n",
+                                      kvm_read_c0_guest_ebase(cop0));
+                       } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+                               uint32_t nasid =
+                                   vcpu->arch.gprs[rt] & ASID_MASK;
+                               if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
+                                   &&
+                                   ((kvm_read_c0_guest_entryhi(cop0) &
+                                     ASID_MASK) != nasid)) {
+
+                                       kvm_debug
+                                           ("MTCz, change ASID from %#lx to %#lx\n",
+                                            kvm_read_c0_guest_entryhi(cop0) &
+                                            ASID_MASK,
+                                            vcpu->arch.gprs[rt] & ASID_MASK);
+
+                                       /* Blow away the shadow host TLBs */
+                                       kvm_mips_flush_host_tlb(1);
+                               }
+                               kvm_write_c0_guest_entryhi(cop0,
+                                                          vcpu->arch.gprs[rt]);
+                       }
+                       /* Are we writing to COUNT */
+                       else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               /* Linux doesn't seem to write into COUNT; silently
+                                * ignore such writes instead of failing emulation.
+                                */
+                               /*er = EMULATE_FAIL; */
+                               goto done;
+                       } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+                               kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
+                                         pc, kvm_read_c0_guest_compare(cop0),
+                                         vcpu->arch.gprs[rt]);
+
+                               /* If we are writing to COMPARE */
+                               /* Clear pending timer interrupt, if any */
+                               kvm_mips_callbacks->dequeue_timer_int(vcpu);
+                               kvm_write_c0_guest_compare(cop0,
+                                                          vcpu->arch.gprs[rt]);
+                       } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+                               kvm_write_c0_guest_status(cop0,
+                                                         vcpu->arch.gprs[rt]);
+                               /* Make sure that CU1 and NMI bits are never set */
+                               kvm_clear_c0_guest_status(cop0,
+                                                         (ST0_CU1 | ST0_NMI));
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       } else {
+                               cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
+                                 rd, sel, cop0->reg[rd][sel]);
+                       break;
+
+               case dmtc_op:
+                       printk
+                           ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+                            vcpu->arch.pc, rt, rd, sel);
+                       er = EMULATE_FAIL;
+                       break;
+
+               case mfmcz_op:
+#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+                       if (rt != 0) {
+                               vcpu->arch.gprs[rt] =
+                                   kvm_read_c0_guest_status(cop0);
+                       }
+                       /* EI */
+                       if (inst & 0x20) {
+                               kvm_debug("[%#lx] mfmcz_op: EI\n",
+                                         vcpu->arch.pc);
+                               kvm_set_c0_guest_status(cop0, ST0_IE);
+                       } else {
+                               kvm_debug("[%#lx] mfmcz_op: DI\n",
+                                         vcpu->arch.pc);
+                               kvm_clear_c0_guest_status(cop0, ST0_IE);
+                       }
+
+                       break;
+
+               case wrpgpr_op:
+                       {
+                               uint32_t css =
+                                   cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+                               uint32_t pss =
+                                   (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+                               /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
+                               if (css || pss) {
+                                       er = EMULATE_FAIL;
+                                       break;
+                               }
+                               kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+                                         vcpu->arch.gprs[rt]);
+                               vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+                       }
+                       break;
+               default:
+                       printk
+                           ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+                            vcpu->arch.pc, copz);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+       }
+
+done:
+       /*
+        * Rollback PC only if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+
+dont_update_pc:
+       /*
+        * This is for special instructions whose emulation
+        * updates the PC, so do not overwrite the PC under
+        * any circumstances
+        */
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+       void *data = run->mmio.data;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
+       switch (op) {
+       case sb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(u8 *) data = vcpu->arch.gprs[rt];
+               kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
+                         *(uint8_t *) data);
+
+               break;
+
+       case sw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint32_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint32_t *) data);
+               break;
+
+       case sh_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint16_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint16_t *) data);
+               break;
+
+       default:
+               printk("Store not yet supported\n");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       /*
+        * Rollback PC if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+                     struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
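+       /*
+        * Remember the faulting cause and the destination register so that
+        * kvm_mips_complete_mmio_load() can write the result back and advance
+        * the PC once userspace has filled in run->mmio.data.
+        */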
+       vcpu->arch.pending_load_cause = cause;
+       vcpu->arch.io_gpr = rt;
+
+       switch (op) {
+       case lw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 0;
+               break;
+
+       case lh_op:
+       case lhu_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 0;
+
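+               /*
+                * mmio_needed doubles as a sign-extension flag: 2 for the
+                * signed forms (lh/lb), 1 for the unsigned ones;
+                * kvm_mips_complete_mmio_load() checks it when writing the
+                * result back.
+                */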
+               if (op == lh_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       case lbu_op:
+       case lb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lb_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       default:
+               printk("Load not yet supported\n");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
+{
+       unsigned long offset = (va & ~PAGE_MASK);
+       struct kvm *kvm = vcpu->kvm;
+       unsigned long pa;
+       gfn_t gfn;
+       pfn_t pfn;
+
+       gfn = va >> PAGE_SHIFT;
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               return -1;
+       }
+       pfn = kvm->arch.guest_pmap[gfn];
+       pa = (pfn << PAGE_SHIFT) | offset;
+
+       printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
+
+       mips32_SyncICache(CKSEG0ADDR(pa), 32);
+       return 0;
+}
+
+#define MIPS_CACHE_OP_INDEX_INV         0x0
+#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
+#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
+#define MIPS_CACHE_OP_IMP               0x3
+#define MIPS_CACHE_OP_HIT_INV           0x4
+#define MIPS_CACHE_OP_FILL_WB_INV       0x5
+#define MIPS_CACHE_OP_HIT_HB            0x6
+#define MIPS_CACHE_OP_FETCH_LOCK        0x7
+
+#define MIPS_CACHE_ICACHE               0x0
+#define MIPS_CACHE_DCACHE               0x1
+#define MIPS_CACHE_SEC                  0x3
+
+enum emulation_result
+kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       extern void (*r4k_blast_dcache) (void);
+       extern void (*r4k_blast_icache) (void);
+       enum emulation_result er = EMULATE_DONE;
+       int32_t offset, cache, op_inst, op, base;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long va;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
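+       /*
+        * For the CACHE instruction the 5-bit rt field is split: the low two
+        * bits select the cache (I/D/secondary) and the upper three bits
+        * select the operation, matching the MIPS_CACHE_* defines above.
+        */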
+       base = (inst >> 21) & 0x1f;
+       op_inst = (inst >> 16) & 0x1f;
+       offset = inst & 0xffff;
+       cache = (inst >> 16) & 0x3;
+       op = (inst >> 18) & 0x7;
+
+       va = arch->gprs[base] + offset;
+
+       kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                 cache, op, base, arch->gprs[base], offset);
+
+       /*
+        * Treat INDEX_INV as a no-op; Linux issues it at startup to invalidate
+        * the caches entirely by stepping through all the ways/indexes.
+        */
+       if (op == MIPS_CACHE_OP_INDEX_INV) {
+               kvm_debug
+                   ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                    vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+                    arch->gprs[base], offset);
+
+               if (cache == MIPS_CACHE_DCACHE)
+                       r4k_blast_dcache();
+               else if (cache == MIPS_CACHE_ICACHE)
+                       r4k_blast_icache();
+               else {
+                       printk("%s: unsupported CACHE INDEX operation\n",
+                              __func__);
+                       return EMULATE_FAIL;
+               }
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+               goto done;
+       }
+
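+       /*
+        * The host TLB probes/refills below operate on this CPU's TLB and
+        * ASIDs, so the task must not migrate until they are done.
+        */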
+       preempt_disable();
+       if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+
+               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+               }
+       } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+                  KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+               int index;
+
+               /* If an entry already exists then skip */
+               if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
+                       goto skip_fault;
+               }
+
+               /* If address not in the guest TLB, then give the guest a fault, the
+                * resulting handler will do the right thing
+                */
+               index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK));
+
+               if (index < 0) {
+                       vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
+                       vcpu->arch.host_cp0_badvaddr = va;
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
+                                                        vcpu);
+                       preempt_enable();
+                       goto dont_update_pc;
+               } else {
+                       struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+                       /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+                       if (!TLB_IS_VALID(*tlb, va)) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
+                                                               run, vcpu);
+                               preempt_enable();
+                               goto dont_update_pc;
+                       } else {
+                               /* We fault an entry from the guest tlb to the shadow host TLB */
+                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+                                                                    NULL,
+                                                                    NULL);
+                       }
+               }
+       } else {
+               printk
+                   ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                    cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+
+       }
+
+skip_fault:
+       /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
+       if (cache == MIPS_CACHE_DCACHE
+           && (op == MIPS_CACHE_OP_FILL_WB_INV
+               || op == MIPS_CACHE_OP_HIT_INV)) {
+               flush_dcache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /* Replace the CACHE instruction with a SYNCI; not the same, but it avoids a trap */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+               flush_dcache_line(va);
+               flush_icache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /* Replace the CACHE instruction, with a SYNCI */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else {
+               printk
+                   ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                    cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+       }
+
+       preempt_enable();
+
+dont_update_pc:
+       /*
+        * Rollback PC
+        */
+       vcpu->arch.pc = curr_pc;
+done:
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+                     struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t inst;
+
+       /*
+        *  Fetch the instruction.
+        */
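+       /*
+        * If the exception was taken in a branch delay slot (Cause.BD), opc
+        * points at the branch, so the faulting instruction is the next word.
+        */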
+       if (cause & CAUSEF_BD) {
+               opc += 1;
+       }
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       switch (((union mips_instruction)inst).r_format.opcode) {
+       case cop0_op:
+               er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+               break;
+       case sb_op:
+       case sh_op:
+       case sw_op:
+               er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+               break;
+       case lb_op:
+       case lbu_op:
+       case lhu_op:
+       case lh_op:
+       case lw_op:
+               er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+               break;
+
+       case cache_op:
+               ++vcpu->stat.cache_exits;
+               trace_kvm_exit(vcpu, CACHE_EXITS);
+               er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+               break;
+
+       default:
+               printk("Instruction emulation not supported (%p/%#x)\n", opc,
+                      inst);
+               kvm_arch_vcpu_dump_regs(vcpu);
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_SYSCALL << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               printk("Trying to deliver SYSCALL when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
+                           struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
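+       /*
+        * With EXL clear the miss is delivered through the guest TLB refill
+        * vector at offset 0x0; with EXL already set the general exception
+        * vector at 0x180 is used instead.
+        */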
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
+                          struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi =
+               (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
+                           struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
+                          struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+/* TLBMOD: store into address matching TLB with Dirty bit off */
+enum emulation_result
+kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+#ifdef DEBUG
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+       int index;
+#endif
+       enum emulation_result er = EMULATE_DONE;
+
+#ifdef DEBUG
+       /*
+        * If the address is not in the guest TLB, then we are in trouble
+        */
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+       if (index < 0) {
+               /* XXXKYMA Invalidate and retry */
+               kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
+               kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
+                       __func__, entryhi);
+               kvm_mips_dump_guest_tlbs(vcpu);
+               kvm_mips_dump_host_tlbs();
+               return EMULATE_FAIL;
+       }
+#endif
+
+       er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+       }
+
+       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+       kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_RES_INST << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_err("Trying to deliver RI when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_BREAK << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               printk("Trying to deliver BP when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+/*
+ * ll/sc, rdhwr, sync emulation
+ */
+
+#define OPCODE 0xfc000000
+#define BASE   0x03e00000
+#define RT     0x001f0000
+#define OFFSET 0x0000ffff
+#define LL     0xc0000000
+#define SC     0xe0000000
+#define SPEC0  0x00000000
+#define SPEC3  0x7c000000
+#define RD     0x0000f800
+#define FUNC   0x0000003f
+#define SYNC   0x0000000f
+#define RDHWR  0x0000003b
+
+enum emulation_result
+kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+                  struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+       uint32_t inst;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       /*
+        *  Fetch the instruction.
+        */
+       if (cause & CAUSEF_BD)
+               opc += 1;
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       if (inst == KVM_INVALID_INST) {
+               printk("%s: Cannot get inst @ %p\n", __func__, opc);
+               return EMULATE_FAIL;
+       }
+
+       if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+               int rd = (inst & RD) >> 11;
+               int rt = (inst & RT) >> 16;
+               switch (rd) {
+               case 0: /* CPU number */
+                       arch->gprs[rt] = 0;
+                       break;
+               case 1: /* SYNCI length */
+                       arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+                                            current_cpu_data.icache.linesz);
+                       break;
+               case 2: /* Read count register */
+                       printk("RDHWR: Count register\n");
+                       arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
+                       break;
+               case 3: /* Count register resolution */
+                       switch (current_cpu_data.cputype) {
+                       case CPU_20KC:
+                       case CPU_25KF:
+                               arch->gprs[rt] = 1;
+                               break;
+                       default:
+                               arch->gprs[rt] = 2;
+                       }
+                       break;
+               case 29:
+#if 1
+                       arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+#else
+                       /* UserLocal not implemented */
+                       er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+#endif
+                       break;
+
+               default:
+                       printk("RDHWR not supported\n");
+                       er = EMULATE_FAIL;
+                       break;
+               }
+       } else {
+               printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
+               er = EMULATE_FAIL;
+       }
+
+       /*
+        * Rollback PC only if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+       return er;
+}
+
+enum emulation_result
+kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+
+       if (run->mmio.len > sizeof(*gpr)) {
+               printk("Bad MMIO length: %d\n", run->mmio.len);
+               er = EMULATE_FAIL;
+               goto done;
+       }
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
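+       /*
+        * Sign- or zero-extend the MMIO result into the destination GPR
+        * according to the access width and the mmio_needed flag recorded
+        * when the load was emulated.
+        */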
+       switch (run->mmio.len) {
+       case 4:
+               *gpr = *(int32_t *) run->mmio.data;
+               break;
+
+       case 2:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int16_t *) run->mmio.data;
+               else
+                       *gpr = *(uint16_t *) run->mmio.data;
+
+               break;
+       case 1:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int8_t *) run->mmio.data;
+               else
+                       *gpr = *(u8 *) run->mmio.data;
+               break;
+       }
+
+       if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+               kvm_debug
+                   ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+                    vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+                    vcpu->mmio_needed);
+
+done:
+       return er;
+}
+
+static enum emulation_result
+kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
+                    struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+               kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+               kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+                         exccode, kvm_read_c0_guest_epc(cop0),
+                         kvm_read_c0_guest_badvaddr(cop0));
+       } else {
+               printk("Trying to deliver EXC when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+       int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
+       if (usermode) {
+               switch (exccode) {
+               case T_INT:
+               case T_SYSCALL:
+               case T_BREAK:
+               case T_RES_INST:
+                       break;
+
+               case T_COP_UNUSABLE:
+                       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+                               er = EMULATE_PRIV_FAIL;
+                       break;
+
+               case T_TLB_MOD:
+                       break;
+
+               case T_TLB_LD_MISS:
+                       /* If we are accessing guest kernel space, send an address error exception to the guest */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               printk("%s: LD MISS @ %#lx\n", __func__,
+                                      badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_TLB_ST_MISS:
+                       /* If we are accessing guest kernel space, send an address error exception to the guest */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               printk("%s: ST MISS @ %#lx\n", __func__,
+                                      badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_ADDR_ERR_ST:
+                       printk("%s: address error ST @ %#lx\n", __func__,
+                              badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               case T_ADDR_ERR_LD:
+                       printk("%s: address error LD @ %#lx\n", __func__,
+                              badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               default:
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               }
+       }
+
+       if (er == EMULATE_PRIV_FAIL) {
+               kvm_mips_emulate_exc(cause, opc, run, vcpu);
+       }
+       return er;
+}
+
+/* A User Address (UA) fault can happen if:
+ * (1) the TLB entry is not present/valid in both the guest and shadow host
+ *     TLBs; in this case we pass the fault on to the guest kernel and let it
+ *     handle it.
+ * (2) the TLB entry is present in the guest TLB but not in the shadow host
+ *     TLB; in this case we inject the entry from the guest TLB into the
+ *     shadow host TLB.
+ */
+enum emulation_result
+kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long va = vcpu->arch.host_cp0_badvaddr;
+       int index;
+
+       kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
+                 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+
+       /* KVM would not have got the exception if this entry was valid in the
+        * shadow host TLB. Check the guest TLB: if the entry is not there, send
+        * the guest an exception; the guest exception handler should then inject
+        * an entry into the guest TLB.
+        */
+       index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+                       (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
+       if (index < 0) {
+               if (exccode == T_TLB_LD_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+               } else if (exccode == T_TLB_ST_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+               } else {
+                       printk("%s: invalid exc code: %d\n", __func__, exccode);
+                       er = EMULATE_FAIL;
+               }
+       } else {
+               struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+               /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+               if (!TLB_IS_VALID(*tlb, va)) {
+                       if (exccode == T_TLB_LD_MISS) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+                                                               vcpu);
+                       } else if (exccode == T_TLB_ST_MISS) {
+                               er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+                                                               vcpu);
+                       } else {
+                               printk("%s: invalid exc code: %d\n", __func__,
+                                      exccode);
+                               er = EMULATE_FAIL;
+                       }
+               } else {
+#ifdef DEBUG
+                       kvm_debug
+                           ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+                            tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+                       /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+                                                            NULL);
+               }
+       }
+
+       return er;
+}
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644 (file)
index 0000000..1e5de16
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupt delivery
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       /* Set the Cause bits to reflect the pending timer interrupt;
+        * the EXC code will be set when we are actually
+        * delivering the interrupt.
+        */
+       kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+       /* Queue up an INT exception for the core */
+       kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+       kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void
+kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+
+       /* Set the Cause bits to reflect the pending IO interrupt;
+        * the EXC code will be set when we are actually
+        * delivering the interrupt.
+        */
+       switch (intr) {
+       case 2:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               /* Queue up an INT exception for the core */
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case 3:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case 4:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+
+}
+
+void
+kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                          struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+       switch (intr) {
+       case -2:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case -3:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case -4:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+int
+kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                       uint32_t cause)
+{
+       int allowed = 0;
+       uint32_t exccode;
+
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
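+       /*
+        * An interrupt may only be delivered if interrupts are globally
+        * enabled (Status.IE), the guest is not in exception/error mode
+        * (Status.EXL/ERL clear) and the matching IM bit is set.
+        */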
+       switch (priority) {
+       case MIPS_EXC_INT_TIMER:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IO:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_1:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_2:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       /* Are we allowed to deliver the interrupt? */
+       if (allowed) {
+
+               if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+                       /* save old pc */
+                       kvm_write_c0_guest_epc(cop0, arch->pc);
+                       kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+                       if (cause & CAUSEF_BD)
+                               kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+                       else
+                               kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+                       kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+               } else
+                       kvm_err("Trying to deliver interrupt when EXL is already set\n");
+
+               kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* XXXSL Set PC to the interrupt exception entry point */
+               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+                       arch->pc = KVM_GUEST_KSEG0 + 0x200;
+               else
+                       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+               clear_bit(priority, &vcpu->arch.pending_exceptions);
+       }
+
+       return allowed;
+}
+
+int
+kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                     uint32_t cause)
+{
+       return 1;
+}
+
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long *pending = &vcpu->arch.pending_exceptions;
+       unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+       unsigned int priority;
+
+       if (!(*pending) && !(*pending_clr))
+               return;
+
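+       /*
+        * Two passes: first retire interrupts queued for clearing
+        * (pending_clr), then deliver those still pending, scanning each
+        * bitmap from the lowest set bit upwards.
+        */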
+       priority = __ffs(*pending_clr);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending_clr,
+                                        BITS_PER_BYTE * sizeof(*pending_clr),
+                                        priority + 1);
+       }
+
+       priority = __ffs(*pending);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending,
+                                        BITS_PER_BYTE * sizeof(*pending),
+                                        priority + 1);
+       }
+
+}
+
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644 (file)
index 0000000..20da7d2
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupts
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/* MIPS exception priorities: exceptions (including interrupts) are queued up
+ * for the guest in the order specified by their priorities.
+ */
+
+#define MIPS_EXC_RESET              0
+#define MIPS_EXC_SRESET             1
+#define MIPS_EXC_DEBUG_ST           2
+#define MIPS_EXC_DEBUG              3
+#define MIPS_EXC_DDB                4
+#define MIPS_EXC_NMI                5
+#define MIPS_EXC_MCHK               6
+#define MIPS_EXC_INT_TIMER          7
+#define MIPS_EXC_INT_IO             8
+#define MIPS_EXC_EXECUTE            9
+#define MIPS_EXC_INT_IPI_1          10
+#define MIPS_EXC_INT_IPI_2          11
+#define MIPS_EXC_MAX                12
+/* XXXSL More to follow */
+
+#define C_TI        (_ULCAST_(1) << 30)
+
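+/* With these set to 0, kvm_mips_deliver_interrupts() stops after the first
+ * interrupt that is actually delivered/cleared in each pass; a non-zero value
+ * would drain the whole bitmap in one call.
+ */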
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644 (file)
index 0000000..86d3b4c
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/*
+ * Define opcode values not defined in <asm/inst.h>
+ */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define     mfmcz_op         0x0b      /*  01011  */
+#define     wrpgpr_op        0x0e      /*  01110  */
+
+/*  COP0 opcodes (only if COP0 and CO=1):  */
+#define     wait_op               0x20 /*  100000  */
+
+#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644 (file)
index 0000000..075904b
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: COP0 access histogram
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/kvm_host.h>
+
+char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
+       "WAIT",
+       "CACHE",
+       "Signal",
+       "Interrupt",
+       "COP0/1 Unusable",
+       "TLB Mod",
+       "TLB Miss (LD)",
+       "TLB Miss (ST)",
+       "Address Err (ST)",
+       "Address Error (LD)",
+       "System Call",
+       "Reserved Inst",
+       "Break Inst",
+       "D-Cache Flushes",
+};
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+       "Index",
+       "Random",
+       "EntryLo0",
+       "EntryLo1",
+       "Context",
+       "PG Mask",
+       "Wired",
+       "HWREna",
+       "BadVAddr",
+       "Count",
+       "EntryHI",
+       "Compare",
+       "Status",
+       "Cause",
+       "EXC PC",
+       "PRID",
+       "Config",
+       "LLAddr",
+       "Watch Lo",
+       "Watch Hi",
+       "X Context",
+       "Reserved",
+       "Impl Dep",
+       "Debug",
+       "DEPC",
+       "PerfCnt",
+       "ErrCtl",
+       "CacheErr",
+       "TagLo",
+       "TagHi",
+       "ErrorEPC",
+       "DESAVE"
+};
+
+int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+       int i, j;
+
+       printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+       for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+               for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+                       if (vcpu->arch.cop0->stat[i][j])
+                               printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+                                      vcpu->arch.cop0->stat[i][j]);
+               }
+       }
+#endif
+
+       return 0;
+}
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644 (file)
index 0000000..c777dd3
--- /dev/null
@@ -0,0 +1,949 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
+* TLB handlers run from KSEG0
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+#include <linux/srcu.h>
+
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB    0
+#define KVM_GUEST_SP_TLB    1
+
+#define PRIx64 "llx"
+
+/* Invalidate a TLB entry by loading EntryHi with a distinct unmapped (CKSEG0) address per index, so it can never match */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+atomic_t kvm_mips_instance;
+EXPORT_SYMBOL(kvm_mips_instance);
+
+/* These function pointers are initialized once the KVM module is loaded */
+pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
+EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+
+void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+
+bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+
+uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+}
+
+
+uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+}
+
+inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->kvm->arch.commpage_tlb;
+}
+
+
+/*
+ * Structure defining a TLB entry data set.
+ */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+       unsigned long old_entryhi;
+       unsigned long old_pagemask;
+       struct kvm_mips_tlb tlb;
+       unsigned long flags;
+       int i;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       printk("HOST TLBs:\n");
+       printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+
+       for (i = 0; i < current_cpu_data.tlbsize; i++) {
+               write_c0_index(i);
+               mtc0_tlbw_hazard();
+
+               tlb_read();
+               tlbw_use_hazard();
+
+               tlb.tlb_hi = read_c0_entryhi();
+               tlb.tlb_lo0 = read_c0_entrylo0();
+               tlb.tlb_lo1 = read_c0_entrylo1();
+               tlb.tlb_mask = read_c0_pagemask();
+
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       local_irq_restore(flags);
+}
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_mips_tlb tlb;
+       int i;
+
+       printk("Guest TLBs:\n");
+       printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               tlb = vcpu->arch.guest_tlb[i];
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+}
+
+void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
+{
+       int i;
+       volatile struct kvm_mips_tlb tlb;
+
+       printk("Shadow TLBs:\n");
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+}
+
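+/* Resolve the host pfn backing @gfn and cache it in kvm->arch.guest_pmap */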
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+       int srcu_idx, err = 0;
+       pfn_t pfn;
+
+       if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+               return 0;
+
+       srcu_idx = srcu_read_lock(&kvm->srcu);
+       pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+       if (kvm_mips_is_error_pfn(pfn)) {
+               kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+               err = -EFAULT;
+               goto out;
+       }
+
+       kvm->arch.guest_pmap[gfn] = pfn;
+out:
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+       return err;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+       unsigned long gva)
+{
+       gfn_t gfn;
+       uint32_t offset = gva & ~PAGE_MASK;
+       struct kvm *kvm = vcpu->kvm;
+
+       if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+               kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+                       __builtin_return_address(0), gva);
+               return KVM_INVALID_PAGE;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+                       gva);
+               return KVM_INVALID_PAGE;
+       }
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+               return KVM_INVALID_ADDR;
+
+       return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+/* set flush_dcache_mask == 0 if no dcache flush required */
+int
+kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+       unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
+{
+       unsigned long flags;
+       unsigned long old_entryhi;
+       volatile int idx;
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+       write_c0_entryhi(entryhi);
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx > current_cpu_data.tlbsize) {
+               kvm_err("%s: Invalid Index: %d\n", __func__, idx);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+
+       if (idx < 0) {
+               idx = read_c0_random() % current_cpu_data.tlbsize;
+               write_c0_index(idx);
+               mtc0_tlbw_hazard();
+       }
+       write_c0_entrylo0(entrylo0);
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+
+       tlb_write_indexed();
+       tlbw_use_hazard();
+
+#ifdef DEBUG
+       if (debug) {
+               kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
+                         "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+                         vcpu->arch.pc, idx, read_c0_entryhi(),
+                         read_c0_entrylo0(), read_c0_entrylo1());
+       }
+#endif
+
+       /* Flush D-cache */
+       if (flush_dcache_mask) {
+               if (entrylo0 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
+               }
+               if (entrylo1 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
+                               (0x1 << PAGE_SHIFT));
+               }
+       }
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+       return 0;
+}
+
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+       struct kvm_vcpu *vcpu)
+{
+       gfn_t gfn;
+       pfn_t pfn0, pfn1;
+       unsigned long vaddr = 0;
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       int even;
+       struct kvm *kvm = vcpu->kvm;
+       const int flush_dcache_mask = 0;
+
+
+       if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+               kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+                       gfn, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+       even = !(gfn & 0x1);
+       vaddr = badvaddr & (PAGE_MASK << 1);
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+               return -1;
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+               return -1;
+
+       if (even) {
+               pfn0 = kvm->arch.guest_pmap[gfn];
+               pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
+       } else {
+               pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
+               pfn1 = kvm->arch.guest_pmap[gfn];
+       }
+
+       entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      flush_dcache_mask);
+}
+
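+/* Map the guest commpage into the host TLB at the reserved commpage index */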
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+       struct kvm_vcpu *vcpu)
+{
+       pfn_t pfn0, pfn1;
+       unsigned long flags, old_entryhi = 0, vaddr = 0;
+       unsigned long entrylo0 = 0, entrylo1 = 0;
+
+
+       pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+       pfn1 = 0;
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+       entrylo1 = 0;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       vaddr = badvaddr & (PAGE_MASK << 1);
+       write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
+       mtc0_tlbw_hazard();
+       write_c0_entrylo0(entrylo0);
+       mtc0_tlbw_hazard();
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+       write_c0_index(kvm_mips_get_commpage_asid(vcpu));
+       mtc0_tlbw_hazard();
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+#ifdef DEBUG
+       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+            vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+            read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+
+       return 0;
+}
+
+int
+kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+       struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
+{
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       struct kvm *kvm = vcpu->kvm;
+       pfn_t pfn0, pfn1;
+
+
+       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+               pfn0 = 0;
+               pfn1 = 0;
+       } else {
+               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
+                       return -1;
+
+               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
+                       return -1;
+
+               pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
+               pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+       }
+
+       if (hpa0)
+               *hpa0 = pfn0 << PAGE_SHIFT;
+
+       if (hpa1)
+               *hpa1 = pfn1 << PAGE_SHIFT;
+
+       /* Get attributes from the Guest TLB */
+       entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+                       kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+                       (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+                       (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+
+#ifdef DEBUG
+       kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+                 tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      tlb->tlb_mask);
+}
+
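+/* Search the guest TLB for an entry matching @entryhi; returns its index or -1 */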
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+       int i;
+       int index = -1;
+       struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
+                       (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
+                       index = i;
+                       break;
+               }
+       }
+
+#ifdef DEBUG
+       kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+                 __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
+#endif
+
+       return index;
+}
+
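+/* Probe the host TLB for @vaddr under the guest's current ASID; returns the probe index */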
+int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
+{
+       unsigned long old_entryhi, flags;
+       volatile int idx;
+
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu))
+               write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
+       else {
+               write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+       }
+
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+#ifdef DEBUG
+       kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
+#endif
+
+       return idx;
+}
+
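+/* Invalidate the host TLB entry, if any, that maps guest user address @va */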
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+       int idx;
+       unsigned long flags, old_entryhi;
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx >= current_cpu_data.tlbsize)
+               BUG();
+
+       if (idx > 0) {
+               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+#ifdef DEBUG
+       if (idx > 0) {
+               kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
+                         (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
+       }
+#endif
+
+       return 0;
+}
+
+/* XXXKYMA: Fix: guest USER/KERNEL no longer share the same ASID */
+int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
+{
+       unsigned long flags, old_entryhi;
+
+       if (index >= current_cpu_data.tlbsize)
+               BUG();
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi(UNIQUE_ENTRYHI(index));
+       mtc0_tlbw_hazard();
+
+       write_c0_index(index);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo0(0);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo1(0);
+       mtc0_tlbw_hazard();
+
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+       return 0;
+}
+
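+/* Flush the host TLB; when @skip_kseg0 is set, entries mapping guest KSEG0 are preserved */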
+void kvm_mips_flush_host_tlb(int skip_kseg0)
+{
+       unsigned long flags;
+       unsigned long old_entryhi, entryhi;
+       unsigned long old_pagemask;
+       int entry = 0;
+       int maxentry = current_cpu_data.tlbsize;
+
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       /* Blast 'em all away. */
+       for (entry = 0; entry < maxentry; entry++) {
+
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+
+               if (skip_kseg0) {
+                       tlb_read();
+                       tlbw_use_hazard();
+
+                       entryhi = read_c0_entryhi();
+
+                       /* Don't blow away guest kernel entries */
+                       if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
+                               continue;
+                       }
+               }
+
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               mtc0_tlbw_hazard();
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+}
+
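+/* Allocate a new ASID for @mm on @cpu, starting a new ASID cycle when the ASID space wraps */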
+void
+kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+                       struct kvm_vcpu *vcpu)
+{
+       unsigned long asid = asid_cache(cpu);
+
+       if (!((asid += ASID_INC) & ASID_MASK)) {
+               if (cpu_has_vtag_icache) {
+                       flush_icache_all();
+               }
+
+               kvm_local_flush_tlb_all();      /* start new asid cycle */
+
+               if (!asid)      /* fix version if needed */
+                       asid = ASID_FIRST_VERSION;
+       }
+
+       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
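+/* Save the current host TLB contents into this vcpu's shadow TLB for this CPU */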
+void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       unsigned long old_entryhi;
+       unsigned long old_pagemask;
+       int entry = 0;
+       int cpu = smp_processor_id();
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+               tlb_read();
+               tlbw_use_hazard();
+
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
+       }
+
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+
+       local_irq_restore(flags);
+
+}
+
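+/* Reload the host TLB from this vcpu's shadow TLB for this CPU */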
+void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       unsigned long old_ctx;
+       int entry;
+       int cpu = smp_processor_id();
+
+       local_irq_save(flags);
+
+       old_ctx = read_c0_entryhi();
+
+       for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+               write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
+               mtc0_tlbw_hazard();
+               write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
+               write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               tlbw_use_hazard();
+       }
+
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
+       local_irq_restore(flags);
+}
+
+
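+/* Invalidate every entry in the local CPU's TLB */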
+void kvm_local_flush_tlb_all(void)
+{
+       unsigned long flags;
+       unsigned long old_ctx;
+       int entry = 0;
+
+       local_irq_save(flags);
+       /* Save old context and create impossible VPN2 value */
+       old_ctx = read_c0_entryhi();
+       write_c0_entrylo0(0);
+       write_c0_entrylo1(0);
+
+       /* Blast 'em all away. */
+       while (entry < current_cpu_data.tlbsize) {
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+               tlb_write_indexed();
+               entry++;
+       }
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
+
+       local_irq_restore(flags);
+}
+
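+/* Pre-fill the per-CPU shadow TLB arrays with unique, invalid entries */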
+void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
+{
+       int cpu, entry;
+
+       for_each_possible_cpu(cpu) {
+               for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
+                           UNIQUE_ENTRYHI(entry);
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
+                           read_c0_pagemask();
+#ifdef DEBUG
+                       kvm_debug
+                           ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
+                            cpu, entry,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+#endif
+               }
+       }
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       unsigned long flags;
+       int newasid = 0;
+
+#ifdef DEBUG
+       kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+#endif
+
+       /* Allocate new kernel and user ASIDs if needed */
+
+       local_irq_save(flags);
+
+       if (((vcpu->arch.
+             guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
+               kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+               vcpu->arch.guest_kernel_asid[cpu] =
+                   vcpu->arch.guest_kernel_mm.context.asid[cpu];
+               kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+               vcpu->arch.guest_user_asid[cpu] =
+                   vcpu->arch.guest_user_mm.context.asid[cpu];
+               newasid++;
+
+               kvm_info("[%d]: cpu_context: %#lx\n", cpu,
+                        cpu_context(cpu, current->mm));
+               kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+                        cpu, vcpu->arch.guest_kernel_asid[cpu]);
+               kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+                        vcpu->arch.guest_user_asid[cpu]);
+       }
+
+       if (vcpu->arch.last_sched_cpu != cpu) {
+               kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
+                        vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+       }
+
+       /* Only reload shadow host TLB if new ASIDs haven't been allocated */
+#if 0
+       if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
+               kvm_mips_flush_host_tlb(0);
+               kvm_shadow_tlb_load(vcpu);
+       }
+#endif
+
+       if (!newasid) {
+               /* If we preempted while the guest was executing, then reload the pre-empted ASID */
+               if (current->flags & PF_VCPU) {
+                       write_c0_entryhi(vcpu->arch.
+                                        preempt_entryhi & ASID_MASK);
+                       ehb();
+               }
+       } else {
+               /* New ASIDs were allocated for the VM */
+
+               /* Were we in guest context? If so, the pre-empted ASID is no
+                * longer valid; set it to what it should be based on the mode
+                * of the guest (kernel/user).
+                */
+               if (current->flags & PF_VCPU) {
+                       if (KVM_GUEST_KERNEL_MODE(vcpu))
+                               write_c0_entryhi(vcpu->arch.
+                                                guest_kernel_asid[cpu] &
+                                                ASID_MASK);
+                       else
+                               write_c0_entryhi(vcpu->arch.
+                                                guest_user_asid[cpu] &
+                                                ASID_MASK);
+                       ehb();
+               }
+       }
+
+       local_irq_restore(flags);
+
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       uint32_t cpu;
+
+       local_irq_save(flags);
+
+       cpu = smp_processor_id();
+
+
+       vcpu->arch.preempt_entryhi = read_c0_entryhi();
+       vcpu->arch.last_sched_cpu = cpu;
+
+#if 0
+       if ((atomic_read(&kvm_mips_instance) > 1)) {
+               kvm_shadow_tlb_put(vcpu);
+       }
+#endif
+
+       if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+            ASID_VERSION_MASK)) {
+               kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
+                         cpu_context(cpu, current->mm));
+               drop_mmu_context(current->mm, cpu);
+       }
+       write_c0_entryhi(cpu_asid(cpu, current->mm));
+       ehb();
+
+       local_irq_restore(flags);
+}
+
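+/*
+ * Fetch the guest instruction at @opc, faulting the mapping into the host TLB
+ * from the guest TLB if necessary.  Returns KVM_INVALID_INST on failure.
+ */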
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long paddr, flags;
+       uint32_t inst;
+       int index;
+
+       if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
+           KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
+               if (index >= 0) {
+                       inst = *(opc);
+               } else {
+                       index =
+                           kvm_mips_guest_tlb_lookup(vcpu,
+                                                     ((unsigned long) opc & VPN2_MASK)
+                                                     |
+                                                     (kvm_read_c0_guest_entryhi
+                                                      (cop0) & ASID_MASK));
+                       if (index < 0) {
+                               kvm_err
+                                   ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+                                    __func__, opc, vcpu, read_c0_entryhi());
+                               kvm_mips_dump_host_tlbs();
+                               local_irq_restore(flags);
+                               return KVM_INVALID_INST;
+                       }
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+                                                            &vcpu->arch.
+                                                            guest_tlb[index],
+                                                            NULL, NULL);
+                       inst = *(opc);
+               }
+               local_irq_restore(flags);
+       } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               paddr =
+                   kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+                                                        (unsigned long) opc);
+               inst = *(uint32_t *) CKSEG0ADDR(paddr);
+       } else {
+               kvm_err("%s: illegal address: %p\n", __func__, opc);
+               return KVM_INVALID_INST;
+       }
+
+       return inst;
+}
+
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+EXPORT_SYMBOL(kvm_shadow_tlb_put);
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+EXPORT_SYMBOL(kvm_shadow_tlb_load);
+EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+EXPORT_SYMBOL(kvm_get_inst);
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644 (file)
index 0000000..30d7253
--- /dev/null
@@ -0,0 +1,432 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+
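+/* Trap-and-emulate GVA->GPA: only unmapped KSEG0/KSEG1 addresses translate directly */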
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+       gpa_t gpa;
+       uint32_t kseg = KSEGX(gva);
+
+       if ((kseg == CKSEG0) || (kseg == CKSEG1))
+               gpa = CPHYSADDR(gva);
+       else {
+               printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+               kvm_mips_dump_host_tlbs();
+               gpa = KVM_INVALID_ADDR;
+       }
+
+#ifdef DEBUG
+       kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+#endif
+
+       return gpa;
+}
+
+
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+               er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+       } else
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+
+       switch (er) {
+       case EMULATE_DONE:
+               ret = RESUME_GUEST;
+               break;
+
+       case EMULATE_FAIL:
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       case EMULATE_WAIT:
+               run->exit_reason = KVM_EXIT_INTR;
+               ret = RESUME_HOST;
+               break;
+
+       default:
+               BUG();
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+           || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug
+                   ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+#endif
+               er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
+
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
+                * using HIGHMEM. Need to address this in a HIGHMEM kernel
+                */
+               printk
+                   ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       } else {
+               printk
+                   ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+           && KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug
+                   ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+#endif
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /* All KSEG0 faults are handled by KVM, as the guest kernel does not
+                * expect to ever get them
+                */
+               if (kvm_mips_handle_kseg0_tlb_fault
+                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err
+                   ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+           && KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
+                         vcpu->arch.pc, badvaddr);
+#endif
+
+               /* User Address (UA) fault.  This can happen if:
+                * (1) The TLB entry is not present/valid in either the guest TLB or the
+                *     shadow host TLB; in this case we pass the fault on to the guest
+                *     kernel and let it handle it.
+                * (2) The TLB entry is present in the guest TLB but not in the shadow host
+                *     TLB; in this case we inject the entry from the guest TLB into the
+                *     shadow host TLB.
+                */
+
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               if (kvm_mips_handle_kseg0_tlb_fault
+                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu)
+           && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+#ifdef DEBUG
+               kvm_debug("Emulate Store to MMIO space\n");
+#endif
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       printk("Emulate Store to MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+#ifdef DEBUG
+               kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
+#endif
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       printk("Emulate Load from MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               er = EMULATE_FAIL;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_vm_init(struct kvm *kvm)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t config1;
+       int vcpu_id = vcpu->vcpu_id;
+
+       /* Arch-specific setup: program the config registers so that the guest
+        * comes up as expected.  For now we simulate a MIPS 24Kc.
+        */
+       kvm_write_c0_guest_prid(cop0, 0x00019300);
+       kvm_write_c0_guest_config(cop0,
+                                 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+                                 (MMU_TYPE_R4000 << CP0C0_MT));
+
+       /* Read the cache characteristics from the host Config1 Register */
+       config1 = (read_c0_config1() & ~0x7f);
+
+       /* Set up MMU size */
+       config1 &= ~(0x3f << 25);
+       config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
+
+       /* We unset some bits that we aren't emulating */
+       config1 &=
+           ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
+             (1 << CP0C1_WR) | (1 << CP0C1_CA));
+       kvm_write_c0_guest_config1(cop0, config1);
+
+       kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
+       /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
+       kvm_write_c0_guest_config3(cop0,
+                                  MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
+                                                                      CP0C3_ULRI));
+
+       /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+       kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+       /* Set up IntCtl defaults: compatibility mode for timer interrupts (HW5) */
+       kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+       /* Store the vcpu id as CPUNum in the EBase register to handle SMP guests */
+       kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+
+       return 0;
+}
+
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+       /* exit handlers */
+       .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+       .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+       .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+       .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+       .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+       .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+       .handle_syscall = kvm_trap_emul_handle_syscall,
+       .handle_res_inst = kvm_trap_emul_handle_res_inst,
+       .handle_break = kvm_trap_emul_handle_break,
+
+       .vm_init = kvm_trap_emul_vm_init,
+       .vcpu_init = kvm_trap_emul_vcpu_init,
+       .vcpu_setup = kvm_trap_emul_vcpu_setup,
+       .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+       .queue_timer_int = kvm_mips_queue_timer_int_cb,
+       .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+       .queue_io_int = kvm_mips_queue_io_int_cb,
+       .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+       .irq_deliver = kvm_mips_irq_deliver_cb,
+       .irq_clear = kvm_mips_irq_clear_cb,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+       *install_callbacks = &kvm_trap_emul_callbacks;
+       return 0;
+}
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644 (file)
index 0000000..bc9e0f4
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Tracepoints for VM exits
+ */
+extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+
+TRACE_EVENT(kvm_exit,
+           TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+           TP_ARGS(vcpu, reason),
+           TP_STRUCT__entry(
+                       __field(struct kvm_vcpu *, vcpu)
+                       __field(unsigned int, reason)
+           ),
+
+           TP_fast_assign(
+                       __entry->vcpu = vcpu;
+                       __entry->reason = reason;
+           ),
+
+           TP_printk("[%s]PC: 0x%08lx",
+                     kvm_mips_exit_types_str[__entry->reason],
+                     __entry->vcpu->arch.pc)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 9861c8669fab4047da01ad6af587ef948817d169..850821df924c32d069acbce03607b832c3a0c48b 100644 (file)
@@ -144,10 +144,6 @@ static int gptu_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get resource\n");
-               return -ENOMEM;
-       }
 
        /* remap gptu register range */
        gptu_membase = devm_ioremap_resource(&pdev->dev, res);
@@ -169,6 +165,8 @@ static int gptu_probe(struct platform_device *pdev)
        if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
                dev_err(&pdev->dev, "Failed to find magic\n");
                gptu_hwexit();
+               clk_disable(clk);
+               clk_put(clk);
                return -ENAVAIL;
        }
 
index a64daee740ee0414ee6a8db474b32893ae51ab1d..3b2a1e78a54369fc43bf052288f3fc7acaac5ae4 100644 (file)
@@ -19,7 +19,7 @@
  */
 void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__mips_set_bit);
  */
 void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(__mips_clear_bit);
  */
 void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(__mips_change_bit);
 int __mips_test_and_set_bit(unsigned long nr,
                            volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit);
 int __mips_test_and_set_bit_lock(unsigned long nr,
                                 volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
  */
 int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
  */
 int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
index 053d3b0b0317b18d848b2b2419025ebb1f23db31..0580194e7402aa6508c579e2ff925833fd421948 100644 (file)
@@ -5,7 +5,8 @@
  *
  * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2007 by Maciej W. Rozycki
+ * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
  */
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #define LONG_S_R sdr
 #endif
 
+#ifdef CONFIG_CPU_MICROMIPS
+#define STORSIZE (LONGSIZE * 2)
+#define STORMASK (STORSIZE - 1)
+#define FILL64RG t8
+#define FILLPTRG t7
+#undef  LONG_S
+#define LONG_S LONG_SP
+#else
+#define STORSIZE LONGSIZE
+#define STORMASK LONGMASK
+#define FILL64RG a1
+#define FILLPTRG t0
+#endif
+
 #define EX(insn,reg,addr,handler)                      \
 9:     insn    reg, addr;                              \
        .section __ex_table,"a";                        \
        .previous
 
        .macro  f_fill64 dst, offset, val, fixup
-       EX(LONG_S, \val, (\offset +  0 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  1 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  2 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  3 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  4 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  5 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  6 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  7 * LONGSIZE)(\dst), \fixup)
-#if LONGSIZE == 4
-       EX(LONG_S, \val, (\offset +  8 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  9 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 11 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 12 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  0 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  1 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  2 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  3 * STORSIZE)(\dst), \fixup)
+#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
+       EX(LONG_S, \val, (\offset +  4 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  5 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  6 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  7 * STORSIZE)(\dst), \fixup)
+#endif
+#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
+       EX(LONG_S, \val, (\offset +  8 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  9 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
 #endif
        .endm
 
@@ -71,16 +88,20 @@ LEAF(memset)
 1:
 
 FEXPORT(__bzero)
-       sltiu           t0, a2, LONGSIZE        /* very small region? */
+       sltiu           t0, a2, STORSIZE        /* very small region? */
        bnez            t0, .Lsmall_memset
-        andi           t0, a0, LONGMASK        /* aligned? */
+        andi           t0, a0, STORMASK        /* aligned? */
 
+#ifdef CONFIG_CPU_MICROMIPS
+       move            t8, a1                  /* used by 'swp' instruction */
+       move            t9, a1
+#endif
 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        beqz            t0, 1f
-        PTR_SUBU       t0, LONGSIZE            /* alignment in bytes */
+        PTR_SUBU       t0, STORSIZE            /* alignment in bytes */
 #else
        .set            noat
-       li              AT, LONGSIZE
+       li              AT, STORSIZE
        beqz            t0, 1f
         PTR_SUBU       t0, AT                  /* alignment in bytes */
        .set            at
@@ -99,24 +120,27 @@ FEXPORT(__bzero)
 1:     ori             t1, a2, 0x3f            /* # of full blocks */
        xori            t1, 0x3f
        beqz            t1, .Lmemset_partial    /* no block to fill */
-        andi           t0, a2, 0x40-LONGSIZE
+        andi           t0, a2, 0x40-STORSIZE
 
        PTR_ADDU        t1, a0                  /* end address */
        .set            reorder
 1:     PTR_ADDIU       a0, 64
        R10KCBARRIER(0(ra))
-       f_fill64 a0, -64, a1, .Lfwd_fixup
+       f_fill64 a0, -64, FILL64RG, .Lfwd_fixup
        bne             t1, a0, 1b
        .set            noreorder
 
 .Lmemset_partial:
        R10KCBARRIER(0(ra))
        PTR_LA          t1, 2f                  /* where to start */
+#ifdef CONFIG_CPU_MICROMIPS
+       LONG_SRL        t7, t0, 1
+#endif
 #if LONGSIZE == 4
-       PTR_SUBU        t1, t0
+       PTR_SUBU        t1, FILLPTRG
 #else
        .set            noat
-       LONG_SRL                AT, t0, 1
+       LONG_SRL        AT, FILLPTRG, 1
        PTR_SUBU        t1, AT
        .set            at
 #endif
@@ -126,9 +150,9 @@ FEXPORT(__bzero)
        .set            push
        .set            noreorder
        .set            nomacro
-       f_fill64 a0, -64, a1, .Lpartial_fixup   /* ... but first do longs ... */
+       f_fill64 a0, -64, FILL64RG, .Lpartial_fixup     /* ... but first do longs ... */
 2:     .set            pop
-       andi            a2, LONGMASK            /* At most one long to go */
+       andi            a2, STORMASK            /* At most one long to go */
 
        beqz            a2, 1f
         PTR_ADDU       a0, a2                  /* What's left */
@@ -169,7 +193,7 @@ FEXPORT(__bzero)
 
 .Lpartial_fixup:
        PTR_L           t0, TI_TASK($28)
-       andi            a2, LONGMASK
+       andi            a2, STORMASK
        LONG_L          t0, THREAD_BUADDR(t0)
        LONG_ADDU       a2, t1
        jr              ra
@@ -177,4 +201,4 @@ FEXPORT(__bzero)
 
 .Llast_fixup:
        jr              ra
-        andi           v1, a2, LONGMASK
+        andi           v1, a2, STORMASK
index cd160be3ce4dc97b934b566b4c69197ceb07b342..6807f7172eaf46f2841301d64882484f11be861f 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/preempt.h>
 #include <linux/export.h>
+#include <linux/stringify.h>
 
 #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
 
  *
  * Workaround: mask EXL bit of the result or place a nop before mfc0.
  */
-__asm__(
-       "       .macro  arch_local_irq_disable\n"
+notrace void arch_local_irq_disable(void)
+{
+       preempt_disable();
+
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -52,108 +56,98 @@ __asm__(
        "       .set    noreorder                                       \n"
        "       mtc0    $1,$12                                          \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : /* no outputs */
+       : /* no inputs */
+       : "memory");
 
-notrace void arch_local_irq_disable(void)
-{
-       preempt_disable();
-       __asm__ __volatile__(
-               "arch_local_irq_disable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
        preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
 
-__asm__(
-       "       .macro  arch_local_irq_save result                      \n"
+notrace unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
+
+       preempt_disable();
+
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\result, $2, 1                                 \n"
-       "       ori     $1, \\result, 0x400                             \n"
+       "       mfc0    %[flags], $2, 1                         \n"
+       "       ori     $1, %[flags], 0x400                             \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
-       "       andi    \\result, \\result, 0x400                       \n"
+       "       andi    %[flags], %[flags], 0x400                       \n"
 #elif defined(CONFIG_CPU_MIPSR2)
        /* see irqflags.h for inline function */
 #else
-       "       mfc0    \\result, $12                                   \n"
-       "       ori     $1, \\result, 0x1f                              \n"
+       "       mfc0    %[flags], $12                                   \n"
+       "       ori     $1, %[flags], 0x1f                              \n"
        "       xori    $1, 0x1f                                        \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $12                                         \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (flags)
+       : /* no inputs */
+       : "memory");
 
-notrace unsigned long arch_local_irq_save(void)
-{
-       unsigned long flags;
-       preempt_disable();
-       asm volatile("arch_local_irq_save\t%0"
-                    : "=r" (flags)
-                    : /* no inputs */
-                    : "memory");
        preempt_enable();
+
        return flags;
 }
 EXPORT_SYMBOL(arch_local_irq_save);
 
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of branch and call overhead on each
+        * local_irq_restore()
+        */
+       if (unlikely(!(flags & 0x0400)))
+               smtc_ipi_replay();
+#endif
+       preempt_disable();
 
-__asm__(
-       "       .macro  arch_local_irq_restore flags                    \n"
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-       "mfc0   $1, $2, 1                                               \n"
-       "andi   \\flags, 0x400                                          \n"
-       "ori    $1, 0x400                                               \n"
-       "xori   $1, 0x400                                               \n"
-       "or     \\flags, $1                                             \n"
-       "mtc0   \\flags, $2, 1                                          \n"
+       "       mfc0    $1, $2, 1                                       \n"
+       "       andi    %[flags], 0x400                                 \n"
+       "       ori     $1, 0x400                                       \n"
+       "       xori    $1, 0x400                                       \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $2, 1                                 \n"
 #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
        /* see irqflags.h for inline function */
 #elif defined(CONFIG_CPU_MIPSR2)
        /* see irqflags.h for inline function */
 #else
        "       mfc0    $1, $12                                         \n"
-       "       andi    \\flags, 1                                      \n"
+       "       andi    %[flags], 1                                     \n"
        "       ori     $1, 0x1f                                        \n"
        "       xori    $1, 0x1f                                        \n"
-       "       or      \\flags, $1                                     \n"
-       "       mtc0    \\flags, $12                                    \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $12                                   \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (__tmp1)
+       : "0" (flags)
+       : "memory");
 
-notrace void arch_local_irq_restore(unsigned long flags)
-{
-       unsigned long __tmp1;
-
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of branch and call overhead on each
-        * local_irq_restore()
-        */
-       if (unlikely(!(flags & 0x0400)))
-               smtc_ipi_replay();
-#endif
-       preempt_disable();
-       __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
        preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
@@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags)
        unsigned long __tmp1;
 
        preempt_disable();
+
        __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
+       "       .set    push                                            \n"
+       "       .set    noreorder                                       \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1                                       \n"
+       "       andi    %[flags], 0x400                                 \n"
+       "       ori     $1, 0x400                                       \n"
+       "       xori    $1, 0x400                                       \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $2, 1                                 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+       /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    $1, $12                                         \n"
+       "       andi    %[flags], 1                                     \n"
+       "       ori     $1, 0x1f                                        \n"
+       "       xori    $1, 0x1f                                        \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $12                                   \n"
+#endif
+       "       " __stringify(__irq_disable_hazard) "                   \n"
+       "       .set    pop                                             \n"
+       : [flags] "=r" (__tmp1)
+       : "0" (flags)
+       : "memory");
+
        preempt_enable();
 }
 EXPORT_SYMBOL(__arch_local_irq_restore);
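The mips-atomic.c hunks above drop the file-scope ".macro arch_local_irq_*" assembler macros and emit the inline asm directly inside each C function, replacing assembler macro arguments (\flags) with GCC named operands (%[flags]) and stringifying the hazard barrier. A minimal standalone sketch of the named-operand and matching-constraint pattern — illustrative only, not part of this commit, and it assumes a MIPS toolchain because of the 'move' instruction:

	/* sketch.c: %[name] operands plus a "0" matching constraint, the same
	 * mechanism the rewritten arch_local_irq_restore() relies on. */
	#include <stdio.h>

	static unsigned long pass_through(unsigned long in)
	{
		unsigned long out;

		__asm__ __volatile__(
		"	move	%[flags], %[flags]	\n" /* no-op on the named register */
		: [flags] "=r" (out)	/* output operand, referenced by name */
		: "0" (in)		/* input constrained to the same register */
		: "memory");

		return out;
	}

	int main(void)
	{
		printf("%lu\n", pass_through(42));	/* prints 42 */
		return 0;
	}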
index fdbb970f670d84ed2bafcbd42cc4e9eb46b96156..e362dcdc69d1617486ee5627f055ffc3075b5548 100644 (file)
@@ -3,8 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
- * Copyright (c) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2011 MIPS Technologies, Inc.
  */
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
@@ -28,9 +29,9 @@ LEAF(__strlen_user_asm)
 
 FEXPORT(__strlen_user_nocheck_asm)
        move            v0, a0
-1:     EX(lb, t0, (v0), .Lfault)
+1:     EX(lbu, v1, (v0), .Lfault)
        PTR_ADDIU       v0, 1
-       bnez            t0, 1b
+       bnez            v1, 1b
        PTR_SUBU        v0, a0
        jr              ra
        END(__strlen_user_asm)
index bad5394875031ecb08e668a53b5bbca70649de95..92870b6b53eaeee044a0424ef1cdd16870ec445a 100644 (file)
@@ -3,7 +3,8 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 2011 MIPS Technologies, Inc.
  */
 #include <linux/errno.h>
 #include <asm/asm.h>
@@ -33,26 +34,27 @@ LEAF(__strncpy_from_user_asm)
        bnez            v0, .Lfault
 
 FEXPORT(__strncpy_from_user_nocheck_asm)
-       move            v0, zero
-       move            v1, a1
        .set            noreorder
-1:     EX(lbu, t0, (v1), .Lfault)
+       move            t0, zero
+       move            v1, a1
+1:     EX(lbu, v0, (v1), .Lfault)
        PTR_ADDIU       v1, 1
        R10KCBARRIER(0(ra))
-       beqz            t0, 2f
-        sb             t0, (a0)
-       PTR_ADDIU       v0, 1
-       .set            reorder
-       PTR_ADDIU       a0, 1
-       bne             v0, a2, 1b
-2:     PTR_ADDU        t0, a1, v0
-       xor             t0, a1
-       bltz            t0, .Lfault
+       beqz            v0, 2f
+        sb             v0, (a0)
+       PTR_ADDIU       t0, 1
+       bne             t0, a2, 1b
+        PTR_ADDIU      a0, 1
+2:     PTR_ADDU        v0, a1, t0
+       xor             v0, a1
+       bltz            v0, .Lfault
+        nop
        jr              ra                      # return n
+        move           v0, t0
        END(__strncpy_from_user_asm)
 
-.Lfault:       li              v0, -EFAULT
-       jr              ra
+.Lfault: jr            ra
+         li            v0, -EFAULT
 
        .section        __ex_table,"a"
        PTR             1b, .Lfault
index beea03c8c0ce823130ab25b421f6874b5a2a13b8..fcacea5e61f1e685891e8e4af7ec8f701c596fd2 100644 (file)
@@ -35,7 +35,7 @@ FEXPORT(__strnlen_user_nocheck_asm)
        PTR_ADDU        a1, a0                  # stop pointer
 1:     beq             v0, a1, 1f              # limit reached?
        EX(lb, t0, (v0), .Lfault)
-       PTR_ADD       v0, 1
+       PTR_ADDIU       v0, 1
        bnez            t0, 1b
 1:     PTR_SUBU        v0, a0
        jr              ra
index 35c8c64684941603f29c072478fd42b7401a7be8..65bfbb5d06f442efbd6601600097f94124cc171c 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 
+#include <asm/idle.h>
 #include <asm/reboot.h>
 
 #include <loongson.h>
index d4f610f9604a27296a685ea1455bbd18717b48be..547f34b69e4c06a471032ab588b8391e62f0cc39 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/io.h>
 #include <linux/pm.h>
+#include <asm/idle.h>
 #include <asm/reboot.h>
 
 #include <loongson1.h>
index afb5a0bcf7a5a524c4fe75450be559e8402c68d4..f03771900813cb69c9ddd5f579841350a8c48786 100644 (file)
@@ -45,6 +45,7 @@
 #include <asm/signal.h>
 #include <asm/mipsregs.h>
 #include <asm/fpu_emulator.h>
+#include <asm/fpu.h>
 #include <asm/uaccess.h>
 #include <asm/branch.h>
 
@@ -81,6 +82,11 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
 /* Determine rounding mode from the RM bits of the FCSR */
 #define modeindex(v) ((v) & FPU_CSR_RM)
 
+/* microMIPS bitfields */
+#define MM_POOL32A_MINOR_MASK  0x3f
+#define MM_POOL32A_MINOR_SHIFT 0x6
+#define MM_MIPS32_COND_FC      0x30
+
 /* Convert Mips rounding mode (0..3) to IEEE library modes. */
 static const unsigned char ieee_rm[4] = {
        [FPU_CSR_RN] = IEEE754_RN,
@@ -110,6 +116,556 @@ static const unsigned int fpucondbit[8] = {
 };
 #endif
 
+/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
+static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
+
+/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */
+static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
+static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0};
+static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0};
+static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0};
+
+/*
+ * This function translates a 32-bit microMIPS instruction
+ * into a 32-bit MIPS32 instruction. Returns 0 on success
+ * and SIGILL otherwise.
+ */
+static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
+{
+       union mips_instruction insn = *insn_ptr;
+       union mips_instruction mips32_insn = insn;
+       int func, fmt, op;
+
+       switch (insn.mm_i_format.opcode) {
+       case mm_ldc132_op:
+               mips32_insn.mm_i_format.opcode = ldc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_lwc132_op:
+               mips32_insn.mm_i_format.opcode = lwc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_sdc132_op:
+               mips32_insn.mm_i_format.opcode = sdc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_swc132_op:
+               mips32_insn.mm_i_format.opcode = swc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_pool32i_op:
+               /* NOTE: offset is << by 1 if in microMIPS mode. */
+               if ((insn.mm_i_format.rt == mm_bc1f_op) ||
+                   (insn.mm_i_format.rt == mm_bc1t_op)) {
+                       mips32_insn.fb_format.opcode = cop1_op;
+                       mips32_insn.fb_format.bc = bc_op;
+                       mips32_insn.fb_format.flag =
+                               (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
+               } else
+                       return SIGILL;
+               break;
+       case mm_pool32f_op:
+               switch (insn.mm_fp0_format.func) {
+               case mm_32f_01_op:
+               case mm_32f_11_op:
+               case mm_32f_02_op:
+               case mm_32f_12_op:
+               case mm_32f_41_op:
+               case mm_32f_51_op:
+               case mm_32f_42_op:
+               case mm_32f_52_op:
+                       op = insn.mm_fp0_format.func;
+                       if (op == mm_32f_01_op)
+                               func = madd_s_op;
+                       else if (op == mm_32f_11_op)
+                               func = madd_d_op;
+                       else if (op == mm_32f_02_op)
+                               func = nmadd_s_op;
+                       else if (op == mm_32f_12_op)
+                               func = nmadd_d_op;
+                       else if (op == mm_32f_41_op)
+                               func = msub_s_op;
+                       else if (op == mm_32f_51_op)
+                               func = msub_d_op;
+                       else if (op == mm_32f_42_op)
+                               func = nmsub_s_op;
+                       else
+                               func = nmsub_d_op;
+                       mips32_insn.fp6_format.opcode = cop1x_op;
+                       mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr;
+                       mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft;
+                       mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs;
+                       mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd;
+                       mips32_insn.fp6_format.func = func;
+                       break;
+               case mm_32f_10_op:
+                       func = -1;      /* Invalid */
+                       op = insn.mm_fp5_format.op & 0x7;
+                       if (op == mm_ldxc1_op)
+                               func = ldxc1_op;
+                       else if (op == mm_sdxc1_op)
+                               func = sdxc1_op;
+                       else if (op == mm_lwxc1_op)
+                               func = lwxc1_op;
+                       else if (op == mm_swxc1_op)
+                               func = swxc1_op;
+
+                       if (func != -1) {
+                               mips32_insn.r_format.opcode = cop1x_op;
+                               mips32_insn.r_format.rs =
+                                       insn.mm_fp5_format.base;
+                               mips32_insn.r_format.rt =
+                                       insn.mm_fp5_format.index;
+                               mips32_insn.r_format.rd = 0;
+                               mips32_insn.r_format.re = insn.mm_fp5_format.fd;
+                               mips32_insn.r_format.func = func;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_40_op:
+                       op = -1;        /* Invalid */
+                       if (insn.mm_fp2_format.op == mm_fmovt_op)
+                               op = 1;
+                       else if (insn.mm_fp2_format.op == mm_fmovf_op)
+                               op = 0;
+                       if (op != -1) {
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp2_format.fmt];
+                               mips32_insn.fp0_format.ft =
+                                       (insn.mm_fp2_format.cc<<2) + op;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp2_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp2_format.fd;
+                               mips32_insn.fp0_format.func = fmovc_op;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_60_op:
+                       func = -1;      /* Invalid */
+                       if (insn.mm_fp0_format.op == mm_fadd_op)
+                               func = fadd_op;
+                       else if (insn.mm_fp0_format.op == mm_fsub_op)
+                               func = fsub_op;
+                       else if (insn.mm_fp0_format.op == mm_fmul_op)
+                               func = fmul_op;
+                       else if (insn.mm_fp0_format.op == mm_fdiv_op)
+                               func = fdiv_op;
+                       if (func != -1) {
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp0_format.fmt];
+                               mips32_insn.fp0_format.ft =
+                                       insn.mm_fp0_format.ft;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp0_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp0_format.fd;
+                               mips32_insn.fp0_format.func = func;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_70_op:
+                       func = -1;      /* Invalid */
+                       if (insn.mm_fp0_format.op == mm_fmovn_op)
+                               func = fmovn_op;
+                       else if (insn.mm_fp0_format.op == mm_fmovz_op)
+                               func = fmovz_op;
+                       if (func != -1) {
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp0_format.fmt];
+                               mips32_insn.fp0_format.ft =
+                                       insn.mm_fp0_format.ft;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp0_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp0_format.fd;
+                               mips32_insn.fp0_format.func = func;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_73_op:    /* POOL32FXF */
+                       switch (insn.mm_fp1_format.op) {
+                       case mm_movf0_op:
+                       case mm_movf1_op:
+                       case mm_movt0_op:
+                       case mm_movt1_op:
+                               if ((insn.mm_fp1_format.op & 0x7f) ==
+                                   mm_movf0_op)
+                                       op = 0;
+                               else
+                                       op = 1;
+                               mips32_insn.r_format.opcode = spec_op;
+                               mips32_insn.r_format.rs = insn.mm_fp4_format.fs;
+                               mips32_insn.r_format.rt =
+                                       (insn.mm_fp4_format.cc << 2) + op;
+                               mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
+                               mips32_insn.r_format.re = 0;
+                               mips32_insn.r_format.func = movc_op;
+                               break;
+                       case mm_fcvtd0_op:
+                       case mm_fcvtd1_op:
+                       case mm_fcvts0_op:
+                       case mm_fcvts1_op:
+                               if ((insn.mm_fp1_format.op & 0x7f) ==
+                                   mm_fcvtd0_op) {
+                                       func = fcvtd_op;
+                                       fmt = swl_format[insn.mm_fp3_format.fmt];
+                               } else {
+                                       func = fcvts_op;
+                                       fmt = dwl_format[insn.mm_fp3_format.fmt];
+                               }
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt = fmt;
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp3_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp3_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_fmov0_op:
+                       case mm_fmov1_op:
+                       case mm_fabs0_op:
+                       case mm_fabs1_op:
+                       case mm_fneg0_op:
+                       case mm_fneg1_op:
+                               if ((insn.mm_fp1_format.op & 0x7f) ==
+                                   mm_fmov0_op)
+                                       func = fmov_op;
+                               else if ((insn.mm_fp1_format.op & 0x7f) ==
+                                        mm_fabs0_op)
+                                       func = fabs_op;
+                               else
+                                       func = fneg_op;
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp3_format.fmt];
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp3_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp3_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_ffloorl_op:
+                       case mm_ffloorw_op:
+                       case mm_fceill_op:
+                       case mm_fceilw_op:
+                       case mm_ftruncl_op:
+                       case mm_ftruncw_op:
+                       case mm_froundl_op:
+                       case mm_froundw_op:
+                       case mm_fcvtl_op:
+                       case mm_fcvtw_op:
+                               if (insn.mm_fp1_format.op == mm_ffloorl_op)
+                                       func = ffloorl_op;
+                               else if (insn.mm_fp1_format.op == mm_ffloorw_op)
+                                       func = ffloor_op;
+                               else if (insn.mm_fp1_format.op == mm_fceill_op)
+                                       func = fceill_op;
+                               else if (insn.mm_fp1_format.op == mm_fceilw_op)
+                                       func = fceil_op;
+                               else if (insn.mm_fp1_format.op == mm_ftruncl_op)
+                                       func = ftruncl_op;
+                               else if (insn.mm_fp1_format.op == mm_ftruncw_op)
+                                       func = ftrunc_op;
+                               else if (insn.mm_fp1_format.op == mm_froundl_op)
+                                       func = froundl_op;
+                               else if (insn.mm_fp1_format.op == mm_froundw_op)
+                                       func = fround_op;
+                               else if (insn.mm_fp1_format.op == mm_fcvtl_op)
+                                       func = fcvtl_op;
+                               else
+                                       func = fcvtw_op;
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sd_format[insn.mm_fp1_format.fmt];
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp1_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp1_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_frsqrt_op:
+                       case mm_fsqrt_op:
+                       case mm_frecip_op:
+                               if (insn.mm_fp1_format.op == mm_frsqrt_op)
+                                       func = frsqrt_op;
+                               else if (insn.mm_fp1_format.op == mm_fsqrt_op)
+                                       func = fsqrt_op;
+                               else
+                                       func = frecip_op;
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp1_format.fmt];
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp1_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp1_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_mfc1_op:
+                       case mm_mtc1_op:
+                       case mm_cfc1_op:
+                       case mm_ctc1_op:
+                               if (insn.mm_fp1_format.op == mm_mfc1_op)
+                                       op = mfc_op;
+                               else if (insn.mm_fp1_format.op == mm_mtc1_op)
+                                       op = mtc_op;
+                               else if (insn.mm_fp1_format.op == mm_cfc1_op)
+                                       op = cfc_op;
+                               else
+                                       op = ctc_op;
+                               mips32_insn.fp1_format.opcode = cop1_op;
+                               mips32_insn.fp1_format.op = op;
+                               mips32_insn.fp1_format.rt =
+                                       insn.mm_fp1_format.rt;
+                               mips32_insn.fp1_format.fs =
+                                       insn.mm_fp1_format.fs;
+                               mips32_insn.fp1_format.fd = 0;
+                               mips32_insn.fp1_format.func = 0;
+                               break;
+                       default:
+                               return SIGILL;
+                               break;
+                       }
+                       break;
+               case mm_32f_74_op:      /* c.cond.fmt */
+                       mips32_insn.fp0_format.opcode = cop1_op;
+                       mips32_insn.fp0_format.fmt =
+                               sdps_format[insn.mm_fp4_format.fmt];
+                       mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
+                       mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs;
+                       mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2;
+                       mips32_insn.fp0_format.func =
+                               insn.mm_fp4_format.cond | MM_MIPS32_COND_FC;
+                       break;
+               default:
+                       return SIGILL;
+                       break;
+               }
+               break;
+       default:
+               return SIGILL;
+               break;
+       }
+
+       *insn_ptr = mips32_insn;
+       return 0;
+}
+
+int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+                    unsigned long *contpc)
+{
+       union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+       int bc_false = 0;
+       unsigned int fcr31;
+       unsigned int bit;
+
+       switch (insn.mm_i_format.opcode) {
+       case mm_pool32a_op:
+               if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
+                   mm_pool32axf_op) {
+                       switch (insn.mm_i_format.simmediate >>
+                               MM_POOL32A_MINOR_SHIFT) {
+                       case mm_jalr_op:
+                       case mm_jalrhb_op:
+                       case mm_jalrs_op:
+                       case mm_jalrshb_op:
+                               if (insn.mm_i_format.rt != 0)   /* Not mm_jr */
+                                       regs->regs[insn.mm_i_format.rt] =
+                                               regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               dec_insn.next_pc_inc;
+                               *contpc = regs->regs[insn.mm_i_format.rs];
+                               return 1;
+                               break;
+                       }
+               }
+               break;
+       case mm_pool32i_op:
+               switch (insn.mm_i_format.rt) {
+               case mm_bltzals_op:
+               case mm_bltzal_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
+               case mm_bltz_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] < 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_bgezals_op:
+               case mm_bgezal_op:
+                       regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       /* Fall through */
+               case mm_bgez_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_blez_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_bgtz_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] > 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_bc2f_op:
+               case mm_bc1f_op:
+                       bc_false = 1;
+                       /* Fall through */
+               case mm_bc2t_op:
+               case mm_bc1t_op:
+                       preempt_disable();
+                       if (is_fpu_owner())
+                               asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+                       else
+                               fcr31 = current->thread.fpu.fcr31;
+                       preempt_enable();
+
+                       if (bc_false)
+                               fcr31 = ~fcr31;
+
+                       bit = (insn.mm_i_format.rs >> 2);
+                       bit += (bit != 0);
+                       bit += 23;
+                       if (fcr31 & (1 << bit))
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc + dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               }
+               break;
+       case mm_pool16c_op:
+               switch (insn.mm_i_format.rt) {
+               case mm_jalr16_op:
+               case mm_jalrs16_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+                       /* Fall through */
+               case mm_jr16_op:
+                       *contpc = regs->regs[insn.mm_i_format.rs];
+                       return 1;
+                       break;
+               }
+               break;
+       case mm_beqz16_op:
+               if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_b1_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_bnez16_op:
+               if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_b1_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_b16_op:
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                        (insn.mm_b0_format.simmediate << 1);
+               return 1;
+               break;
+       case mm_beq32_op:
+               if (regs->regs[insn.mm_i_format.rs] ==
+                   regs->regs[insn.mm_i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_i_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_bne32_op:
+               if (regs->regs[insn.mm_i_format.rs] !=
+                   regs->regs[insn.mm_i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_i_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_jalx32_op:
+               regs->regs[31] = regs->cp0_epc +
+                       dec_insn.pc_inc + dec_insn.next_pc_inc;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc;
+               *contpc >>= 28;
+               *contpc <<= 28;
+               *contpc |= (insn.j_format.target << 2);
+               return 1;
+               break;
+       case mm_jals32_op:
+       case mm_jal32_op:
+               regs->regs[31] = regs->cp0_epc +
+                       dec_insn.pc_inc + dec_insn.next_pc_inc;
+               /* Fall through */
+       case mm_j32_op:
+               *contpc = regs->cp0_epc + dec_insn.pc_inc;
+               *contpc >>= 27;
+               *contpc <<= 27;
+               *contpc |= (insn.j_format.target << 1);
+               set_isa16_mode(*contpc);
+               return 1;
+               break;
+       }
+       return 0;
+}
 
 /*
  * Redundant with logic already in kernel/branch.c,
@@ -117,53 +673,177 @@ static const unsigned int fpucondbit[8] = {
  * a single subroutine should be used across both
  * modules.
  */
-static int isBranchInstr(mips_instruction * i)
+static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+                        unsigned long *contpc)
 {
-       switch (MIPSInst_OPCODE(*i)) {
+       union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+       unsigned int fcr31;
+       unsigned int bit = 0;
+
+       switch (insn.i_format.opcode) {
        case spec_op:
-               switch (MIPSInst_FUNC(*i)) {
+               switch (insn.r_format.func) {
                case jalr_op:
+                       regs->regs[insn.r_format.rd] =
+                               regs->cp0_epc + dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
                case jr_op:
+                       *contpc = regs->regs[insn.r_format.rs];
                        return 1;
+                       break;
                }
                break;
-
        case bcond_op:
-               switch (MIPSInst_RT(*i)) {
+               switch (insn.i_format.rt) {
+               case bltzal_op:
+               case bltzall_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
                case bltz_op:
-               case bgez_op:
                case bltzl_op:
-               case bgezl_op:
-               case bltzal_op:
+                       if ((long)regs->regs[insn.i_format.rs] < 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.i_format.simmediate << 2);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
                case bgezal_op:
-               case bltzall_op:
                case bgezall_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
+               case bgez_op:
+               case bgezl_op:
+                       if ((long)regs->regs[insn.i_format.rs] >= 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.i_format.simmediate << 2);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
                        return 1;
+                       break;
                }
                break;
-
-       case j_op:
-       case jal_op:
        case jalx_op:
+               set_isa16_mode(bit);
+       case jal_op:
+               regs->regs[31] = regs->cp0_epc +
+                       dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+               /* Fall through */
+       case j_op:
+               *contpc = regs->cp0_epc + dec_insn.pc_inc;
+               *contpc >>= 28;
+               *contpc <<= 28;
+               *contpc |= (insn.j_format.target << 2);
+               /* Set microMIPS mode bit: XOR for jalx. */
+               *contpc ^= bit;
+               return 1;
+               break;
        case beq_op:
-       case bne_op:
-       case blez_op:
-       case bgtz_op:
        case beql_op:
+               if (regs->regs[insn.i_format.rs] ==
+                   regs->regs[insn.i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case bne_op:
        case bnel_op:
+               if (regs->regs[insn.i_format.rs] !=
+                   regs->regs[insn.i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case blez_op:
        case blezl_op:
+               if ((long)regs->regs[insn.i_format.rs] <= 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case bgtz_op:
        case bgtzl_op:
+               if ((long)regs->regs[insn.i_format.rs] > 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
                return 1;
-
+               break;
        case cop0_op:
        case cop1_op:
        case cop2_op:
        case cop1x_op:
-               if (MIPSInst_RS(*i) == bc_op)
-                       return 1;
+               if (insn.i_format.rs == bc_op) {
+                       preempt_disable();
+                       if (is_fpu_owner())
+                               asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+                       else
+                               fcr31 = current->thread.fpu.fcr31;
+                       preempt_enable();
+
+                       bit = (insn.i_format.rt >> 2);
+                       bit += (bit != 0);
+                       bit += 23;
+                       switch (insn.i_format.rt & 3) {
+                       case 0: /* bc1f */
+                       case 2: /* bc1fl */
+                               if (~fcr31 & (1 << bit))
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               (insn.i_format.simmediate << 2);
+                               else
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               dec_insn.next_pc_inc;
+                               return 1;
+                               break;
+                       case 1: /* bc1t */
+                       case 3: /* bc1tl */
+                               if (fcr31 & (1 << bit))
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               (insn.i_format.simmediate << 2);
+                               else
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               dec_insn.next_pc_inc;
+                               return 1;
+                               break;
+                       }
+               }
                break;
        }
-
        return 0;
 }
 
@@ -210,26 +890,23 @@ static inline int cop1_64bit(struct pt_regs *xcp)
  */
 
 static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
-                      void *__user *fault_addr)
+               struct mm_decoded_insn dec_insn, void *__user *fault_addr)
 {
        mips_instruction ir;
-       unsigned long emulpc, contpc;
+       unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
        unsigned int cond;
-
-       if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
-               MIPS_FPU_EMU_INC_STATS(errors);
-               *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-               return SIGBUS;
-       }
-       if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
-               MIPS_FPU_EMU_INC_STATS(errors);
-               *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-               return SIGSEGV;
-       }
+       int pc_inc;
 
        /* XXX NEC Vr54xx bug workaround */
-       if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
-               xcp->cp0_cause &= ~CAUSEF_BD;
+       if (xcp->cp0_cause & CAUSEF_BD) {
+               if (dec_insn.micro_mips_mode) {
+                       if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
+                               xcp->cp0_cause &= ~CAUSEF_BD;
+               } else {
+                       if (!isBranchInstr(xcp, dec_insn, &contpc))
+                               xcp->cp0_cause &= ~CAUSEF_BD;
+               }
+       }
 
        if (xcp->cp0_cause & CAUSEF_BD) {
                /*
@@ -244,32 +921,33 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                 * Linux MIPS branch emulator operates on context, updating the
                 * cp0_epc.
                 */
-               emulpc = xcp->cp0_epc + 4;      /* Snapshot emulation target */
+               ir = dec_insn.next_insn;  /* process delay slot instr */
+               pc_inc = dec_insn.next_pc_inc;
+       } else {
+               ir = dec_insn.insn;       /* process current instr */
+               pc_inc = dec_insn.pc_inc;
+       }
 
-               if (__compute_return_epc(xcp) < 0) {
-#ifdef CP1DBG
-                       printk("failed to emulate branch at %p\n",
-                               (void *) (xcp->cp0_epc));
-#endif
+       /*
+        * Since microMIPS FPU instructions are a subset of MIPS32 FPU
+        * instructions, we want to convert microMIPS FPU instructions
+        * into MIPS32 instructions so that we can reuse all of the
+        * FPU emulation code.
+        *
+        * NOTE: We cannot do this for branch instructions since they
+        *       are not a subset. Example: Cannot emulate a 16-bit
+        *       aligned target address with a MIPS32 instruction.
+        */
+       if (dec_insn.micro_mips_mode) {
+               /*
+                * If the next instruction is a 16-bit instruction, it
+                * cannot be an FPU instruction. This could happen
+                * since we can be called for non-FPU instructions.
+                */
+               if ((pc_inc == 2) ||
+                       (microMIPS32_to_MIPS32((union mips_instruction *)&ir)
+                        == SIGILL))
                        return SIGILL;
-               }
-               if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)emulpc;
-                       return SIGBUS;
-               }
-               if (__get_user(ir, (mips_instruction __user *) emulpc)) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)emulpc;
-                       return SIGSEGV;
-               }
-               /* __compute_return_epc() will have updated cp0_epc */
-               contpc = xcp->cp0_epc;
-               /* In order not to confuse ptrace() et al, tweak context */
-               xcp->cp0_epc = emulpc - 4;
-       } else {
-               emulpc = xcp->cp0_epc;
-               contpc = xcp->cp0_epc + 4;
        }
 
       emul:
@@ -474,22 +1152,35 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                                /* branch taken: emulate dslot
                                 * instruction
                                 */
-                               xcp->cp0_epc += 4;
-                               contpc = (xcp->cp0_epc +
-                                       (MIPSInst_SIMM(ir) << 2));
-
-                               if (!access_ok(VERIFY_READ, xcp->cp0_epc,
-                                              sizeof(mips_instruction))) {
-                                       MIPS_FPU_EMU_INC_STATS(errors);
-                                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                                       return SIGBUS;
-                               }
-                               if (__get_user(ir,
-                                   (mips_instruction __user *) xcp->cp0_epc)) {
-                                       MIPS_FPU_EMU_INC_STATS(errors);
-                                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                                       return SIGSEGV;
-                               }
+                               xcp->cp0_epc += dec_insn.pc_inc;
+
+                               contpc = MIPSInst_SIMM(ir);
+                               ir = dec_insn.next_insn;
+                               if (dec_insn.micro_mips_mode) {
+                                       contpc = (xcp->cp0_epc + (contpc << 1));
+
+                                       /* If 16-bit instruction, not FPU. */
+                                       if ((dec_insn.next_pc_inc == 2) ||
+                                               (microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) {
+
+                                               /*
+                                                * Since this instruction will
+                                                * be put on the stack with
+                                                * 32-bit words, get around
+                                                * this problem by putting a
+                                                * NOP16 as the second one.
+                                                */
+                                               if (dec_insn.next_pc_inc == 2)
+                                                       ir = (ir & (~0xffff)) | MM_NOP16;
+
+                                               /*
+                                                * Single step the non-CP1
+                                                * instruction in the dslot.
+                                                */
+                                               return mips_dsemul(xcp, ir, contpc);
+                                       }
+                               } else
+                                       contpc = (xcp->cp0_epc + (contpc << 2));
 
                                switch (MIPSInst_OPCODE(ir)) {
                                case lwc1_op:
@@ -525,8 +1216,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                                         * branch likely nullifies
                                         * dslot if not taken
                                         */
-                                       xcp->cp0_epc += 4;
-                                       contpc += 4;
+                                       xcp->cp0_epc += dec_insn.pc_inc;
+                                       contpc += dec_insn.pc_inc;
                                        /*
                                         * else continue & execute
                                         * dslot as normal insn
@@ -1313,25 +2004,75 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
        int has_fpu, void *__user *fault_addr)
 {
        unsigned long oldepc, prevepc;
-       mips_instruction insn;
+       struct mm_decoded_insn dec_insn;
+       u16 instr[4];
+       u16 *instr_ptr;
        int sig = 0;
 
        oldepc = xcp->cp0_epc;
        do {
                prevepc = xcp->cp0_epc;
 
-               if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                       return SIGBUS;
-               }
-               if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                       return SIGSEGV;
+               if (get_isa16_mode(prevepc) && cpu_has_mmips) {
+                       /*
+                        * Get next 2 microMIPS instructions and convert them
+                        * into 32-bit instructions.
+                        */
+                       if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) ||
+                           (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) ||
+                           (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) ||
+                           (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) {
+                               MIPS_FPU_EMU_INC_STATS(errors);
+                               return SIGBUS;
+                       }
+                       instr_ptr = instr;
+
+                       /* Get first instruction. */
+                       if (mm_insn_16bit(*instr_ptr)) {
+                               /* Duplicate the half-word. */
+                               dec_insn.insn = (*instr_ptr << 16) |
+                                       (*instr_ptr);
+                               /* 16-bit instruction. */
+                               dec_insn.pc_inc = 2;
+                               instr_ptr += 1;
+                       } else {
+                               dec_insn.insn = (*instr_ptr << 16) |
+                                       *(instr_ptr+1);
+                               /* 32-bit instruction. */
+                               dec_insn.pc_inc = 4;
+                               instr_ptr += 2;
+                       }
+                       /* Get second instruction. */
+                       if (mm_insn_16bit(*instr_ptr)) {
+                               /* Duplicate the half-word. */
+                               dec_insn.next_insn = (*instr_ptr << 16) |
+                                       (*instr_ptr);
+                               /* 16-bit instruction. */
+                               dec_insn.next_pc_inc = 2;
+                       } else {
+                               dec_insn.next_insn = (*instr_ptr << 16) |
+                                       *(instr_ptr+1);
+                               /* 32-bit instruction. */
+                               dec_insn.next_pc_inc = 4;
+                       }
+                       dec_insn.micro_mips_mode = 1;
+               } else {
+                       if ((get_user(dec_insn.insn,
+                           (mips_instruction __user *) xcp->cp0_epc)) ||
+                           (get_user(dec_insn.next_insn,
+                           (mips_instruction __user *)(xcp->cp0_epc+4)))) {
+                               MIPS_FPU_EMU_INC_STATS(errors);
+                               return SIGBUS;
+                       }
+                       dec_insn.pc_inc = 4;
+                       dec_insn.next_pc_inc = 4;
+                       dec_insn.micro_mips_mode = 0;
                }
-               if (insn == 0)
-                       xcp->cp0_epc += 4;      /* skip nops */
+
+               if ((dec_insn.insn == 0) ||
+                  ((dec_insn.pc_inc == 2) &&
+                  ((dec_insn.insn & 0xffff) == MM_NOP16)))
+                       xcp->cp0_epc += dec_insn.pc_inc;        /* Skip NOPs */
                else {
                        /*
                         * The 'ieee754_csr' is an alias of
@@ -1341,7 +2082,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         */
                        /* convert to ieee library modes */
                        ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
-                       sig = cop1Emulate(xcp, ctx, fault_addr);
+                       sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
                        /* revert to mips rounding mode */
                        ieee754_csr.rm = mips_rm[ieee754_csr.rm];
                }
index 384a3b0091ea33e507fe063c0b8b43a5ab714823..7ea622ab8dad32df6bf1fbd8d07bffdbf48c3f72 100644 (file)
@@ -55,7 +55,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
        struct emuframe __user *fr;
        int err;
 
-       if (ir == 0) {          /* a nop is easy */
+       if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) ||
+               (ir == 0)) {
+               /* NOP is easy */
                regs->cp0_epc = cpc;
                regs->cp0_cause &= ~CAUSEF_BD;
                return 0;
@@ -91,8 +93,16 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
        if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
                return SIGBUS;
 
-       err = __put_user(ir, &fr->emul);
-       err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+       if (get_isa16_mode(regs->cp0_epc)) {
+               err = __put_user(ir >> 16, (u16 __user *)(&fr->emul));
+               err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2));
+               err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst));
+               err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2));
+       } else {
+               err = __put_user(ir, &fr->emul);
+               err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+       }
+
        err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
        err |= __put_user(cpc, &fr->epc);
 
@@ -101,7 +111,8 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
                return SIGBUS;
        }
 
-       regs->cp0_epc = (unsigned long) &fr->emul;
+       regs->cp0_epc = ((unsigned long) &fr->emul) |
+               get_isa16_mode(regs->cp0_epc);
 
        flush_cache_sigtramp((unsigned long)&fr->badinst);
 
@@ -114,9 +125,10 @@ int do_dsemulret(struct pt_regs *xcp)
        unsigned long epc;
        u32 insn, cookie;
        int err = 0;
+       u16 instr[2];
 
        fr = (struct emuframe __user *)
-               (xcp->cp0_epc - sizeof(mips_instruction));
+               (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction));
 
        /*
         * If we can't even access the area, something is very wrong, but we'll
@@ -131,7 +143,13 @@ int do_dsemulret(struct pt_regs *xcp)
  *  - Is the instruction pointed to by the EPC a BREAK_MATH?
         *  - Is the following memory word the BD_COOKIE?
         */
-       err = __get_user(insn, &fr->badinst);
+       if (get_isa16_mode(xcp->cp0_epc)) {
+               err = __get_user(instr[0], (u16 __user *)(&fr->badinst));
+               err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2));
+               insn = (instr[0] << 16) | instr[1];
+       } else {
+               err = __get_user(insn, &fr->badinst);
+       }
        err |= __get_user(cookie, &fr->cookie);
 
        if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) {
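
Two details in the dsemul changes above are easy to miss: the emulation frame is written with halfword stores when the task is in microMIPS mode, and bit 0 of cp0_epc (the ISA16 flag) is carried over when EPC is redirected to the frame, then masked off again before the frame is dereferenced in do_dsemulret(). The fragment below is a self-contained illustration of that bit handling; the macro names only echo the kernel's get_isa16_mode()/msk_isa16_mode() helpers, and the addresses are invented.

#include <stdint.h>
#include <stdio.h>

#define GET_ISA16_MODE(epc)	((epc) & 0x1ul)
#define MSK_ISA16_MODE(epc)	((epc) & ~0x1ul)

int main(void)
{
	unsigned long old_epc = 0x004005a1ul;	/* microMIPS: bit 0 set */
	unsigned long frame   = 0x7fff2000ul;	/* hypothetical emuframe address */

	/* Redirect execution to the frame, keeping the ISA mode bit. */
	unsigned long new_epc = frame | GET_ISA16_MODE(old_epc);

	/* Later, the trap handler strips the bit before touching memory. */
	unsigned long deref = MSK_ISA16_MODE(new_epc);

	/* Reassembling a 32-bit word that was stored as two halfword writes. */
	uint16_t hi = 0x0000, lo = 0x000d;	/* e.g. a BREAK-style encoding split in two */
	uint32_t insn = ((uint32_t)hi << 16) | lo;

	printf("epc=%#lx deref=%#lx insn=%#x\n", new_epc, deref, insn);
	return 0;
}
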
index 1dcec30ad1c43b7c5e6e06c61f5c46cf04df7eac..e87aae1f2e802835d9a2730b5378d2bd29b8b54f 100644 (file)
@@ -4,7 +4,7 @@
 
 obj-y                          += cache.o dma-default.o extable.o fault.o \
                                   gup.o init.o mmap.o page.o page-funcs.o \
-                                  tlbex.o tlbex-fault.o uasm.o
+                                  tlbex.o tlbex-fault.o uasm-mips.o
 
 obj-$(CONFIG_32BIT)            += ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)            += pgtable-64.o
@@ -22,3 +22,5 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)  += sc-mips.o
+
+obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
index 2078915eacb9c9ff740ec2daa1a65274ad1de7a9..21813beec7a56f8c17ff493ca6000324168850fe 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/war.h>
 #include <asm/cacheflush.h> /* for run_uncached() */
 #include <asm/traps.h>
+#include <asm/dma-coherence.h>
 
 /*
  * Special Variant of smp_call_function for use by cache functions:
@@ -136,7 +137,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
                r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
 }
 
-static void (* r4k_blast_dcache)(void);
+void (* r4k_blast_dcache)(void);
+EXPORT_SYMBOL(r4k_blast_dcache);
 
 static void __cpuinit r4k_blast_dcache_setup(void)
 {
@@ -264,7 +266,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
 }
 
-static void (* r4k_blast_icache)(void);
+void (* r4k_blast_icache)(void);
+EXPORT_SYMBOL(r4k_blast_icache);
 
 static void __cpuinit r4k_blast_icache_setup(void)
 {
@@ -1377,20 +1380,6 @@ static void __cpuinit coherency_setup(void)
        }
 }
 
-#if defined(CONFIG_DMA_NONCOHERENT)
-
-static int __cpuinitdata coherentio;
-
-static int __init setcoherentio(char *str)
-{
-       coherentio = 1;
-
-       return 0;
-}
-
-early_param("coherentio", setcoherentio);
-#endif
-
 static void __cpuinit r4k_cache_error_setup(void)
 {
        extern char __weak except_vec2_generic;
@@ -1472,9 +1461,14 @@ void __cpuinit r4k_cache_init(void)
 
        build_clear_page();
        build_copy_page();
-#if !defined(CONFIG_MIPS_CMP)
+
+       /*
+        * We want to run CMP kernels on cores with and without coherent
+        * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
+        * or not to flush caches.
+        */
        local_r4k___flush_cache_all(NULL);
-#endif
+
        coherency_setup();
        board_cache_error_setup = r4k_cache_error_setup;
 }
index 07cec4407b0c00cf073f406738256bd363913dcf..5aeb3eb0b72f87b5f108a6d287517e53d038be2a 100644 (file)
@@ -48,6 +48,7 @@ void (*flush_icache_all)(void);
 
 EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
 EXPORT_SYMBOL(flush_data_cache_page);
+EXPORT_SYMBOL(flush_icache_all);
 
 #ifdef CONFIG_DMA_NONCOHERENT
 
index f9ef83829a523fd65803b3f5b2c8fdfe77e31a8f..caf92ecb37d6966639c47e6eb277d74cb42f2012 100644 (file)
 
 #include <dma-coherence.h>
 
+int coherentio = 0;    /* User defined DMA coherency from command line. */
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+       coherentio = 1;
+       pr_info("Hardware DMA cache coherency (command line)\n");
+       return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+       coherentio = 0;
+       pr_info("Software DMA cache coherency (command line)\n");
+       return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+
 static inline struct page *dma_addr_to_page(struct device *dev,
        dma_addr_t dma_addr)
 {
@@ -115,7 +135,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 
                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
-                       ret = UNCAC_ADDR(ret);
+                       if (!hw_coherentio)
+                               ret = UNCAC_ADDR(ret);
                }
        }
 
@@ -142,7 +163,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 
        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 
-       if (!plat_device_is_coherent(dev))
+       if (!plat_device_is_coherent(dev) && !hw_coherentio)
                addr = CAC_ADDR(addr);
 
        free_pages(addr, get_order(size));
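
A short note on the coherentio handling that moved into this file: the two early_param() hooks let the boot command line pick the DMA coherency policy, so appending "coherentio" or "nocoherentio" to the kernel arguments selects hardware or software cache maintenance respectively, while the separate hw_coherentio flag, which platform code is expected to set, is what actually suppresses the UNCAC_ADDR remapping in the allocation and free paths above.
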
index a29fba55b53e79c8d7399d67f101356379f20c73..4eb8dcfaf1ce1953760059faf151d54fe135d6a4 100644 (file)
@@ -247,6 +247,11 @@ void __cpuinit build_clear_page(void)
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
+       static atomic_t run_once = ATOMIC_INIT(0);
+
+       if (atomic_xchg(&run_once, 1)) {
+               return;
+       }
 
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
@@ -389,6 +394,11 @@ void __cpuinit build_copy_page(void)
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
+       static atomic_t run_once = ATOMIC_INIT(0);
+
+       if (atomic_xchg(&run_once, 1)) {
+               return;
+       }
 
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
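
build_clear_page() and build_copy_page() now guard themselves with an atomic_xchg() on a static flag, apparently so the handlers are generated only once even when the setup path runs on more than one CPU. This is the classic run-once idiom; a minimal userspace equivalent, using C11 atomics rather than the kernel's atomic_t, looks like this:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int run_once;

static void build_once(void)
{
	/* First caller swaps 0 -> 1 and does the work; later callers bail out. */
	if (atomic_exchange(&run_once, 1)) {
		puts("already built, skipping");
		return;
	}
	puts("building handlers");
}

int main(void)
{
	build_once();	/* does the work */
	build_once();	/* skipped */
	return 0;
}
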
index 493131c81a29b9a1c45bf44a000596ccdf17035f..c643de4c473a8d67115c7f0d304ebe1dc1e8c4ce 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/module.h>
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -94,6 +95,7 @@ void local_flush_tlb_all(void)
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
 }
+EXPORT_SYMBOL(local_flush_tlb_all);
 
 /* All entries common to a mm share an asid.  To effectively flush
    these entries, we just bump the asid. */
index 820e6612d744e199f379419103ddc507bd398696..ce9818eef7d392b569ea531a6064fea790458e3d 100644 (file)
@@ -1458,17 +1458,17 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
+u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
 
 static void __cpuinit build_r4000_setup_pgd(void)
 {
        const int a0 = 4;
        const int a1 = 5;
-       u32 *p = tlbmiss_handler_setup_pgd;
+       u32 *p = tlbmiss_handler_setup_pgd_array;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
 
-       memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
+       memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
 
@@ -1496,15 +1496,15 @@ static void __cpuinit build_r4000_setup_pgd(void)
                uasm_i_jr(&p, 31);
                UASM_i_MTC0(&p, a0, 31, pgd_reg);
        }
-       if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
-               panic("tlbmiss_handler_setup_pgd space exceeded");
+       if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
+               panic("tlbmiss_handler_setup_pgd_array space exceeded");
        uasm_resolve_relocs(relocs, labels);
-       pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
-                (unsigned int)(p - tlbmiss_handler_setup_pgd));
+       pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
+                (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
 
        dump_handler("tlbmiss_handler",
-                    tlbmiss_handler_setup_pgd,
-                    ARRAY_SIZE(tlbmiss_handler_setup_pgd));
+                    tlbmiss_handler_setup_pgd_array,
+                    ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
 }
 #endif
 
@@ -2030,6 +2030,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 
        uasm_l_nopage_tlbl(&l, p);
        build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+       if ((unsigned long)tlb_do_page_fault_0 & 1) {
+               uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
+               uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
+               uasm_i_jr(&p, K0);
+       } else
+#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);
 
@@ -2077,6 +2084,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 
        uasm_l_nopage_tlbs(&l, p);
        build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+       if ((unsigned long)tlb_do_page_fault_1 & 1) {
+               uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+               uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+               uasm_i_jr(&p, K0);
+       } else
+#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);
 
@@ -2125,6 +2139,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 
        uasm_l_nopage_tlbm(&l, p);
        build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+       if ((unsigned long)tlb_do_page_fault_1 & 1) {
+               uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+               uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+               uasm_i_jr(&p, K0);
+       } else
+#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);
 
@@ -2162,8 +2183,11 @@ void __cpuinit build_tlb_refill_handler(void)
        case CPU_TX3922:
        case CPU_TX3927:
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-               build_r3000_tlb_refill_handler();
+               if (cpu_has_local_ebase)
+                       build_r3000_tlb_refill_handler();
                if (!run_once) {
+                       if (!cpu_has_local_ebase)
+                               build_r3000_tlb_refill_handler();
                        build_r3000_tlb_load_handler();
                        build_r3000_tlb_store_handler();
                        build_r3000_tlb_modify_handler();
@@ -2192,9 +2216,12 @@ void __cpuinit build_tlb_refill_handler(void)
                        build_r4000_tlb_load_handler();
                        build_r4000_tlb_store_handler();
                        build_r4000_tlb_modify_handler();
+                       if (!cpu_has_local_ebase)
+                               build_r4000_tlb_refill_handler();
                        run_once++;
                }
-               build_r4000_tlb_refill_handler();
+               if (cpu_has_local_ebase)
+                       build_r4000_tlb_refill_handler();
        }
 }
 
@@ -2207,7 +2234,7 @@ void __cpuinit flush_tlb_handlers(void)
        local_flush_icache_range((unsigned long)handle_tlbm,
                           (unsigned long)handle_tlbm + sizeof(handle_tlbm));
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-       local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
-                          (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
+       local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
+                          (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm));
 #endif
 }
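
The three CONFIG_CPU_MICROMIPS blocks added to the TLB exception generators all deal with the same constraint: a microMIPS entry point carries bit 0 set in its address, which a plain j instruction cannot encode, so the generated stub materialises the full address with a lui/addiu pair and jumps through k0 instead. The sketch below re-derives that hi/lo split with the same formulas as uasm_rel_hi()/uasm_rel_lo() further down in this series; the handler address is invented for the example.

#include <stdint.h>
#include <stdio.h>

static long rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static long rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

int main(void)
{
	long target = 0x80123457L;	/* invented handler address, bit 0 set */
	long hi = rel_hi(target), lo = rel_lo(target);
	uint32_t rebuilt = ((uint32_t)hi << 16) + (uint32_t)lo;

	/* lui k0, hi ; addiu k0, k0, lo ; jr k0  rebuilds the full address. */
	printf("hi=%#lx lo=%#lx rebuilt=%#x\n", hi & 0xffff, lo & 0xffff, rebuilt);
	return 0;
}
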
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
new file mode 100644 (file)
index 0000000..162ee6d
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
+ * Copyright (C) 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013   MIPS Technologies, Inc.  All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA       _UASM_ISA_MICROMIPS
+#include <asm/uasm.h>
+
+#define RS_MASK                0x1f
+#define RS_SH          16
+#define RT_MASK                0x1f
+#define RT_SH          21
+#define SCIMM_MASK     0x3ff
+#define SCIMM_SH       16
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f)                                    \
+       ((a) << OP_SH                                           \
+        | (b) << RT_SH                                         \
+        | (c) << RS_SH                                         \
+        | (d) << RD_SH                                         \
+        | (e) << RE_SH                                         \
+        | (f) << FUNC_SH)
+
+/* Define these when we are not the ISA the kernel is being compiled with. */
+#ifndef CONFIG_CPU_MICROMIPS
+#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table_MM[] __uasminitdata = {
+       { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
+       { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+       { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
+       { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+       { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_beql, 0, 0 },
+       { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
+       { insn_bgezl, 0, 0 },
+       { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
+       { insn_bltzl, 0, 0 },
+       { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
+       { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
+       { insn_daddu, 0, 0 },
+       { insn_daddiu, 0, 0 },
+       { insn_dmfc0, 0, 0 },
+       { insn_dmtc0, 0, 0 },
+       { insn_dsll, 0, 0 },
+       { insn_dsll32, 0, 0 },
+       { insn_dsra, 0, 0 },
+       { insn_dsrl, 0, 0 },
+       { insn_dsrl32, 0, 0 },
+       { insn_drotr, 0, 0 },
+       { insn_drotr32, 0, 0 },
+       { insn_dsubu, 0, 0 },
+       { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
+       { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
+       { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
+       { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
+       { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
+       { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
+       { insn_ld, 0, 0 },
+       { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
+       { insn_lld, 0, 0 },
+       { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
+       { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+       { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
+       { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
+       { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
+       { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+       { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
+       { insn_rfe, 0, 0 },
+       { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
+       { insn_scd, 0, 0 },
+       { insn_sd, 0, 0 },
+       { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
+       { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
+       { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
+       { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
+       { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
+       { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+       { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
+       { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
+       { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
+       { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
+       { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
+       { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+       { insn_dins, 0, 0 },
+       { insn_dinsm, 0, 0 },
+       { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
+       { insn_bbit0, 0, 0 },
+       { insn_bbit1, 0, 0 },
+       { insn_lwx, 0, 0 },
+       { insn_ldx, 0, 0 },
+       { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+       WARN(arg > 0xffff || arg < -0x10000,
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+       return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+
+       WARN(arg & ~((JIMM_MASK << 2) | 1),
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       return (arg >> 1) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+       struct insn *ip = NULL;
+       unsigned int i;
+       va_list ap;
+       u32 op;
+
+       for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
+               if (insn_table_MM[i].opcode == opc) {
+                       ip = &insn_table_MM[i];
+                       break;
+               }
+
+       if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+               panic("Unsupported Micro-assembler instruction %d", opc);
+
+       op = ip->match;
+       va_start(ap, opc);
+       if (ip->fields & RS) {
+               if (opc == insn_mfc0 || opc == insn_mtc0)
+                       op |= build_rt(va_arg(ap, u32));
+               else
+                       op |= build_rs(va_arg(ap, u32));
+       }
+       if (ip->fields & RT) {
+               if (opc == insn_mfc0 || opc == insn_mtc0)
+                       op |= build_rs(va_arg(ap, u32));
+               else
+                       op |= build_rt(va_arg(ap, u32));
+       }
+       if (ip->fields & RD)
+               op |= build_rd(va_arg(ap, u32));
+       if (ip->fields & RE)
+               op |= build_re(va_arg(ap, u32));
+       if (ip->fields & SIMM)
+               op |= build_simm(va_arg(ap, s32));
+       if (ip->fields & UIMM)
+               op |= build_uimm(va_arg(ap, u32));
+       if (ip->fields & BIMM)
+               op |= build_bimm(va_arg(ap, s32));
+       if (ip->fields & JIMM)
+               op |= build_jimm(va_arg(ap, u32));
+       if (ip->fields & FUNC)
+               op |= build_func(va_arg(ap, u32));
+       if (ip->fields & SET)
+               op |= build_set(va_arg(ap, u32));
+       if (ip->fields & SCIMM)
+               op |= build_scimm(va_arg(ap, u32));
+       va_end(ap);
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+       **buf = ((op & 0xffff) << 16) | (op >> 16);
+#else
+       **buf = op;
+#endif
+       (*buf)++;
+}
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+       long laddr = (long)lab->addr;
+       long raddr = (long)rel->addr;
+
+       switch (rel->type) {
+       case R_MIPS_PC16:
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+               *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
+#else
+               *rel->addr |= build_bimm(laddr - (raddr + 4));
+#endif
+               break;
+
+       default:
+               panic("Unsupported Micro-assembler relocation %d",
+                     rel->type);
+       }
+}
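
Compared with the classic table that follows in uasm-mips.c, the microMIPS encoder above differs mainly in field placement (the RS and RT shifts are swapped), in branch offsets being halfword-aligned rather than word-aligned, and in build_insn() swapping the two halfwords of the emitted word on little-endian kernels. The small program below contrasts the two build_bimm() flavours for the same byte offset; it simply restates the arithmetic from the patch.

#include <stdint.h>
#include <stdio.h>

static uint32_t bimm_classic(int32_t arg)
{
	/* Word-aligned targets: the low two bits of the offset must be clear. */
	return ((arg < 0) ? (1u << 15) : 0) | (((uint32_t)(arg >> 2)) & 0x7fff);
}

static uint32_t bimm_micromips(int32_t arg)
{
	/* Halfword-aligned targets: only the low bit must be clear. */
	return ((arg < 0) ? (1u << 15) : 0) | (((uint32_t)(arg >> 1)) & 0x7fff);
}

int main(void)
{
	int32_t off = -8;	/* a short backwards branch, in bytes */

	printf("classic=%#x micromips=%#x\n", bimm_classic(off), bimm_micromips(off));
	return 0;
}
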
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
new file mode 100644 (file)
index 0000000..5fcdd8f
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
+ * Copyright (C) 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA       _UASM_ISA_CLASSIC
+#include <asm/uasm.h>
+
+#define RS_MASK                0x1f
+#define RS_SH          21
+#define RT_MASK                0x1f
+#define RT_SH          16
+#define SCIMM_MASK     0xfffff
+#define SCIMM_SH       6
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f)                                    \
+       ((a) << OP_SH                                           \
+        | (b) << RS_SH                                         \
+        | (c) << RT_SH                                         \
+        | (d) << RD_SH                                         \
+        | (e) << RE_SH                                         \
+        | (f) << FUNC_SH)
+
+/* Define these when we are not the ISA the kernel is being compiled with. */
+#ifdef CONFIG_CPU_MICROMIPS
+#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table[] __uasminitdata = {
+       { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+       { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
+       { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+       { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+       { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
+       { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
+       { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
+       { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
+       { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+       { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+       { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
+       { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+       { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+       { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
+       { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
+       { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
+       { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
+       { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
+       { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
+       { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
+       { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
+       { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
+       { insn_eret,  M(cop0_op, cop_op, 0, 0, 0, eret_op),  0 },
+       { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
+       { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
+       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+       { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
+       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+       { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+       { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+       { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
+       { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
+       { insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
+       { insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
+       { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
+       { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
+       { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
+       { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
+       { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
+       { insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
+       { insn_srl,  M(spec_op, 0, 0, 0, 0, srl_op),  RT | RD | RE },
+       { insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),  RS | RT | RD },
+       { insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
+       { insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
+       { insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
+       { insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
+       { insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
+       { insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
+       { insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
+       { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+       WARN(arg > 0x1ffff || arg < -0x20000,
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+       return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+       WARN(arg & ~(JIMM_MASK << 2),
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       return (arg >> 2) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+       struct insn *ip = NULL;
+       unsigned int i;
+       va_list ap;
+       u32 op;
+
+       for (i = 0; insn_table[i].opcode != insn_invalid; i++)
+               if (insn_table[i].opcode == opc) {
+                       ip = &insn_table[i];
+                       break;
+               }
+
+       if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+               panic("Unsupported Micro-assembler instruction %d", opc);
+
+       op = ip->match;
+       va_start(ap, opc);
+       if (ip->fields & RS)
+               op |= build_rs(va_arg(ap, u32));
+       if (ip->fields & RT)
+               op |= build_rt(va_arg(ap, u32));
+       if (ip->fields & RD)
+               op |= build_rd(va_arg(ap, u32));
+       if (ip->fields & RE)
+               op |= build_re(va_arg(ap, u32));
+       if (ip->fields & SIMM)
+               op |= build_simm(va_arg(ap, s32));
+       if (ip->fields & UIMM)
+               op |= build_uimm(va_arg(ap, u32));
+       if (ip->fields & BIMM)
+               op |= build_bimm(va_arg(ap, s32));
+       if (ip->fields & JIMM)
+               op |= build_jimm(va_arg(ap, u32));
+       if (ip->fields & FUNC)
+               op |= build_func(va_arg(ap, u32));
+       if (ip->fields & SET)
+               op |= build_set(va_arg(ap, u32));
+       if (ip->fields & SCIMM)
+               op |= build_scimm(va_arg(ap, u32));
+       va_end(ap);
+
+       **buf = op;
+       (*buf)++;
+}
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+       long laddr = (long)lab->addr;
+       long raddr = (long)rel->addr;
+
+       switch (rel->type) {
+       case R_MIPS_PC16:
+               *rel->addr |= build_bimm(laddr - (raddr + 4));
+               break;
+
+       default:
+               panic("Unsupported Micro-assembler relocation %d",
+                     rel->type);
+       }
+}
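
For readers unfamiliar with the table format shared by both encoders, the M() macro fixes the constant bits of an instruction and build_insn() ORs the variable fields on top. A worked example for the classic encoder, using the same OP/RS/RT shifts defined at the top of uasm-mips.c, assembles addiu sp, sp, -32 and prints the familiar prologue word 0x27bdffe0:

#include <stdint.h>
#include <stdio.h>

#define OP_SH	26
#define RS_SH	21
#define RT_SH	16

int main(void)
{
	uint32_t addiu_op = 9;			/* MIPS32 ADDIU major opcode */
	uint32_t match = addiu_op << OP_SH;	/* what M(addiu_op, 0, ..., 0) yields */
	uint32_t rs = 29, rt = 29;		/* addiu sp, sp, -32 */
	int32_t simm = -32;

	uint32_t insn = match | (rs << RS_SH) | (rt << RT_SH) | ((uint32_t)simm & 0xffff);

	printf("addiu sp, sp, -32 -> %#010x\n", insn);	/* expect 0x27bdffe0 */
	return 0;
}
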
index 942ff6c2eba27ac256d812c64a1b5752b164f712..7eb5e4355d25c467fd1241ffda14715dfc69c1c7 100644 (file)
  * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
  * Copyright (C) 2005, 2007  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
  */
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <asm/inst.h>
-#include <asm/elf.h>
-#include <asm/bugs.h>
-#include <asm/uasm.h>
-
 enum fields {
        RS = 0x001,
        RT = 0x002,
@@ -37,10 +29,6 @@ enum fields {
 
 #define OP_MASK                0x3f
 #define OP_SH          26
-#define RS_MASK                0x1f
-#define RS_SH          21
-#define RT_MASK                0x1f
-#define RT_SH          16
 #define RD_MASK                0x1f
 #define RD_SH          11
 #define RE_MASK                0x1f
@@ -53,8 +41,6 @@ enum fields {
 #define FUNC_SH                0
 #define SET_MASK       0x7
 #define SET_SH         0
-#define SCIMM_MASK     0xfffff
-#define SCIMM_SH       6
 
 enum opcode {
        insn_invalid,
@@ -77,85 +63,6 @@ struct insn {
        enum fields fields;
 };
 
-/* This macro sets the non-variable bits of an instruction. */
-#define M(a, b, c, d, e, f)                                    \
-       ((a) << OP_SH                                           \
-        | (b) << RS_SH                                         \
-        | (c) << RT_SH                                         \
-        | (d) << RD_SH                                         \
-        | (e) << RE_SH                                         \
-        | (f) << FUNC_SH)
-
-static struct insn insn_table[] __uasminitdata = {
-       { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
-       { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
-       { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
-       { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
-       { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
-       { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
-       { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
-       { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
-       { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
-       { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
-       { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
-       { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
-       { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
-       { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
-       { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
-       { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
-       { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
-       { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
-       { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
-       { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
-       { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
-       { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
-       { insn_eret,  M(cop0_op, cop_op, 0, 0, 0, eret_op),  0 },
-       { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
-       { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
-       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
-       { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
-       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
-       { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
-       { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
-       { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
-       { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
-       { insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
-       { insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
-       { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
-       { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
-       { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
-       { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
-       { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
-       { insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
-       { insn_srl,  M(spec_op, 0, 0, 0, 0, srl_op),  RT | RD | RE },
-       { insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),  RS | RT | RD },
-       { insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
-       { insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
-       { insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
-       { insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
-       { insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
-       { insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
-       { insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
-       { insn_invalid, 0, 0 }
-};
-
-#undef M
-
 static inline __uasminit u32 build_rs(u32 arg)
 {
        WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -199,24 +106,6 @@ static inline __uasminit u32 build_uimm(u32 arg)
        return arg & IMM_MASK;
 }
 
-static inline __uasminit u32 build_bimm(s32 arg)
-{
-       WARN(arg > 0x1ffff || arg < -0x20000,
-            KERN_WARNING "Micro-assembler field overflow\n");
-
-       WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
-
-       return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
-}
-
-static inline __uasminit u32 build_jimm(u32 arg)
-{
-       WARN(arg & ~(JIMM_MASK << 2),
-            KERN_WARNING "Micro-assembler field overflow\n");
-
-       return (arg >> 2) & JIMM_MASK;
-}
-
 static inline __uasminit u32 build_scimm(u32 arg)
 {
        WARN(arg & ~SCIMM_MASK,
@@ -239,55 +128,7 @@ static inline __uasminit u32 build_set(u32 arg)
        return arg & SET_MASK;
 }
 
-/*
- * The order of opcode arguments is implicitly left to right,
- * starting with RS and ending with FUNC or IMM.
- */
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
-{
-       struct insn *ip = NULL;
-       unsigned int i;
-       va_list ap;
-       u32 op;
-
-       for (i = 0; insn_table[i].opcode != insn_invalid; i++)
-               if (insn_table[i].opcode == opc) {
-                       ip = &insn_table[i];
-                       break;
-               }
-
-       if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
-               panic("Unsupported Micro-assembler instruction %d", opc);
-
-       op = ip->match;
-       va_start(ap, opc);
-       if (ip->fields & RS)
-               op |= build_rs(va_arg(ap, u32));
-       if (ip->fields & RT)
-               op |= build_rt(va_arg(ap, u32));
-       if (ip->fields & RD)
-               op |= build_rd(va_arg(ap, u32));
-       if (ip->fields & RE)
-               op |= build_re(va_arg(ap, u32));
-       if (ip->fields & SIMM)
-               op |= build_simm(va_arg(ap, s32));
-       if (ip->fields & UIMM)
-               op |= build_uimm(va_arg(ap, u32));
-       if (ip->fields & BIMM)
-               op |= build_bimm(va_arg(ap, s32));
-       if (ip->fields & JIMM)
-               op |= build_jimm(va_arg(ap, u32));
-       if (ip->fields & FUNC)
-               op |= build_func(va_arg(ap, u32));
-       if (ip->fields & SET)
-               op |= build_set(va_arg(ap, u32));
-       if (ip->fields & SCIMM)
-               op |= build_scimm(va_arg(ap, u32));
-       va_end(ap);
-
-       **buf = op;
-       (*buf)++;
-}
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
 
 #define I_u1u2u3(op)                                   \
 Ip_u1u2u3(op)                                          \
@@ -445,7 +286,7 @@ I_u3u1u2(_ldx)
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #include <asm/octeon/octeon.h>
-void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
                            unsigned int c)
 {
        if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -457,21 +298,21 @@ void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
        else
                build_insn(buf, insn_pref, c, a, b);
 }
-UASM_EXPORT_SYMBOL(uasm_i_pref);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
 #else
 I_u2s3u1(_pref)
 #endif
 
 /* Handle labels. */
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
 {
        (*lab)->addr = addr;
        (*lab)->lab = lid;
        (*lab)++;
 }
-UASM_EXPORT_SYMBOL(uasm_build_label);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
 
-int __uasminit uasm_in_compat_space_p(long addr)
+int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
 {
        /* Is this address in 32bit compat space? */
 #ifdef CONFIG_64BIT
@@ -480,7 +321,7 @@ int __uasminit uasm_in_compat_space_p(long addr)
        return 1;
 #endif
 }
-UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
 
 static int __uasminit uasm_rel_highest(long val)
 {
@@ -500,77 +341,66 @@ static int __uasminit uasm_rel_higher(long val)
 #endif
 }
 
-int __uasminit uasm_rel_hi(long val)
+int __uasminit ISAFUNC(uasm_rel_hi)(long val)
 {
        return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
 }
-UASM_EXPORT_SYMBOL(uasm_rel_hi);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
 
-int __uasminit uasm_rel_lo(long val)
+int __uasminit ISAFUNC(uasm_rel_lo)(long val)
 {
        return ((val & 0xffff) ^ 0x8000) - 0x8000;
 }
-UASM_EXPORT_SYMBOL(uasm_rel_lo);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
 
-void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
 {
-       if (!uasm_in_compat_space_p(addr)) {
-               uasm_i_lui(buf, rs, uasm_rel_highest(addr));
+       if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
+               ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
                if (uasm_rel_higher(addr))
-                       uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
-               if (uasm_rel_hi(addr)) {
-                       uasm_i_dsll(buf, rs, rs, 16);
-                       uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
-                       uasm_i_dsll(buf, rs, rs, 16);
+                       ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
+               if (ISAFUNC(uasm_rel_hi(addr))) {
+                       ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
+                       ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+                                       ISAFUNC(uasm_rel_hi)(addr));
+                       ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
                } else
-                       uasm_i_dsll32(buf, rs, rs, 0);
+                       ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
        } else
-               uasm_i_lui(buf, rs, uasm_rel_hi(addr));
+               ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
 }
-UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
 
-void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
 {
-       UASM_i_LA_mostly(buf, rs, addr);
-       if (uasm_rel_lo(addr)) {
-               if (!uasm_in_compat_space_p(addr))
-                       uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
+       ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
+       if (ISAFUNC(uasm_rel_lo(addr))) {
+               if (!ISAFUNC(uasm_in_compat_space_p)(addr))
+                       ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+                                       ISAFUNC(uasm_rel_lo(addr)));
                else
-                       uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
+                       ISAFUNC(uasm_i_addiu)(buf, rs, rs,
+                                       ISAFUNC(uasm_rel_lo(addr)));
        }
 }
-UASM_EXPORT_SYMBOL(UASM_i_LA);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
 
 /* Handle relocations. */
 void __uasminit
-uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
+ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
 {
        (*rel)->addr = addr;
        (*rel)->type = R_MIPS_PC16;
        (*rel)->lab = lid;
        (*rel)++;
 }
-UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
 
 static inline void __uasminit
-__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
-{
-       long laddr = (long)lab->addr;
-       long raddr = (long)rel->addr;
-
-       switch (rel->type) {
-       case R_MIPS_PC16:
-               *rel->addr |= build_bimm(laddr - (raddr + 4));
-               break;
-
-       default:
-               panic("Unsupported Micro-assembler relocation %d",
-                     rel->type);
-       }
-}
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
 
 void __uasminit
-uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
 {
        struct uasm_label *l;
 
@@ -579,40 +409,40 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
                        if (rel->lab == l->lab)
                                __resolve_relocs(rel, l);
 }
-UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
 
 void __uasminit
-uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
 {
        for (; rel->lab != UASM_LABEL_INVALID; rel++)
                if (rel->addr >= first && rel->addr < end)
                        rel->addr += off;
 }
-UASM_EXPORT_SYMBOL(uasm_move_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
 
 void __uasminit
-uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
 {
        for (; lab->lab != UASM_LABEL_INVALID; lab++)
                if (lab->addr >= first && lab->addr < end)
                        lab->addr += off;
 }
-UASM_EXPORT_SYMBOL(uasm_move_labels);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
 
 void __uasminit
-uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
+ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
                  u32 *end, u32 *target)
 {
        long off = (long)(target - first);
 
        memcpy(target, first, (end - first) * sizeof(u32));
 
-       uasm_move_relocs(rel, first, end, off);
-       uasm_move_labels(lab, first, end, off);
+       ISAFUNC(uasm_move_relocs(rel, first, end, off));
+       ISAFUNC(uasm_move_labels(lab, first, end, off));
 }
-UASM_EXPORT_SYMBOL(uasm_copy_handler);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
 
-int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
 {
        for (; rel->lab != UASM_LABEL_INVALID; rel++) {
                if (rel->addr == addr
@@ -623,88 +453,88 @@ int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
 
        return 0;
 }
-UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
 
 /* Convenience functions for labeled branches. */
 void __uasminit
-uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bltz(p, reg, 0);
+       ISAFUNC(uasm_i_bltz)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bltz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
 
 void __uasminit
-uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
+ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_b(p, 0);
+       ISAFUNC(uasm_i_b)(p, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_b);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
 
 void __uasminit
-uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_beqz(p, reg, 0);
+       ISAFUNC(uasm_i_beqz)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_beqz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
 
 void __uasminit
-uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_beqzl(p, reg, 0);
+       ISAFUNC(uasm_i_beqzl)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_beqzl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
 
 void __uasminit
-uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
        unsigned int reg2, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bne(p, reg1, reg2, 0);
+       ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bne);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
 
 void __uasminit
-uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bnez(p, reg, 0);
+       ISAFUNC(uasm_i_bnez)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bnez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
 
 void __uasminit
-uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bgezl(p, reg, 0);
+       ISAFUNC(uasm_i_bgezl)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bgezl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
 
 void __uasminit
-uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bgez(p, reg, 0);
+       ISAFUNC(uasm_i_bgez)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bgez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
 
 void __uasminit
-uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
              unsigned int bit, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bbit0(p, reg, bit, 0);
+       ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bbit0);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
 
 void __uasminit
-uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
              unsigned int bit, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bbit1(p, reg, bit, 0);
+       ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bbit1);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
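
The hunks above mechanically wrap every generic-uasm entry point, its internal callers and its UASM_EXPORT_SYMBOL() in an ISAFUNC() macro, so the same uasm source can be built once for classic MIPS and once for microMIPS with distinct symbol names. A minimal, hypothetical sketch of how such a name-mangling macro can work; the kernel's real definition lives in asm/uasm.h and may differ:

        /* Hypothetical illustration of per-ISA symbol mangling (not the kernel's header). */
        #include <stdio.h>

        #ifdef BUILD_MICROMIPS
        #define ISAFUNC(x)      MM_##x          /* microMIPS build gets a prefixed symbol */
        #else
        #define ISAFUNC(x)      x               /* classic build keeps the plain name */
        #endif

        /* One definition expands to either uasm_i_nop() or MM_uasm_i_nop(). */
        static void ISAFUNC(uasm_i_nop)(unsigned int **p)
        {
                *(*p)++ = 0;    /* 0x00000000 is the classic-MIPS nop; microMIPS would differ */
        }

        int main(void)
        {
                unsigned int buf[4], *p = buf;

                ISAFUNC(uasm_i_nop)(&p);        /* callers spell the name the same way in both builds */
                printf("emitted %td word(s)\n", p - buf);
                return 0;
        }
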
index 6079ef33b5f0670e0e3f4de945bd287a821a7381..0388fc8b5613430152a0f097c7e7d36e15aebdc6 100644 (file)
@@ -5,9 +5,8 @@
 # Copyright (C) 2008 Wind River Systems, Inc.
 #   written by Ralf Baechle <ralf@linux-mips.org>
 #
-obj-y                          := malta-amon.o malta-cmdline.o \
-                                  malta-display.o malta-init.o malta-int.o \
-                                  malta-memory.o malta-platform.o \
+obj-y                          := malta-amon.o malta-display.o malta-init.o \
+                                  malta-int.o malta-memory.o malta-platform.o \
                                   malta-reset.o malta-setup.o malta-time.o
 
 obj-$(CONFIG_EARLY_PRINTK)     += malta-console.o
index 5b548b5a4fcf1f5bf116d6dd59e4c42755193c4c..2cc72c9b38e3ba508a1d9b6596da4403f67e6539 100644 (file)
@@ -3,5 +3,9 @@
 #
 platform-$(CONFIG_MIPS_MALTA)  += mti-malta/
 cflags-$(CONFIG_MIPS_MALTA)    += -I$(srctree)/arch/mips/include/asm/mach-malta
-load-$(CONFIG_MIPS_MALTA)      += 0xffffffff80100000
+ifdef CONFIG_KVM_GUEST
+    load-$(CONFIG_MIPS_MALTA)  += 0x0000000040100000
+else
+    load-$(CONFIG_MIPS_MALTA)  += 0xffffffff80100000
+endif
 all-$(CONFIG_MIPS_MALTA)       := $(COMPRESSION_FNAME).bin
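
The Platform change above links the Malta kernel at 0x0000000040100000 when CONFIG_KVM_GUEST is set, instead of the usual KSEG0 address 0xffffffff80100000; the guest kernel is expected to run from TLB-mapped useg addresses rather than the unmapped KSEG0 window. A small illustrative sketch of the difference (the two load addresses are the ones in the hunk; the segment arithmetic is standard MIPS32, not something this patch defines):

        #include <stdio.h>
        #include <stdint.h>

        /* KSEG0 is an unmapped, cached alias of low physical memory: clearing the
         * top three address bits recovers the physical address. */
        #define KSEG0_TO_PHYS(va)       ((uint32_t)(va) & 0x1fffffffu)

        int main(void)
        {
                uint32_t native_load = 0x80100000u;     /* low word of 0xffffffff80100000 */
                uint32_t guest_load  = 0x40100000u;     /* low word of 0x0000000040100000 */

                printf("native link address %#x -> physical %#x (unmapped KSEG0)\n",
                       native_load, KSEG0_TO_PHYS(native_load));
                printf("KVM guest link address %#x lies in useg, translated by the TLB\n",
                       guest_load);
                return 0;
        }
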
diff --git a/arch/mips/mti-malta/malta-cmdline.c b/arch/mips/mti-malta/malta-cmdline.c
deleted file mode 100644 (file)
index 5576a30..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Kernel command line creation using the prom monitor (YAMON) argc/argv.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
-
-char * __init prom_getcmdline(void)
-{
-       return &(arcs_cmdline[0]);
-}
-
-
-void  __init prom_init_cmdline(void)
-{
-       char *cp;
-       int actr;
-
-       actr = 1; /* Always ignore argv[0] */
-
-       cp = &(arcs_cmdline[0]);
-       while(actr < prom_argc) {
-               strcpy(cp, prom_argv(actr));
-               cp += strlen(prom_argv(actr));
-               *cp++ = ' ';
-               actr++;
-       }
-       if (cp != &(arcs_cmdline[0])) {
-               /* get rid of trailing space */
-               --cp;
-               *cp = '\0';
-       }
-}
index 9bc58a24e80a637307e13058eab6d54eacf91c88..d4f807191ecd74694075ba3ca2748ff0b80bcbb1 100644 (file)
@@ -1,28 +1,20 @@
 /*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * Display routines for display messages in MIPS boards ascii display.
+ *
+ * Copyright (C) 1999,2000,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
-
 #include <linux/compiler.h>
 #include <linux/timer.h>
-#include <asm/io.h>
+#include <linux/io.h>
+
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 extern const char display_string[];
 static unsigned int display_count;
@@ -36,11 +28,11 @@ void mips_display_message(const char *str)
        if (unlikely(display == NULL))
                display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
 
-       for (i = 0; i <= 14; i=i+2) {
-                if (*str)
-                        __raw_writel(*str++, display + i);
-                else
-                        __raw_writel(' ', display + i);
+       for (i = 0; i <= 14; i += 2) {
+               if (*str)
+                       __raw_writel(*str++, display + i);
+               else
+                       __raw_writel(' ', display + i);
        }
 }
 
index c2cbce9e435e8ffc74d25a9cfff61acbc38dc1b4..ff8caffd3266ea0a1deaaa1ee1093eac50b600b9 100644 (file)
@@ -1,54 +1,28 @@
 /*
- * Copyright (C) 1999, 2000, 2004, 2005         MIPS Technologies, Inc.
- *     All rights reserved.
- *     Authors: Carsten Langgaard <carstenl@mips.com>
- *              Maciej W. Rozycki <macro@mips.com>
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * PROM library initialisation code.
+ *
+ * Copyright (C) 1999,2000,2004,2005,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *         Maciej W. Rozycki <macro@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/bootinfo.h>
-#include <asm/gt64120.h>
-#include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/smp-ops.h>
 #include <asm/traps.h>
-
+#include <asm/fw/fw.h>
 #include <asm/gcmpregs.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/bonito64.h>
-#include <asm/mips-boards/msc01_pci.h>
-
 #include <asm/mips-boards/malta.h>
 
-int prom_argc;
-int *_prom_argv, *_prom_envp;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension, if running in 64-bit mode.
- */
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-int init_debug;
-
 static int mips_revision_corid;
 int mips_revision_sconid;
 
@@ -62,74 +36,6 @@ unsigned long _pcictrl_gt64120;
 /* MIPS System controller register base */
 unsigned long _pcictrl_msc;
 
-char *prom_getenv(char *envname)
-{
-       /*
-        * Return a pointer to the given environment variable.
-        * In 64-bit mode: we're using 64-bit pointers, but all pointers
-        * in the PROM structures are only 32-bit, so we need some
-        * workarounds, if we are running in 64-bit mode.
-        */
-       int i, index=0;
-
-       i = strlen(envname);
-
-       while (prom_envp(index)) {
-               if(strncmp(envname, prom_envp(index), i) == 0) {
-                       return(prom_envp(index+1));
-               }
-               index += 2;
-       }
-
-       return NULL;
-}
-
-static inline unsigned char str2hexnum(unsigned char c)
-{
-       if (c >= '0' && c <= '9')
-               return c - '0';
-       if (c >= 'a' && c <= 'f')
-               return c - 'a' + 10;
-       return 0; /* foo */
-}
-
-static inline void str2eaddr(unsigned char *ea, unsigned char *str)
-{
-       int i;
-
-       for (i = 0; i < 6; i++) {
-               unsigned char num;
-
-               if((*str == '.') || (*str == ':'))
-                       str++;
-               num = str2hexnum(*str++) << 4;
-               num |= (str2hexnum(*str++));
-               ea[i] = num;
-       }
-}
-
-int get_ethernet_addr(char *ethernet_addr)
-{
-       char *ethaddr_str;
-
-       ethaddr_str = prom_getenv("ethaddr");
-       if (!ethaddr_str) {
-               printk("ethaddr not set in boot prom\n");
-               return -1;
-       }
-       str2eaddr(ethernet_addr, ethaddr_str);
-
-       if (init_debug > 1) {
-               int i;
-               printk("get_ethernet_addr: ");
-               for (i=0; i<5; i++)
-                       printk("%02x:", (unsigned char)*(ethernet_addr+i));
-               printk("%02x\n", *(ethernet_addr+i));
-       }
-
-       return 0;
-}
-
 #ifdef CONFIG_SERIAL_8250_CONSOLE
 static void __init console_config(void)
 {
@@ -138,17 +44,23 @@ static void __init console_config(void)
        char parity = '\0', bits = '\0', flow = '\0';
        char *s;
 
-       if ((strstr(prom_getcmdline(), "console=")) == NULL) {
-               s = prom_getenv("modetty0");
+       if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+               s = fw_getenv("modetty0");
                if (s) {
                        while (*s >= '0' && *s <= '9')
                                baud = baud*10 + *s++ - '0';
-                       if (*s == ',') s++;
-                       if (*s) parity = *s++;
-                       if (*s == ',') s++;
-                       if (*s) bits = *s++;
-                       if (*s == ',') s++;
-                       if (*s == 'h') flow = 'r';
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               parity = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               bits = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s == 'h')
+                               flow = 'r';
                }
                if (baud == 0)
                        baud = 38400;
@@ -158,8 +70,9 @@ static void __init console_config(void)
                        bits = '8';
                if (flow == '\0')
                        flow = 'r';
-               sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
-               strcat(prom_getcmdline(), console_string);
+               sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+                       parity, bits, flow);
+               strcat(fw_getcmdline(), console_string);
                pr_info("Config serial console:%s\n", console_string);
        }
 }
@@ -193,10 +106,6 @@ extern struct plat_smp_ops msmtc_smp_ops;
 
 void __init prom_init(void)
 {
-       prom_argc = fw_arg0;
-       _prom_argv = (int *) fw_arg1;
-       _prom_envp = (int *) fw_arg2;
-
        mips_display_message("LINUX");
 
        /*
@@ -306,7 +215,7 @@ void __init prom_init(void)
        case MIPS_REVISION_SCON_SOCIT:
        case MIPS_REVISION_SCON_ROCIT:
                _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
-       mips_pci_controller:
+mips_pci_controller:
                mb();
                MSC_READ(MSC01_PCI_CFG, data);
                MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
@@ -348,13 +257,13 @@ void __init prom_init(void)
        default:
                /* Unknown system controller */
                mips_display_message("SC Error");
-               while (1);   /* We die here... */
+               while (1);      /* We die here... */
        }
        board_nmi_handler_setup = mips_nmi_setup;
        board_ejtag_handler_setup = mips_ejtag_setup;
 
-       prom_init_cmdline();
-       prom_meminit();
+       fw_init_cmdline();
+       fw_meminit();
 #ifdef CONFIG_SERIAL_8250_CONSOLE
        console_config();
 #endif
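
console_config() above only appends a console= option when the command line does not already carry one, deriving the parameters from YAMON's modetty0 environment variable. A stand-alone sketch of the same parse, fed a made-up modetty0 value (the exact YAMON syntax is an assumption here, not something the patch states):

        #include <stdio.h>

        int main(void)
        {
                /* hypothetical YAMON value: baud,parity,data bits,stop bits,flow */
                const char *s = "38400,n,8,1,hw";
                char console_string[40];
                int baud = 0;
                char parity = '\0', bits = '\0', flow = '\0';

                while (*s >= '0' && *s <= '9')
                        baud = baud * 10 + *s++ - '0';
                if (*s == ',')
                        s++;
                if (*s)
                        parity = *s++;
                if (*s == ',')
                        s++;
                if (*s)
                        bits = *s++;
                if (*s == ',')
                        s++;
                if (*s == 'h')
                        flow = 'r';

                /* same defaults as the kernel code above */
                if (baud == 0)
                        baud = 38400;
                if (parity != 'n' && parity != 'o' && parity != 'e')
                        parity = 'n';
                if (bits != '7' && bits != '8')
                        bits = '8';
                if (flow == '\0')
                        flow = 'r';

                sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
                printf("appended:%s\n", console_string); /* -> console=ttyS0,38400n8r */
                return 0;
        }
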
index e364af70e6cf5cc255a9be51ca569d7cb911dbf0..0a1339ac3ec8f8e6c3d9be6c4dc3b272d7044f98 100644 (file)
@@ -47,7 +47,6 @@
 #include <asm/setup.h>
 
 int gcmp_present = -1;
-int gic_present;
 static unsigned long _msc01_biu_base;
 static unsigned long _gcmp_base;
 static unsigned int ipi_map[NR_CPUS];
@@ -134,6 +133,9 @@ static void malta_ipi_irqdispatch(void)
 {
        int irq;
 
+       if (gic_compare_int())
+               do_IRQ(MIPS_GIC_IRQ_BASE);
+
        irq = gic_get_int();
        if (irq < 0)
                return;  /* interrupt has already been cleared */
index f3d43aa023a9c9215dc06334204c1f82e122ef73..1f73d63e92a765d3ab1d829244a19e57dab8bd8e 100644 (file)
@@ -1,73 +1,45 @@
 /*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * PROM library functions for acquiring/using memory descriptors given to
  * us from the YAMON.
+ *
+ * Copyright (C) 1999,2000,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
 #include <linux/init.h>
-#include <linux/mm.h>
 #include <linux/bootmem.h>
-#include <linux/pfn.h>
 #include <linux/string.h>
 
 #include <asm/bootinfo.h>
-#include <asm/page.h>
 #include <asm/sections.h>
+#include <asm/fw/fw.h>
 
-#include <asm/mips-boards/prom.h>
-
-/*#define DEBUG*/
-
-enum yamon_memtypes {
-       yamon_dontuse,
-       yamon_prom,
-       yamon_free,
-};
-static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
-
-#ifdef DEBUG
-static char *mtypes[3] = {
-       "Dont use memory",
-       "YAMON PROM memory",
-       "Free memory",
-};
-#endif
+static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS];
 
 /* determined physical memory size, not overridden by command line args         */
 unsigned long physical_memsize = 0L;
 
-static struct prom_pmemblock * __init prom_getmdesc(void)
+fw_memblock_t * __init fw_getmdesc(void)
 {
-       char *memsize_str;
+       char *memsize_str, *ptr;
        unsigned int memsize;
-       char *ptr;
        static char cmdline[COMMAND_LINE_SIZE] __initdata;
+       long val;
+       int tmp;
 
        /* otherwise look in the environment */
-       memsize_str = prom_getenv("memsize");
+       memsize_str = fw_getenv("memsize");
        if (!memsize_str) {
-               printk(KERN_WARNING
-                      "memsize not set in boot prom, set to default (32Mb)\n");
+               pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
                physical_memsize = 0x02000000;
        } else {
-#ifdef DEBUG
-               pr_debug("prom_memsize = %s\n", memsize_str);
-#endif
-               physical_memsize = simple_strtol(memsize_str, NULL, 0);
+               tmp = kstrtol(memsize_str, 0, &val);
+               physical_memsize = (unsigned long)val;
        }
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -90,11 +62,11 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
 
        memset(mdesc, 0, sizeof(mdesc));
 
-       mdesc[0].type = yamon_dontuse;
+       mdesc[0].type = fw_dontuse;
        mdesc[0].base = 0x00000000;
        mdesc[0].size = 0x00001000;
 
-       mdesc[1].type = yamon_prom;
+       mdesc[1].type = fw_code;
        mdesc[1].base = 0x00001000;
        mdesc[1].size = 0x000ef000;
 
@@ -105,55 +77,45 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
         * This means that this area can't be used as DMA memory for PCI

         * devices.
         */
-       mdesc[2].type = yamon_dontuse;
+       mdesc[2].type = fw_dontuse;
        mdesc[2].base = 0x000f0000;
        mdesc[2].size = 0x00010000;
 
-       mdesc[3].type = yamon_dontuse;
+       mdesc[3].type = fw_dontuse;
        mdesc[3].base = 0x00100000;
-       mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base;
+       mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
+               mdesc[3].base;
 
-       mdesc[4].type = yamon_free;
+       mdesc[4].type = fw_free;
        mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
        mdesc[4].size = memsize - mdesc[4].base;
 
        return &mdesc[0];
 }
 
-static int __init prom_memtype_classify(unsigned int type)
+static int __init fw_memtype_classify(unsigned int type)
 {
        switch (type) {
-       case yamon_free:
+       case fw_free:
                return BOOT_MEM_RAM;
-       case yamon_prom:
+       case fw_code:
                return BOOT_MEM_ROM_DATA;
        default:
                return BOOT_MEM_RESERVED;
        }
 }
 
-void __init prom_meminit(void)
+void __init fw_meminit(void)
 {
-       struct prom_pmemblock *p;
+       fw_memblock_t *p;
 
-#ifdef DEBUG
-       pr_debug("YAMON MEMORY DESCRIPTOR dump:\n");
-       p = prom_getmdesc();
-       while (p->size) {
-               int i = 0;
-               pr_debug("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n",
-                        i, p, p->base, p->size, mtypes[p->type]);
-               p++;
-               i++;
-       }
-#endif
-       p = prom_getmdesc();
+       p = fw_getmdesc();
 
        while (p->size) {
                long type;
                unsigned long base, size;
 
-               type = prom_memtype_classify(p->type);
+               type = fw_memtype_classify(p->type);
                base = p->base;
                size = p->size;
 
@@ -172,7 +134,7 @@ void __init prom_free_prom_memory(void)
                        continue;
 
                addr = boot_mem_map.map[i].addr;
-               free_init_pages("prom memory",
+               free_init_pages("YAMON memory",
                                addr, addr + boot_mem_map.map[i].size);
        }
 }
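
With the default 32 MB memsize and a kernel image assumed (for illustration only) to end at the 4 MB boundary, the descriptor table built by fw_getmdesc() above and its classification by fw_memtype_classify() would come out as:

        idx  type        base         size         classified as
        [0]  fw_dontuse  0x00000000   0x00001000   BOOT_MEM_RESERVED
        [1]  fw_code     0x00001000   0x000ef000   BOOT_MEM_ROM_DATA  (YAMON PROM)
        [2]  fw_dontuse  0x000f0000   0x00010000   BOOT_MEM_RESERVED  (used by YAMON, no PCI DMA)
        [3]  fw_dontuse  0x00100000   0x00300000   BOOT_MEM_RESERVED  (kernel image, 1 MB .. _end)
        [4]  fw_free     0x00400000   0x01c00000   BOOT_MEM_RAM       (_end .. memsize)
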
index 200f64df2c9b492bf6ec948ff03bbfee864f59fa..c72a069367819d1ca8c91532862dbfbd890de1b1 100644 (file)
 #include <linux/screen_info.h>
 #include <linux/time.h>
 
-#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/malta.h>
 #include <asm/mips-boards/maltaint.h>
 #include <asm/dma.h>
 #include <asm/traps.h>
+#include <asm/gcmpregs.h>
 #ifdef CONFIG_VT
 #include <linux/console.h>
 #endif
@@ -105,6 +105,66 @@ static void __init fd_activate(void)
 }
 #endif
 
+static int __init plat_enable_iocoherency(void)
+{
+       int supported = 0;
+       if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
+               if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
+                       BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
+                       pr_info("Enabled Bonito CPU coherency\n");
+                       supported = 1;
+               }
+               if (strstr(fw_getcmdline(), "iobcuncached")) {
+                       BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
+                       BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
+                               ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+                                 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+                       pr_info("Disabled Bonito IOBC coherency\n");
+               } else {
+                       BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
+                       BONITO_PCIMEMBASECFG |=
+                               (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+                                BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+                       pr_info("Enabled Bonito IOBC coherency\n");
+               }
+       } else if (gcmp_niocu() != 0) {
+               /* Nothing special needs to be done to enable coherency */
+               pr_info("CMP IOCU detected\n");
+               if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
+                       pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
+                       return 0;
+               }
+               supported = 1;
+       }
+       hw_coherentio = supported;
+       return supported;
+}
+
+static void __init plat_setup_iocoherency(void)
+{
+#ifdef CONFIG_DMA_NONCOHERENT
+       /*
+        * Kernel has been configured with software coherency
+        * but we might choose to turn it off and use hardware
+        * coherency instead.
+        */
+       if (plat_enable_iocoherency()) {
+               if (coherentio == 0)
+                       pr_info("Hardware DMA cache coherency disabled\n");
+               else
+                       pr_info("Hardware DMA cache coherency enabled\n");
+       } else {
+               if (coherentio == 1)
+                       pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
+               else
+                       pr_info("Software DMA cache coherency enabled\n");
+       }
+#else
+       if (!plat_enable_iocoherency())
+               panic("Hardware DMA cache coherency not supported!");
+#endif
+}
+
 #ifdef CONFIG_BLK_DEV_IDE
 static void __init pci_clock_check(void)
 {
@@ -115,16 +175,15 @@ static void __init pci_clock_check(void)
                33, 20, 25, 30, 12, 16, 37, 10
        };
        int pciclock = pciclocks[jmpr];
-       char *argptr = prom_getcmdline();
+       char *argptr = fw_getcmdline();
 
        if (pciclock != 33 && !strstr(argptr, "idebus=")) {
-               printk(KERN_WARNING "WARNING: PCI clock is %dMHz, "
-                               "setting idebus\n", pciclock);
+               pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n",
+                       pciclock);
                argptr += strlen(argptr);
                sprintf(argptr, " idebus=%d", pciclock);
                if (pciclock < 20 || pciclock > 66)
-                       printk(KERN_WARNING "WARNING: IDE timing "
-                                       "calculations will be incorrect\n");
+                       pr_warn("WARNING: IDE timing calculations will be incorrect\n");
        }
 }
 #endif
@@ -153,31 +212,31 @@ static void __init bonito_quirks_setup(void)
 {
        char *argptr;
 
-       argptr = prom_getcmdline();
+       argptr = fw_getcmdline();
        if (strstr(argptr, "debug")) {
                BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE;
-               printk(KERN_INFO "Enabled Bonito debug mode\n");
+               pr_info("Enabled Bonito debug mode\n");
        } else
                BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
 
 #ifdef CONFIG_DMA_COHERENT
        if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
                BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
-               printk(KERN_INFO "Enabled Bonito CPU coherency\n");
+               pr_info("Enabled Bonito CPU coherency\n");
 
-               argptr = prom_getcmdline();
+               argptr = fw_getcmdline();
                if (strstr(argptr, "iobcuncached")) {
                        BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
                        BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
                                ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
                                        BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
-                       printk(KERN_INFO "Disabled Bonito IOBC coherency\n");
+                       pr_info("Disabled Bonito IOBC coherency\n");
                } else {
                        BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
                        BONITO_PCIMEMBASECFG |=
                                (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
                                        BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
-                       printk(KERN_INFO "Enabled Bonito IOBC coherency\n");
+                       pr_info("Enabled Bonito IOBC coherency\n");
                }
        } else
                panic("Hardware DMA cache coherency not supported");
@@ -207,6 +266,8 @@ void __init plat_mem_setup(void)
        if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
                bonito_quirks_setup();
 
+       plat_setup_iocoherency();
+
 #ifdef CONFIG_BLK_DEV_IDE
        pci_clock_check();
 #endif
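
The plat_setup_iocoherency() logic added above reduces to a small decision table for the CONFIG_DMA_NONCOHERENT case (with CONFIG_DMA_COHERENT the code instead panics when no hardware coherency is found):

        plat_enable_iocoherency()   coherentio   reported behaviour
        1 (IOCU / Bonito present)   0            "Hardware DMA cache coherency disabled"
        1 (IOCU / Bonito present)   1            "Hardware DMA cache coherency enabled"
        0 (not available)           1            "... unsupported, but enabled from command line!"
        0 (not available)           0            "Software DMA cache coherency enabled"
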
index a144b89cf9ba299e5f9fb2bcea5fd179f5ceb413..0ad305f75802bc64250ad01be6e76b1826ea95a2 100644 (file)
 #include <asm/gic.h>
 
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
-
 #include <asm/mips-boards/maltaint.h>
 
 unsigned long cpu_khz;
-int gic_frequency;
 
 static int mips_cpu_timer_irq;
 static int mips_cpu_perf_irq;
@@ -74,7 +71,24 @@ static void __init estimate_frequencies(void)
 {
        unsigned long flags;
        unsigned int count, start;
+#ifdef CONFIG_IRQ_GIC
        unsigned int giccount = 0, gicstart = 0;
+#endif
+
+#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
+       unsigned int prid = read_c0_prid() & 0xffff00;
+
+       /*
+        * XXXKYMA: hardwire the CPU frequency to Host Freq/4
+        */
+       count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
+       if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+           (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+               count *= 2;
+
+       mips_hpt_frequency = count;
+       return;
+#endif
 
        local_irq_save(flags);
 
@@ -84,26 +98,32 @@ static void __init estimate_frequencies(void)
 
        /* Initialize counters. */
        start = read_c0_count();
+#ifdef CONFIG_IRQ_GIC
        if (gic_present)
                GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
+#endif
 
        /* Read counter exactly on falling edge of update flag. */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
 
        count = read_c0_count();
+#ifdef CONFIG_IRQ_GIC
        if (gic_present)
                GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
+#endif
 
        local_irq_restore(flags);
 
        count -= start;
-       if (gic_present)
-               giccount -= gicstart;
-
        mips_hpt_frequency = count;
-       if (gic_present)
+
+#ifdef CONFIG_IRQ_GIC
+       if (gic_present) {
+               giccount -= gicstart;
                gic_frequency = giccount;
+       }
+#endif
 }
 
 void read_persistent_clock(struct timespec *ts)
@@ -159,24 +179,27 @@ void __init plat_time_init(void)
            (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
                freq *= 2;
        freq = freqround(freq, 5000);
-       pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000,
+       printk("CPU frequency %d.%02d MHz\n", freq/1000000,
               (freq%1000000)*100/1000000);
        cpu_khz = freq / 1000;
 
-       if (gic_present) {
-               freq = freqround(gic_frequency, 5000);
-               pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000,
-                      (freq%1000000)*100/1000000);
-               gic_clocksource_init(gic_frequency);
-       } else
-               init_r4k_clocksource();
+       mips_scroll_message();
 
 #ifdef CONFIG_I8253
        /* Only Malta has a PIT. */
        setup_pit_timer();
 #endif
 
-       mips_scroll_message();
+#ifdef CONFIG_IRQ_GIC
+       if (gic_present) {
+               freq = freqround(gic_frequency, 5000);
+               printk("GIC frequency %d.%02d MHz\n", freq/1000000,
+                      (freq%1000000)*100/1000000);
+#ifdef CONFIG_CSRC_GIC
+               gic_clocksource_init(gic_frequency);
+#endif
+       }
+#endif
 
        plat_perf_setup();
 }
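
For the CONFIG_KVM_GUEST path added to estimate_frequencies() above, the timer rate is not measured at all but computed from CONFIG_KVM_HOST_FREQ. A worked sketch, using a hypothetical 800 MHz host value (the number is made up; the shifts and the 20Kc/25Kf exception are from the hunk):

        #include <stdio.h>

        int main(void)
        {
                unsigned int host_mhz = 800;            /* hypothetical CONFIG_KVM_HOST_FREQ */
                unsigned int count;

                count = (host_mhz * 1000000) >> 3;      /* 100000000, i.e. host/8 */
                count *= 2;                             /* doubled unless the core is a 20Kc/25Kf */
                printf("mips_hpt_frequency = %u Hz\n", count);  /* 200000000 = host/4 */
                return 0;
        }
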
index 10ec701ce6c72e8cd056fd8ac6fb55b19608d8c8..be114209217cc5a13fe8531365ba5454b12c530f 100644 (file)
@@ -8,10 +8,10 @@
 # Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 # Steven J. Hill <sjhill@mips.com>
 #
-obj-y                          := sead3-lcd.o sead3-cmdline.o \
-                                  sead3-display.o sead3-init.o sead3-int.o \
-                                  sead3-mtd.o sead3-net.o sead3-platform.o \
-                                  sead3-reset.o sead3-setup.o sead3-time.o
+obj-y                          := sead3-lcd.o sead3-display.o sead3-init.o \
+                                  sead3-int.o sead3-mtd.o sead3-net.o \
+                                  sead3-platform.o sead3-reset.o \
+                                  sead3-setup.o sead3-time.o
 
 obj-y                          += sead3-i2c-dev.o sead3-i2c.o \
                                   sead3-pic32-i2c-drv.o sead3-pic32-bus.o \
index 322148c353ed65065bd146f05de3ce27198382ca..0a168c948b01a9e593c5c4cc7cce7679a86c618b 100644 (file)
@@ -34,33 +34,15 @@ static void sead3_fled_set(struct led_classdev *led_cdev,
 static struct led_classdev sead3_pled = {
        .name           = "sead3::pled",
        .brightness_set = sead3_pled_set,
+       .flags          = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev sead3_fled = {
        .name           = "sead3::fled",
        .brightness_set = sead3_fled_set,
+       .flags          = LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int sead3_led_suspend(struct platform_device *dev,
-               pm_message_t state)
-{
-       led_classdev_suspend(&sead3_pled);
-       led_classdev_suspend(&sead3_fled);
-       return 0;
-}
-
-static int sead3_led_resume(struct platform_device *dev)
-{
-       led_classdev_resume(&sead3_pled);
-       led_classdev_resume(&sead3_fled);
-       return 0;
-}
-#else
-#define sead3_led_suspend NULL
-#define sead3_led_resume NULL
-#endif
-
 static int sead3_led_probe(struct platform_device *pdev)
 {
        int ret;
@@ -86,8 +68,6 @@ static int sead3_led_remove(struct platform_device *pdev)
 static struct platform_driver sead3_led_driver = {
        .probe          = sead3_led_probe,
        .remove         = sead3_led_remove,
-       .suspend        = sead3_led_suspend,
-       .resume         = sead3_led_resume,
        .driver         = {
                .name           = DRVNAME,
                .owner          = THIS_MODULE,
diff --git a/arch/mips/mti-sead3/sead3-cmdline.c b/arch/mips/mti-sead3/sead3-cmdline.c
deleted file mode 100644 (file)
index a2e6cec..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
-
-char * __init prom_getcmdline(void)
-{
-       return &(arcs_cmdline[0]);
-}
-
-void  __init prom_init_cmdline(void)
-{
-       char *cp;
-       int actr;
-
-       actr = 1; /* Always ignore argv[0] */
-
-       cp = &(arcs_cmdline[0]);
-       while (actr < prom_argc) {
-               strcpy(cp, prom_argv(actr));
-               cp += strlen(prom_argv(actr));
-               *cp++ = ' ';
-               actr++;
-       }
-       if (cp != &(arcs_cmdline[0])) {
-               /* get rid of trailing space */
-               --cp;
-               *cp = '\0';
-       }
-}
index 2ddef19a9adc891b898188f8fae38e692309073a..031f47d6977006451f514f3aa1b72d0219f1821c 100644 (file)
@@ -26,7 +26,7 @@ static inline void serial_out(int offset, int value, unsigned int base_addr)
        __raw_writel(value, PORT(base_addr, offset));
 }
 
-void __init prom_init_early_console(char port)
+void __init fw_init_early_console(char port)
 {
        console_port = port;
 }
index e389326cfa423add03614ec7baa10b1ed529c7e2..94875991907bd4c3bfda8ac32b825a3513e7a4e8 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/timer.h>
 #include <linux/io.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 static unsigned int display_count;
 static unsigned int max_display_count;
index f95abaa1aa5db6a06b4d8b80696c99120ed48711..bfbd17b120a21d637888a65db614df206de41043 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
-
-extern void prom_init_early_console(char port);
+#include <asm/fw/fw.h>
 
 extern char except_vec_nmi;
 extern char except_vec_ejtag_debug;
 
-int prom_argc;
-int *_prom_argv, *_prom_envp;
-
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-char *prom_getenv(char *envname)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static void __init console_config(void)
 {
-       /*
-        * Return a pointer to the given environment variable.
-        * In 64-bit mode: we're using 64-bit pointers, but all pointers
-        * in the PROM structures are only 32-bit, so we need some
-        * workarounds, if we are running in 64-bit mode.
-        */
-       int i, index = 0;
-
-       i = strlen(envname);
-
-       while (prom_envp(index)) {
-               if (strncmp(envname, prom_envp(index), i) == 0)
-                       return prom_envp(index+1);
-               index += 2;
+       char console_string[40];
+       int baud = 0;
+       char parity = '\0', bits = '\0', flow = '\0';
+       char *s;
+
+       if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+               s = fw_getenv("modetty0");
+               if (s) {
+                       while (*s >= '0' && *s <= '9')
+                               baud = baud*10 + *s++ - '0';
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               parity = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               bits = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s == 'h')
+                               flow = 'r';
+               }
+               if (baud == 0)
+                       baud = 38400;
+               if (parity != 'n' && parity != 'o' && parity != 'e')
+                       parity = 'n';
+               if (bits != '7' && bits != '8')
+                       bits = '8';
+               if (flow == '\0')
+                       flow = 'r';
+               sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+                       parity, bits, flow);
+               strcat(fw_getcmdline(), console_string);
        }
-
-       return NULL;
 }
+#endif
 
 static void __init mips_nmi_setup(void)
 {
@@ -52,7 +65,41 @@ static void __init mips_nmi_setup(void)
        base = cpu_has_veic ?
                (void *)(CAC_BASE + 0xa80) :
                (void *)(CAC_BASE + 0x380);
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * Decrement the exception vector address by one for microMIPS.
+        */
+       memcpy(base, (&except_vec_nmi - 1), 0x80);
+
+       /*
+        * This is a hack. We do not know if the boot loader was built with
+        * microMIPS instructions or not. If it was not, the NMI exception
+        * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded
+        * assembly below forces us into microMIPS mode if we are a pure
+        * microMIPS kernel. The assembly instructions are:
+        *
+        *  3C1A8000   lui       k0,0x8000
+        *  375A0381   ori       k0,k0,0x381
+        *  03400008   jr        k0
+        *  00000000   nop
+        *
+        * The mode switch occurs by jumping to the unaligned exception
+        * vector address at 0x80000381 which would have been 0x80000380
+        * in MIPS32 mode. The jump to the unaligned address transitions
+        * us into microMIPS mode.
+        */
+       if (!cpu_has_veic) {
+               void *base2 = (void *)(CAC_BASE + 0xa80);
+               *((unsigned int *)base2) = 0x3c1a8000;
+               *((unsigned int *)base2 + 1) = 0x375a0381;
+               *((unsigned int *)base2 + 2) = 0x03400008;
+               *((unsigned int *)base2 + 3) = 0x00000000;
+               flush_icache_range((unsigned long)base2,
+                       (unsigned long)base2 + 0x10);
+       }
+#else
        memcpy(base, &except_vec_nmi, 0x80);
+#endif
        flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
 }
 
@@ -63,29 +110,40 @@ static void __init mips_ejtag_setup(void)
        base = cpu_has_veic ?
                (void *)(CAC_BASE + 0xa00) :
                (void *)(CAC_BASE + 0x300);
+#ifdef CONFIG_CPU_MICROMIPS
+       /* Deja vu... */
+       memcpy(base, (&except_vec_ejtag_debug - 1), 0x80);
+       if (!cpu_has_veic) {
+               void *base2 = (void *)(CAC_BASE + 0xa00);
+               *((unsigned int *)base2) = 0x3c1a8000;
+               *((unsigned int *)base2 + 1) = 0x375a0301;
+               *((unsigned int *)base2 + 2) = 0x03400008;
+               *((unsigned int *)base2 + 3) = 0x00000000;
+               flush_icache_range((unsigned long)base2,
+                       (unsigned long)base2 + 0x10);
+       }
+#else
        memcpy(base, &except_vec_ejtag_debug, 0x80);
+#endif
        flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
 }
 
 void __init prom_init(void)
 {
-       prom_argc = fw_arg0;
-       _prom_argv = (int *) fw_arg1;
-       _prom_envp = (int *) fw_arg2;
-
        board_nmi_handler_setup = mips_nmi_setup;
        board_ejtag_handler_setup = mips_ejtag_setup;
 
-       prom_init_cmdline();
+       fw_init_cmdline();
 #ifdef CONFIG_EARLY_PRINTK
-       if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL)
-               prom_init_early_console(0);
-       else if ((strstr(prom_getcmdline(), "console=ttyS1")) != NULL)
-               prom_init_early_console(1);
+       if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL)
+               fw_init_early_console(0);
+       else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL)
+               fw_init_early_console(1);
 #endif
 #ifdef CONFIG_SERIAL_8250_CONSOLE
-       if ((strstr(prom_getcmdline(), "console=")) == NULL)
-               strcat(prom_getcmdline(), " console=ttyS0,38400n8r");
+       if ((strstr(fw_getcmdline(), "console=")) == NULL)
+               strcat(fw_getcmdline(), " console=ttyS0,38400n8r");
+       console_config();
 #endif
 }
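
The four hand-coded words that the new NMI trampoline writes at CAC_BASE + 0xa80 are plain MIPS32 encodings of the sequence listed in its comment ("lui k0,0x8000; ori k0,k0,0x381; jr k0; nop"); the EJTAG variant differs only in the immediate (0x301). A small self-contained check of those constants, using the standard MIPS32 instruction field layout:

        #include <assert.h>
        #include <stdint.h>

        #define K0 26                                   /* register $26, conventionally k0 */

        /* MIPS32 I-type layout: opcode(6) | rs(5) | rt(5) | immediate(16) */
        static uint32_t itype(uint32_t op, uint32_t rs, uint32_t rt, uint32_t imm)
        {
                return (op << 26) | (rs << 21) | (rt << 16) | (imm & 0xffff);
        }

        int main(void)
        {
                uint32_t lui_k0 = itype(0x0f, 0, K0, 0x8000);   /* lui k0,0x8000 */
                uint32_t ori_k0 = itype(0x0d, K0, K0, 0x0381);  /* ori k0,k0,0x381 */
                uint32_t jr_k0  = (K0 << 21) | 0x08;            /* jr k0 (SPECIAL, funct 0x08) */

                assert(lui_k0 == 0x3c1a8000);
                assert(ori_k0 == 0x375a0381);
                assert(jr_k0  == 0x03400008);
                /* the fourth word, 0x00000000, is the canonical nop */
                return 0;
        }
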
 
index e26e08274fc5b32a898ccdc40c8c346ec2a9d7d8..6a560ac03def0db5f2a794529998cf5e39ad1887 100644 (file)
@@ -20,7 +20,6 @@
 #define SEAD_CONFIG_BASE               0x1b100110
 #define SEAD_CONFIG_SIZE               4
 
-int gic_present;
 static unsigned long sead3_config_reg;
 
 /*
index f012fd164ceedc9e2aed9a94ebb30cc2bdc602c2..b5059dc899f44d2616a4c72c873fe82819474c09 100644 (file)
 #include <linux/bootmem.h>
 
 #include <asm/mips-boards/generic.h>
-#include <asm/prom.h>
-
-int coherentio;                /* 0 => no DMA cache coherency (may be set by user) */
-int hw_coherentio;     /* 0 => no HW DMA cache coherency (reflects real HW) */
 
 const char *get_system_type(void)
 {
index 239e4e32757fc566f8fb96b4366806531ba87aa7..96b42eb9b5e221cccdc4cf861fe48f5455cb4d0f 100644 (file)
@@ -11,7 +11,6 @@
 #include <asm/time.h>
 #include <asm/irq.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 unsigned long cpu_khz;
 
index 3c05bf9e280ae5e66ea1e8ae3cfc96179a610d8b..e0873a31ebaace03336502d8bf57087fca3375b9 100644 (file)
@@ -2,13 +2,22 @@ if NLM_XLP_BOARD || NLM_XLR_BOARD
 
 if NLM_XLP_BOARD
 config DT_XLP_EVP
-       bool "Built-in device tree for XLP EVP/SVP boards"
+       bool "Built-in device tree for XLP EVP boards"
        default y
        help
-         Add an FDT blob for XLP EVP and SVP boards into the kernel.
+         Add an FDT blob for XLP EVP boards into the kernel.
          This DTB will be used if the firmware does not pass in a DTB
-          pointer to the kernel.  The corresponding DTS file is at
-          arch/mips/netlogic/dts/xlp_evp.dts
+         pointer to the kernel.  The corresponding DTS file is at
+         arch/mips/netlogic/dts/xlp_evp.dts
+
+config DT_XLP_SVP
+       bool "Built-in device tree for XLP SVP boards"
+       default y
+       help
+         Add an FDT blob for XLP SVP boards into the kernel.
+         This DTB will be used if the firmware does not pass in a DTB
+         pointer to the kernel.  The corresponding DTS file is at
+         arch/mips/netlogic/dts/xlp_svp.dts
 
 config NLM_MULTINODE
        bool "Support for multi-chip boards"
index 2bb95dcfe20addf631ec84d15267563c30fe26e2..ffba52489bef7b52a1dcb996cb911c277359a45d 100644 (file)
@@ -148,8 +148,7 @@ void nlm_cpus_done(void)
 int nlm_cpu_ready[NR_CPUS];
 unsigned long nlm_next_gp;
 unsigned long nlm_next_sp;
-
-cpumask_t phys_cpu_present_map;
+static cpumask_t phys_cpu_present_mask;
 
 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 {
@@ -169,11 +168,12 @@ void __init nlm_smp_setup(void)
 {
        unsigned int boot_cpu;
        int num_cpus, i, ncore;
+       char buf[64];
 
        boot_cpu = hard_smp_processor_id();
-       cpumask_clear(&phys_cpu_present_map);
+       cpumask_clear(&phys_cpu_present_mask);
 
-       cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
+       cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
        __cpu_number_map[boot_cpu] = 0;
        __cpu_logical_map[0] = boot_cpu;
        set_cpu_possible(0, true);
@@ -185,7 +185,7 @@ void __init nlm_smp_setup(void)
                 * it is only set for ASPs (see smpboot.S)
                 */
                if (nlm_cpu_ready[i]) {
-                       cpumask_set_cpu(i, &phys_cpu_present_map);
+                       cpumask_set_cpu(i, &phys_cpu_present_mask);
                        __cpu_number_map[i] = num_cpus;
                        __cpu_logical_map[num_cpus] = i;
                        set_cpu_possible(num_cpus, true);
@@ -193,16 +193,19 @@ void __init nlm_smp_setup(void)
                }
        }
 
+       cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask);
+       pr_info("Physical CPU mask: %s\n", buf);
+       cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
+       pr_info("Possible CPU mask: %s\n", buf);
+
        /* check with the cores we have woken up */
        for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
                ncore += hweight32(nlm_get_node(i)->coremask);
 
-       pr_info("Phys CPU present map: %lx, possible map %lx\n",
-               (unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
-               (unsigned long)cpumask_bits(cpu_possible_mask)[0]);
-
        pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
                nlm_threads_per_core, num_cpus);
+
+       /* switch NMI handler to boot CPUs */
        nlm_set_nmi_handler(nlm_boot_secondary_cpus);
 }
 
index d117d46413aa29d9b03806935c3d206f0e4f6b29..aecb6fa9a9c33968a187c872672d835c5ee39845 100644 (file)
@@ -1 +1,2 @@
 obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o
+obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o
index 7628b5464fc76161bf84f5192221b2e9716c1478..e14f4230806427bfb481d4e46d7a757cd2c82114 100644 (file)
@@ -20,7 +20,7 @@ soc {
                #address-cells = <2>;
                #size-cells = <1>;
                compatible = "simple-bus";
-               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
+               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
                          1 0  0 0x16000000  0x01000000>; // GBU chipselects
 
                serial0: serial@30000 {
diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts
new file mode 100644 (file)
index 0000000..8af4bdb
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * XLP3XX Device Tree Source for SVP boards
+ */
+
+/dts-v1/;
+/ {
+       model = "netlogic,XLP-SVP";
+       compatible = "netlogic,xlp";
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       memory {
+               device_type = "memory";
+               reg =  <0 0x00100000 0 0x0FF00000       // 255M at 1M
+                       0 0x20000000 0 0xa0000000       // 2560M at 512M
+                       0 0xe0000000 0 0x40000000>;
+       };
+
+       soc {
+               #address-cells = <2>;
+               #size-cells = <1>;
+               compatible = "simple-bus";
+               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
+                         1 0  0 0x16000000  0x01000000>; // GBU chipselects
+
+               serial0: serial@30000 {
+                       device_type = "serial";
+                       compatible = "ns16550";
+                       reg = <0 0x30100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <133333333>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <17>;
+               };
+               serial1: serial@31000 {
+                       device_type = "serial";
+                       compatible = "ns16550";
+                       reg = <0 0x31100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <133333333>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <18>;
+               };
+               i2c0: ocores@32000 {
+                       compatible = "opencores,i2c-ocores";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0 0x32100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <32000000>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <30>;
+               };
+               i2c1: ocores@33000 {
+                       compatible = "opencores,i2c-ocores";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0 0x33100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <32000000>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <31>;
+
+                       rtc@68 {
+                               compatible = "dallas,ds1374";
+                               reg = <0x68>;
+                       };
+
+                       dtt@4c {
+                               compatible = "national,lm90";
+                               reg = <0x4c>;
+                       };
+               };
+               pic: pic@4000 {
+                       interrupt-controller;
+                       #address-cells = <0>;
+                       #interrupt-cells = <1>;
+                       reg = <0 0x4000 0x200>;
+               };
+
+               nor_flash@1,0 {
+                       compatible = "cfi-flash";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       bank-width = <2>;
+                       reg = <1 0 0x1000000>;
+
+                       partition@0 {
+                               label = "x-loader";
+                               reg = <0x0 0x100000>; /* 1M */
+                               read-only;
+                       };
+
+                       partition@100000 {
+                               label = "u-boot";
+                               reg = <0x100000 0x100000>; /* 1M */
+                       };
+
+                       partition@200000 {
+                               label = "kernel";
+                               reg = <0x200000 0x500000>; /* 5M */
+                       };
+
+                       partition@700000 {
+                               label = "rootfs";
+                               reg = <0x700000 0x800000>; /* 8M */
+                       };
+
+                       partition@f00000 {
+                               label = "env";
+                               reg = <0xf00000 0x100000>; /* 1M */
+                               read-only;
+                       };
+               };
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+       };
+};
index c68fd4026104b7ed4db04d21ae8d86753fa71266..87560e4db35f148e87028b6ce34fcaf1485177bf 100644 (file)
@@ -61,43 +61,61 @@ void nlm_node_init(int node)
 
 int nlm_irq_to_irt(int irq)
 {
-       if (!PIC_IRQ_IS_IRT(irq))
-               return -1;
+       uint64_t pcibase;
+       int devoff, irt;
 
        switch (irq) {
        case PIC_UART_0_IRQ:
-               return PIC_IRT_UART_0_INDEX;
+               devoff = XLP_IO_UART0_OFFSET(0);
+               break;
        case PIC_UART_1_IRQ:
-               return PIC_IRT_UART_1_INDEX;
-       case PIC_PCIE_LINK_0_IRQ:
-              return PIC_IRT_PCIE_LINK_0_INDEX;
-       case PIC_PCIE_LINK_1_IRQ:
-              return PIC_IRT_PCIE_LINK_1_INDEX;
-       case PIC_PCIE_LINK_2_IRQ:
-              return PIC_IRT_PCIE_LINK_2_INDEX;
-       case PIC_PCIE_LINK_3_IRQ:
-              return PIC_IRT_PCIE_LINK_3_INDEX;
+               devoff = XLP_IO_UART1_OFFSET(0);
+               break;
        case PIC_EHCI_0_IRQ:
-              return PIC_IRT_EHCI_0_INDEX;
+               devoff = XLP_IO_USB_EHCI0_OFFSET(0);
+               break;
        case PIC_EHCI_1_IRQ:
-              return PIC_IRT_EHCI_1_INDEX;
+               devoff = XLP_IO_USB_EHCI1_OFFSET(0);
+               break;
        case PIC_OHCI_0_IRQ:
-              return PIC_IRT_OHCI_0_INDEX;
+               devoff = XLP_IO_USB_OHCI0_OFFSET(0);
+               break;
        case PIC_OHCI_1_IRQ:
-              return PIC_IRT_OHCI_1_INDEX;
+               devoff = XLP_IO_USB_OHCI1_OFFSET(0);
+               break;
        case PIC_OHCI_2_IRQ:
-              return PIC_IRT_OHCI_2_INDEX;
+               devoff = XLP_IO_USB_OHCI2_OFFSET(0);
+               break;
        case PIC_OHCI_3_IRQ:
-              return PIC_IRT_OHCI_3_INDEX;
+               devoff = XLP_IO_USB_OHCI3_OFFSET(0);
+               break;
        case PIC_MMC_IRQ:
-              return PIC_IRT_MMC_INDEX;
+               devoff = XLP_IO_SD_OFFSET(0);
+               break;
        case PIC_I2C_0_IRQ:
-               return PIC_IRT_I2C_0_INDEX;
+               devoff = XLP_IO_I2C0_OFFSET(0);
+               break;
        case PIC_I2C_1_IRQ:
-               return PIC_IRT_I2C_1_INDEX;
+               devoff = XLP_IO_I2C1_OFFSET(0);
+               break;
        default:
-               return -1;
+               devoff = 0;
+               break;
        }
+
+       if (devoff != 0) {
+               pcibase = nlm_pcicfg_base(devoff);
+               irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff;
+               /* HW bug, I2C 1 irt entry is off by one */
+               if (irq == PIC_I2C_1_IRQ)
+                       irt = irt + 1;
+       } else if (irq >= PIC_PCIE_LINK_0_IRQ && irq <= PIC_PCIE_LINK_3_IRQ) {
+               /* HW bug, PCI IRT entries are bad on early silicon, fix */
+               irt = PIC_IRT_PCIE_LINK_INDEX(irq - PIC_PCIE_LINK_0_IRQ);
+       } else {
+               irt = -1;
+       }
+       return irt;
 }
 
 unsigned int nlm_get_core_frequency(int node, int core)
index 4894d62043ac3639ce7844069995466ced103ea0..eaa99d28cb8eddbf1fe0311740f22054bb4e15ad 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/pm.h>
 #include <linux/bootmem.h>
 
+#include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/time.h>
 #include <asm/bootinfo.h>
@@ -56,7 +57,7 @@ uint64_t nlm_io_base;
 struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
 unsigned int nlm_threads_per_core;
-extern u32 __dtb_start[];
+extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[];
 
 static void nlm_linux_exit(void)
 {
@@ -82,8 +83,24 @@ void __init plat_mem_setup(void)
         * 64-bit, so convert pointer.
         */
        fdtp = (void *)(long)fw_arg0;
-       if (!fdtp)
-               fdtp = __dtb_start;
+       if (!fdtp) {
+               switch (current_cpu_data.processor_id & 0xff00) {
+#ifdef CONFIG_DT_XLP_SVP
+               case PRID_IMP_NETLOGIC_XLP3XX:
+                       fdtp = __dtb_xlp_svp_begin;
+                       break;
+#endif
+#ifdef CONFIG_DT_XLP_EVP
+               case PRID_IMP_NETLOGIC_XLP8XX:
+                       fdtp = __dtb_xlp_evp_begin;
+                       break;
+#endif
+               default:
+                       /* Pick a built-in if any, and hope for the best */
+                       fdtp = __dtb_start;
+                       break;
+               }
+       }
        fdtp = phys_to_virt(__pa(fdtp));
        early_init_devtree(fdtp);
 }
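
The DTB selection added to plat_mem_setup() above keys off current_cpu_data.processor_id & 0xff00, i.e. the implementation field of the CP0 PRId register (company ID in bits 23:16, processor/implementation ID in bits 15:8, revision in bits 7:0). A tiny sketch of that decomposition; the example PRId value below is invented, and the XLP3XX/XLP8XX constants referenced in the hunk are defined in the NetLogic headers, not here:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* made-up PRId: company 0x0c, implementation 0x11, revision 0x07 */
                uint32_t prid = (0x0cu << 16) | (0x11u << 8) | 0x07u;

                printf("company id:     %#x\n", (prid >> 16) & 0xff);
                printf("implementation: %#x\n", prid & 0xff00);  /* what the switch compares */
                printf("revision:       %#x\n", prid & 0xff);
                return 0;
        }
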
index 1d0b66c62fd1beed8f4f601a380170bcb16c17df..9c401dd78337384d1d95050c26ad3b38935d3f50 100644 (file)
 #include <asm/netlogic/haldefs.h>
 #include <asm/netlogic/xlp-hal/iomap.h>
 #include <asm/netlogic/xlp-hal/xlp.h>
-#include <asm/netlogic/xlp-hal/usb.h>
+
+/*
+ * USB glue logic registers, used only during initialization
+ */
+#define USB_CTL_0                      0x01
+#define USB_PHY_0                      0x0A
+#define USB_PHY_RESET                  0x01
+#define USB_PHY_PORT_RESET_0           0x10
+#define USB_PHY_PORT_RESET_1           0x20
+#define USB_CONTROLLER_RESET           0x01
+#define USB_INT_STATUS                 0x0E
+#define USB_INT_EN                     0x0F
+#define USB_PHY_INTERRUPT_EN           0x01
+#define USB_OHCI_INTERRUPT_EN          0x02
+#define USB_OHCI_INTERRUPT1_EN         0x04
+#define USB_OHCI_INTERRUPT2_EN         0x08
+#define USB_CTRL_INTERRUPT_EN          0x10
+
+#define nlm_read_usb_reg(b, r)                 nlm_read_reg(b, r)
+#define nlm_write_usb_reg(b, r, v)             nlm_write_reg(b, r, v)
+#define nlm_get_usb_pcibase(node, inst)                \
+       nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
+#define nlm_get_usb_regbase(node, inst)                \
+       (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
 
 static void nlm_usb_intr_en(int node, int port)
 {
@@ -99,23 +122,23 @@ static void nlm_usb_fixup_final(struct pci_dev *dev)
        dev->dev.coherent_dma_mask      = DMA_BIT_MASK(64);
        switch (dev->devfn) {
        case 0x10:
-              dev->irq = PIC_EHCI_0_IRQ;
-              break;
+               dev->irq = PIC_EHCI_0_IRQ;
+               break;
        case 0x11:
-              dev->irq = PIC_OHCI_0_IRQ;
-              break;
+               dev->irq = PIC_OHCI_0_IRQ;
+               break;
        case 0x12:
-              dev->irq = PIC_OHCI_1_IRQ;
-              break;
+               dev->irq = PIC_OHCI_1_IRQ;
+               break;
        case 0x13:
-              dev->irq = PIC_EHCI_1_IRQ;
-              break;
+               dev->irq = PIC_EHCI_1_IRQ;
+               break;
        case 0x14:
-              dev->irq = PIC_OHCI_2_IRQ;
-              break;
+               dev->irq = PIC_OHCI_2_IRQ;
+               break;
        case 0x15:
-              dev->irq = PIC_OHCI_3_IRQ;
-              break;
+               dev->irq = PIC_OHCI_3_IRQ;
+               break;
        }
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI,
index e3e094100e3e51f2ea879431f85a9512d84e22a4..89c8c1066632b5f2800c1a4f2ecaef212ac890cf 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/serial_8250.h>
 #include <linux/pm.h>
 
+#include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/time.h>
 #include <asm/bootinfo.h>
index 1fd361462c030b7fefd06c676f54129ff4fe63f6..e4b1140cdae060dca0de8bfdd6a5985b1429de58 100644 (file)
@@ -41,7 +41,7 @@ static int (*save_perf_irq)(void);
  * first hardware thread in the core for setup and init.
  * Skip CPUs with non-zero hardware thread id (4 hwt per core)
  */
-#ifdef CONFIG_CPU_XLR
+#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
 #define oprofile_skip_cpu(c)   ((cpu_logical_map(c) & 0x3) != 0)
 #else
 #define oprofile_skip_cpu(c)   0
index 412ec025cf55643d528ca28116669a519f8ee182..18517dd0f7090987fe182df3d2a4dbe4e62842cf 100644 (file)
@@ -366,9 +366,9 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
        if (!res)
                return -EINVAL;
 
-       apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (!apc->cfg_base)
-               return -ENOMEM;
+       apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->cfg_base))
+               return PTR_ERR(apc->cfg_base);
 
        apc->irq = platform_get_irq(pdev, 0);
        if (apc->irq < 0)
index 8a0700d448fe45c65cb9063e37e7453dc1e05931..65ec032fa0b442367c93b70cf8599fb5bd03fdd1 100644 (file)
@@ -365,25 +365,25 @@ static int ar724x_pci_probe(struct platform_device *pdev)
        if (!res)
                return -EINVAL;
 
-       apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (apc->ctrl_base == NULL)
-               return -EBUSY;
+       apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->ctrl_base))
+               return PTR_ERR(apc->ctrl_base);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
        if (!res)
                return -EINVAL;
 
-       apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (!apc->devcfg_base)
-               return -EBUSY;
+       apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->devcfg_base))
+               return PTR_ERR(apc->devcfg_base);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
        if (!res)
                return -EINVAL;
 
-       apc->crp_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (apc->crp_base == NULL)
-               return -EBUSY;
+       apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->crp_base))
+               return PTR_ERR(apc->crp_base);
 
        apc->irq = platform_get_irq(pdev, 0);
        if (apc->irq < 0)
index 88e781c6b5bab274b53f76c55b7f51c2356b1bc6..2eb954239bc5bbc3b1edac6325be44f287d76235 100644 (file)
@@ -121,11 +121,17 @@ void __iomem *pci_iospace_start;
 static void __init bcm63xx_reset_pcie(void)
 {
        u32 val;
+       u32 reg;
 
        /* enable SERDES */
-       val = bcm_misc_readl(MISC_SERDES_CTRL_REG);
+       if (BCMCPU_IS_6328())
+               reg = MISC_SERDES_CTRL_6328_REG;
+       else
+               reg = MISC_SERDES_CTRL_6362_REG;
+
+       val = bcm_misc_readl(reg);
        val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
-       bcm_misc_writel(val, MISC_SERDES_CTRL_REG);
+       bcm_misc_writel(val, reg);
 
        /* reset the PCIe core */
        bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
@@ -330,6 +336,7 @@ static int __init bcm63xx_pci_init(void)
 
        switch (bcm63xx_get_cpu_id()) {
        case BCM6328_CPU_ID:
+       case BCM6362_CPU_ID:
                return bcm63xx_register_pcie();
        case BCM6348_CPU_ID:
        case BCM6358_CPU_ID:
index 0edb89a6351610480833ee81d5e996b45ee3ea45..1c98975316604aa27517afec87cb450bf459d6bd 100644 (file)
@@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c)
        return 0; /* foo */
 }
 
-static inline int str2eaddr(unsigned char *ea, unsigned char *str)
+int str2eaddr(unsigned char *ea, unsigned char *str)
 {
        int index = 0;
        unsigned char num = 0;
index 1651cfdbfe7b1303c03b629171190e8df013ad16..396b2967ad856bb974da6ff8055d27037cd9b654 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <asm/bootinfo.h>
 #include <asm/cacheflush.h>
+#include <asm/idle.h>
 #include <asm/r4kcache.h>
 #include <asm/reboot.h>
 #include <asm/smp-ops.h>
index 5bd9d8f468cc3582ab02114ff272b833d8893565..a01baff52cae0ab1ad652574843049c6cf640097 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/generic.h>
 #include <asm/mach-powertv/asic.h>
 
+#include "init.h"
+
 static int *_prom_envp;
 unsigned long _prom_memsize;
 
index b194c34ca9660eda535d77cc758f79868cdbbc82..c1a8bd0dbe4b38984958f4fbb498e392b0cda8eb 100644 (file)
@@ -23,4 +23,6 @@
 #ifndef _POWERTV_INIT_H
 #define _POWERTV_INIT_H
 extern unsigned long _prom_memsize;
+extern void prom_meminit(void);
+extern char *prom_getenv(char *name);
 #endif
index 6e5f1bdc59b5fa77215803d8e919b55afe3c081d..bc2f3ca22b413a27ed8f245495d5c8a38374d8d2 100644 (file)
@@ -29,7 +29,6 @@
 #include <asm/page.h>
 #include <asm/sections.h>
 
-#include <asm/mips-boards/prom.h>
 #include <asm/mach-powertv/asic.h>
 #include <asm/mach-powertv/ioremap.h>
 
index 820b8480f2223850f56169e6570e50fed0ae8de3..24689bff1039fe522e50e6d70c0eb0ab725e7849 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/dma.h>
 #include <asm/asm.h>
 #include <asm/traps.h>
index a0b0197cab0aa205cdebbbb9e31fe6381da5aa07..026e823d871d34e8952ba22b064e59cd4b778fda 100644 (file)
@@ -6,12 +6,23 @@ choice
        help
          Select Ralink MIPS SoC type.
 
+       config SOC_RT288X
+               bool "RT288x"
+
        config SOC_RT305X
                bool "RT305x"
                select USB_ARCH_HAS_HCD
                select USB_ARCH_HAS_OHCI
                select USB_ARCH_HAS_EHCI
 
+       config SOC_RT3883
+               bool "RT3883"
+               select USB_ARCH_HAS_OHCI
+               select USB_ARCH_HAS_EHCI
+
+       config SOC_MT7620
+               bool "MT7620"
+
 endchoice
 
 choice
@@ -23,10 +34,22 @@ choice
        config DTB_RT_NONE
                bool "None"
 
+       config DTB_RT2880_EVAL
+               bool "RT2880 eval kit"
+               depends on SOC_RT288X
+
        config DTB_RT305X_EVAL
                bool "RT305x eval kit"
                depends on SOC_RT305X
 
+       config DTB_RT3883_EVAL
+               bool "RT3883 eval kit"
+               depends on SOC_RT3883
+
+       config DTB_MT7620A_EVAL
+               bool "MT7620A eval kit"
+               depends on SOC_MT7620
+
 endchoice
 
 endif
index 939757f0e71f61024bdc32d50102af1eb50c9c65..38cf1a880aaac21ca6b0e38b3561e1832aa5e4c3 100644 (file)
@@ -8,7 +8,10 @@
 
 obj-y := prom.o of.o reset.o clk.o irq.o
 
+obj-$(CONFIG_SOC_RT288X) += rt288x.o
 obj-$(CONFIG_SOC_RT305X) += rt305x.o
+obj-$(CONFIG_SOC_RT3883) += rt3883.o
+obj-$(CONFIG_SOC_MT7620) += mt7620.o
 
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 
index 6babd65765e608a6d49a51a50a627da0938e23b8..cda4b6645c50587da40f9ad9ffa0357d27fe74ab 100644 (file)
@@ -4,7 +4,25 @@
 core-$(CONFIG_RALINK)          += arch/mips/ralink/
 cflags-$(CONFIG_RALINK)                += -I$(srctree)/arch/mips/include/asm/mach-ralink
 
+#
+# Ralink RT288x
+#
+load-$(CONFIG_SOC_RT288X)      += 0xffffffff88000000
+cflags-$(CONFIG_SOC_RT288X)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt288x
+
 #
 # Ralink RT305x
 #
 load-$(CONFIG_SOC_RT305X)      += 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT305X)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt305x
+
+#
+# Ralink RT3883
+#
+load-$(CONFIG_SOC_RT3883)      += 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT3883)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt3883
+
+#
+# Ralink MT7620
+#
+load-$(CONFIG_SOC_MT7620)      += 0xffffffff80000000
index 300990313e1b3acdd85486c1a8b743c4f8e4b653..83144c3fc5acc32c0b619b7b7f4ac9949a4e4e1e 100644 (file)
@@ -22,13 +22,22 @@ struct ralink_pinmux {
        struct ralink_pinmux_grp *mode;
        struct ralink_pinmux_grp *uart;
        int uart_shift;
+       u32 uart_mask;
        void (*wdt_reset)(void);
+       struct ralink_pinmux_grp *pci;
+       int pci_shift;
+       u32 pci_mask;
 };
-extern struct ralink_pinmux gpio_pinmux;
+extern struct ralink_pinmux rt_gpio_pinmux;
 
 struct ralink_soc_info {
        unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
        unsigned char *compatible;
+
+       unsigned long mem_base;
+       unsigned long mem_size;
+       unsigned long mem_size_min;
+       unsigned long mem_size_max;
 };
 extern struct ralink_soc_info soc_info;
 
index 1a69fb300955391b30f56e64e93850769774d1aa..18194fa93e8065f0916c90ce706ae2cb4e072ae4 100644 (file)
@@ -1 +1,4 @@
+obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_eval.dtb.o
 obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
+obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o
+obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o
diff --git a/arch/mips/ralink/dts/mt7620a.dtsi b/arch/mips/ralink/dts/mt7620a.dtsi
new file mode 100644 (file)
index 0000000..08bf24f
--- /dev/null
@@ -0,0 +1,58 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,mt7620a-soc";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips24KEc";
+               };
+       };
+
+       cpuintc: cpuintc@0 {
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+               interrupt-controller;
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       palmbus@10000000 {
+               compatible = "palmbus";
+               reg = <0x10000000 0x200000>;
+               ranges = <0x0 0x10000000 0x1FFFFF>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               sysc@0 {
+                       compatible = "ralink,mt7620a-sysc";
+                       reg = <0x0 0x100>;
+               };
+
+               intc: intc@200 {
+                       compatible = "ralink,mt7620a-intc", "ralink,rt2880-intc";
+                       reg = <0x200 0x100>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+               };
+
+               memc@300 {
+                       compatible = "ralink,mt7620a-memc", "ralink,rt3050-memc";
+                       reg = <0x300 0x100>;
+               };
+
+               uartlite@c00 {
+                       compatible = "ralink,mt7620a-uart", "ralink,rt2880-uart", "ns16550a";
+                       reg = <0xc00 0x100>;
+
+                       interrupt-parent = <&intc>;
+                       interrupts = <12>;
+
+                       reg-shift = <2>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
new file mode 100644 (file)
index 0000000..35eb874
--- /dev/null
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "mt7620a.dtsi"
+
+/ {
+       compatible = "ralink,mt7620a-eval-board", "ralink,mt7620a-soc";
+       model = "Ralink MT7620A evaluation board";
+
+       memory@0 {
+               reg = <0x0 0x2000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,57600";
+       };
+};
diff --git a/arch/mips/ralink/dts/rt2880.dtsi b/arch/mips/ralink/dts/rt2880.dtsi
new file mode 100644 (file)
index 0000000..182afde
--- /dev/null
@@ -0,0 +1,58 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,rt2880-soc";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips4KEc";
+               };
+       };
+
+       cpuintc: cpuintc@0 {
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+               interrupt-controller;
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       palmbus@300000 {
+               compatible = "palmbus";
+               reg = <0x300000 0x200000>;
+               ranges = <0x0 0x300000 0x1FFFFF>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               sysc@0 {
+                       compatible = "ralink,rt2880-sysc";
+                       reg = <0x0 0x100>;
+               };
+
+               intc: intc@200 {
+                       compatible = "ralink,rt2880-intc";
+                       reg = <0x200 0x100>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+               };
+
+               memc@300 {
+                       compatible = "ralink,rt2880-memc";
+                       reg = <0x300 0x100>;
+               };
+
+               uartlite@c00 {
+                       compatible = "ralink,rt2880-uart", "ns16550a";
+                       reg = <0xc00 0x100>;
+
+                       interrupt-parent = <&intc>;
+                       interrupts = <8>;
+
+                       reg-shift = <2>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
new file mode 100644 (file)
index 0000000..322d700
--- /dev/null
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/include/ "rt2880.dtsi"
+
+/ {
+       compatible = "ralink,rt2880-eval-board", "ralink,rt2880-soc";
+       model = "Ralink RT2880 evaluation board";
+
+       memory@0 {
+               reg = <0x8000000 0x2000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,57600";
+       };
+
+       cfi@1f000000 {
+               compatible = "cfi-flash";
+               reg = <0x1f000000 0x400000>;
+
+               bank-width = <2>;
+               device-width = <2>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               partition@0 {
+                       label = "uboot";
+                       reg = <0x0 0x30000>;
+                       read-only;
+               };
+               partition@30000 {
+                       label = "uboot-env";
+                       reg = <0x30000 0x10000>;
+                       read-only;
+               };
+               partition@40000 {
+                       label = "calibration";
+                       reg = <0x40000 0x10000>;
+                       read-only;
+               };
+               partition@50000 {
+                       label = "linux";
+                       reg = <0x50000 0x3b0000>;
+               };
+       };
+};
index 069d0660e1ddaf844922ba6dffb7e7a838f7284d..e3203d414fee331eecce025924b0a8e7c4cee362 100644 (file)
@@ -1,7 +1,7 @@
 / {
        #address-cells = <1>;
        #size-cells = <1>;
-       compatible = "ralink,rt3050-soc", "ralink,rt3052-soc";
+       compatible = "ralink,rt3050-soc", "ralink,rt3052-soc", "ralink,rt3350-soc";
 
        cpus {
                cpu@0 {
@@ -9,10 +9,6 @@ cpu@0 {
                };
        };
 
-       chosen {
-               bootargs = "console=ttyS0,57600 init=/init";
-       };
-
        cpuintc: cpuintc@0 {
                #address-cells = <0>;
                #interrupt-cells = <1>;
@@ -23,7 +19,7 @@ cpuintc: cpuintc@0 {
        palmbus@10000000 {
                compatible = "palmbus";
                reg = <0x10000000 0x200000>;
-                ranges = <0x0 0x10000000 0x1FFFFF>;
+               ranges = <0x0 0x10000000 0x1FFFFF>;
 
                #address-cells = <1>;
                #size-cells = <1>;
@@ -33,11 +29,6 @@ sysc@0 {
                        reg = <0x0 0x100>;
                };
 
-               timer@100 {
-                       compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt";
-                       reg = <0x100 0x100>;
-               };
-
                intc: intc@200 {
                        compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
                        reg = <0x200 0x100>;
@@ -54,45 +45,6 @@ memc@300 {
                        reg = <0x300 0x100>;
                };
 
-               gpio0: gpio@600 {
-                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-                       reg = <0x600 0x34>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       ralink,ngpio = <24>;
-                       ralink,regs = [ 00 04 08 0c
-                                       20 24 28 2c
-                                       30 34 ];
-               };
-
-               gpio1: gpio@638 {
-                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-                       reg = <0x638 0x24>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       ralink,ngpio = <16>;
-                       ralink,regs = [ 00 04 08 0c
-                                       10 14 18 1c
-                                       20 24 ];
-               };
-
-               gpio2: gpio@660 {
-                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-                       reg = <0x660 0x24>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       ralink,ngpio = <12>;
-                       ralink,regs = [ 00 04 08 0c
-                                       10 14 18 1c
-                                       20 24 ];
-               };
-
                uartlite@c00 {
                        compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
                        reg = <0xc00 0x100>;
@@ -103,4 +55,14 @@ uartlite@c00 {
                        reg-shift = <2>;
                };
        };
+
+       usb@101c0000 {
+               compatible = "ralink,rt3050-usb", "snps,dwc2";
+               reg = <0x101c0000 0x40000>;
+
+               interrupt-parent = <&intc>;
+               interrupts = <18>;
+
+               status = "disabled";
+       };
 };
index 148a590bc4194784c369acef68c76b73d9c2240d..0ac73ea281984909df89403d17fb82fd4e9f3ae4 100644 (file)
@@ -1,10 +1,8 @@
 /dts-v1/;
 
-/include/ "rt3050.dtsi"
+#include "rt3050.dtsi"
 
 / {
-       #address-cells = <1>;
-       #size-cells = <1>;
        compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
        model = "Ralink RT3052 evaluation board";
 
@@ -12,12 +10,8 @@ memory@0 {
                reg = <0x0 0x2000000>;
        };
 
-       palmbus@10000000 {
-               sysc@0 {
-                       ralink,pinmmux = "uartlite", "spi";
-                       ralink,uartmux = "gpio";
-                       ralink,wdtmux = <0>;
-               };
+       chosen {
+               bootargs = "console=ttyS0,57600";
        };
 
        cfi@1f000000 {
@@ -49,4 +43,8 @@ partition@50000 {
                        reg = <0x50000 0x7b0000>;
                };
        };
+
+       usb@101c0000 {
+               status = "ok";
+       };
 };
diff --git a/arch/mips/ralink/dts/rt3883.dtsi b/arch/mips/ralink/dts/rt3883.dtsi
new file mode 100644 (file)
index 0000000..3b131dd
--- /dev/null
@@ -0,0 +1,58 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,rt3883-soc";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips74Kc";
+               };
+       };
+
+       cpuintc: cpuintc@0 {
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+               interrupt-controller;
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       palmbus@10000000 {
+               compatible = "palmbus";
+               reg = <0x10000000 0x200000>;
+               ranges = <0x0 0x10000000 0x1FFFFF>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               sysc@0 {
+                       compatible = "ralink,rt3883-sysc", "ralink,rt3050-sysc";
+                       reg = <0x0 0x100>;
+               };
+
+               intc: intc@200 {
+                       compatible = "ralink,rt3883-intc", "ralink,rt2880-intc";
+                       reg = <0x200 0x100>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+               };
+
+               memc@300 {
+                       compatible = "ralink,rt3883-memc", "ralink,rt3050-memc";
+                       reg = <0x300 0x100>;
+               };
+
+               uartlite@c00 {
+                       compatible = "ralink,rt3883-uart", "ralink,rt2880-uart", "ns16550a";
+                       reg = <0xc00 0x100>;
+
+                       interrupt-parent = <&intc>;
+                       interrupts = <12>;
+
+                       reg-shift = <2>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
new file mode 100644 (file)
index 0000000..2fa6b33
--- /dev/null
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "rt3883.dtsi"
+
+/ {
+       compatible = "ralink,rt3883-eval-board", "ralink,rt3883-soc";
+       model = "Ralink RT3883 evaluation board";
+
+       memory@0 {
+               reg = <0x0 0x2000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,57600";
+       };
+};
index c4ae47eb24abba22390902acd90e8f0430325162..b46d0419d09b52a3864e7f3f1bc0e2767ed97d4f 100644 (file)
 
 #include <asm/addrspace.h>
 
+#ifdef CONFIG_SOC_RT288X
+#define EARLY_UART_BASE         0x300c00
+#else
 #define EARLY_UART_BASE         0x10000c00
+#endif
 
 #define UART_REG_RX             0x00
 #define UART_REG_TX             0x04
index 6d054c5ec9ab03bf83950d152fdc173dbd0d8b56..320b1f1043fff108854ac7bab56ae1cf37e5699f 100644 (file)
@@ -31,6 +31,7 @@
 #define INTC_INT_GLOBAL                BIT(31)
 
 #define RALINK_CPU_IRQ_INTC    (MIPS_CPU_IRQ_BASE + 2)
+#define RALINK_CPU_IRQ_PCI     (MIPS_CPU_IRQ_BASE + 4)
 #define RALINK_CPU_IRQ_FE      (MIPS_CPU_IRQ_BASE + 5)
 #define RALINK_CPU_IRQ_WIFI    (MIPS_CPU_IRQ_BASE + 6)
 #define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7)
@@ -104,6 +105,9 @@ asmlinkage void plat_irq_dispatch(void)
        else if (pending & STATUSF_IP6)
                do_IRQ(RALINK_CPU_IRQ_WIFI);
 
+       else if (pending & STATUSF_IP4)
+               do_IRQ(RALINK_CPU_IRQ_PCI);
+
        else if (pending & STATUSF_IP2)
                do_IRQ(RALINK_CPU_IRQ_INTC);
 
@@ -162,6 +166,7 @@ static int __init intc_of_init(struct device_node *node,
        irq_set_chained_handler(irq, ralink_intc_irq_handler);
        irq_set_handler_data(irq, domain);
 
+       /* tell the kernel which irq is used for performance monitoring */
        cp0_perfcount_irq = irq_create_mapping(domain, 9);
 
        return 0;
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
new file mode 100644 (file)
index 0000000..0018b1a
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+
+#include "common.h"
+
+/* does the board have SDRAM or DDR RAM */
+static int dram_type;
+
+/* the pll dividers */
+static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 };
+
+static struct ralink_pinmux_grp mode_mux[] = {
+       {
+               .name = "i2c",
+               .mask = MT7620_GPIO_MODE_I2C,
+               .gpio_first = 1,
+               .gpio_last = 2,
+       }, {
+               .name = "spi",
+               .mask = MT7620_GPIO_MODE_SPI,
+               .gpio_first = 3,
+               .gpio_last = 6,
+       }, {
+               .name = "uartlite",
+               .mask = MT7620_GPIO_MODE_UART1,
+               .gpio_first = 15,
+               .gpio_last = 16,
+       }, {
+               .name = "wdt",
+               .mask = MT7620_GPIO_MODE_WDT,
+               .gpio_first = 17,
+               .gpio_last = 17,
+       }, {
+               .name = "mdio",
+               .mask = MT7620_GPIO_MODE_MDIO,
+               .gpio_first = 22,
+               .gpio_last = 23,
+       }, {
+               .name = "rgmii1",
+               .mask = MT7620_GPIO_MODE_RGMII1,
+               .gpio_first = 24,
+               .gpio_last = 35,
+       }, {
+               .name = "spi refclk",
+               .mask = MT7620_GPIO_MODE_SPI_REF_CLK,
+               .gpio_first = 37,
+               .gpio_last = 39,
+       }, {
+               .name = "jtag",
+               .mask = MT7620_GPIO_MODE_JTAG,
+               .gpio_first = 40,
+               .gpio_last = 44,
+       }, {
+               /* shared lines with jtag */
+               .name = "ephy",
+               .mask = MT7620_GPIO_MODE_EPHY,
+               .gpio_first = 40,
+               .gpio_last = 44,
+       }, {
+               .name = "nand",
+               .mask = MT7620_GPIO_MODE_JTAG,
+               .gpio_first = 45,
+               .gpio_last = 59,
+       }, {
+               .name = "rgmii2",
+               .mask = MT7620_GPIO_MODE_RGMII2,
+               .gpio_first = 60,
+               .gpio_last = 71,
+       }, {
+               .name = "wled",
+               .mask = MT7620_GPIO_MODE_WLED,
+               .gpio_first = 72,
+               .gpio_last = 72,
+       }, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+       {
+               .name = "uartf",
+               .mask = MT7620_GPIO_MODE_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "pcm uartf",
+               .mask = MT7620_GPIO_MODE_PCM_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "pcm i2s",
+               .mask = MT7620_GPIO_MODE_PCM_I2S,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "i2s uartf",
+               .mask = MT7620_GPIO_MODE_I2S_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "pcm gpio",
+               .mask = MT7620_GPIO_MODE_PCM_GPIO,
+               .gpio_first = 11,
+               .gpio_last = 14,
+       }, {
+               .name = "gpio uartf",
+               .mask = MT7620_GPIO_MODE_GPIO_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 10,
+       }, {
+               .name = "gpio i2s",
+               .mask = MT7620_GPIO_MODE_GPIO_I2S,
+               .gpio_first = 7,
+               .gpio_last = 10,
+       }, {
+               .name = "gpio",
+               .mask = MT7620_GPIO_MODE_GPIO,
+       }, {0}
+};
+
+struct ralink_pinmux rt_gpio_pinmux = {
+       .mode = mode_mux,
+       .uart = uart_mux,
+       .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT,
+       .uart_mask = MT7620_GPIO_MODE_UART0_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+       unsigned long cpu_rate, sys_rate;
+       u32 c0 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG0);
+       u32 c1 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG1);
+       u32 swconfig = (c0 >> CPLL_SW_CONFIG_SHIFT) & CPLL_SW_CONFIG_MASK;
+       u32 cpu_clk = (c1 >> CPLL_CPU_CLK_SHIFT) & CPLL_CPU_CLK_MASK;
+
+       if (cpu_clk) {
+               cpu_rate = 480000000;
+       } else if (!swconfig) {
+               cpu_rate = 600000000;
+       } else {
+               u32 m = (c0 >> CPLL_MULT_RATIO_SHIFT) & CPLL_MULT_RATIO;
+               u32 d = (c0 >> CPLL_DIV_RATIO_SHIFT) & CPLL_DIV_RATIO;
+
+               cpu_rate = ((40 * (m + 24)) / mt7620_clk_divider[d]) * 1000000;
+       }
+
+       if (dram_type == SYSCFG0_DRAM_TYPE_SDRAM)
+               sys_rate = cpu_rate / 4;
+       else
+               sys_rate = cpu_rate / 3;
+
+       ralink_clk_add("cpu", cpu_rate);
+       ralink_clk_add("10000100.timer", 40000000);
+       ralink_clk_add("10000500.uart", 40000000);
+       ralink_clk_add("10000c00.uartlite", 40000000);
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("ralink,mt7620a-sysc");
+       rt_memc_membase = plat_of_remap_node("ralink,mt7620a-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
+       unsigned char *name = NULL;
+       u32 n0;
+       u32 n1;
+       u32 rev;
+       u32 cfg0;
+
+       n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+       n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+       if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) {
+               name = "MT7620N";
+               soc_info->compatible = "ralink,mt7620n-soc";
+       } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) {
+               name = "MT7620A";
+               soc_info->compatible = "ralink,mt7620a-soc";
+       } else {
+               panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+       }
+
+       rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "Ralink %s ver:%u eco:%u",
+               name,
+               (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+               (rev & CHIP_REV_ECO_MASK));
+
+       cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
+       dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK;
+
+       switch (dram_type) {
+       case SYSCFG0_DRAM_TYPE_SDRAM:
+               soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
+               soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
+               break;
+
+       case SYSCFG0_DRAM_TYPE_DDR1:
+               soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
+               soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
+               break;
+
+       case SYSCFG0_DRAM_TYPE_DDR2:
+               soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
+               soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
+               break;
+       default:
+               BUG();
+       }
+       soc_info->mem_base = MT7620_DRAM_BASE;
+}
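For illustration only (hypothetical register values, not taken from this commit), the CPLL formula in ralink_clk_init() above works out as follows:

        /* multiplier field m = 6, divider index d = 0 -> mt7620_clk_divider[0] = 2 */
        unsigned long cpu_rate = ((40 * (6 + 24)) / 2) * 1000000;      /* 600 MHz */
        unsigned long sys_rate = cpu_rate / 3;                         /* 200 MHz with DDR */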
index 4165e70775be52dbf5df31d344204aa1b5ba40de..fb1569580def7c25a03a7f1348315c4e93e92e3c 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/init.h>
+#include <linux/sizes.h>
 #include <linux/of_fdt.h>
 #include <linux/kernel.h>
 #include <linux/bootmem.h>
@@ -85,6 +86,14 @@ void __init plat_mem_setup(void)
         * parsed resulting in our memory appearing
         */
        __dt_setup_arch(&__dtb_start);
+
+       if (soc_info.mem_size)
+               add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
+                                 BOOT_MEM_RAM);
+       else
+               detect_memory_region(soc_info.mem_base,
+                                    soc_info.mem_size_min * SZ_1M,
+                                    soc_info.mem_size_max * SZ_1M);
 }
 
 static int __init plat_of_setup(void)
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
new file mode 100644 (file)
index 0000000..f87de1a
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt288x.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+       {
+               .name = "i2c",
+               .mask = RT2880_GPIO_MODE_I2C,
+               .gpio_first = 1,
+               .gpio_last = 2,
+       }, {
+               .name = "spi",
+               .mask = RT2880_GPIO_MODE_SPI,
+               .gpio_first = 3,
+               .gpio_last = 6,
+       }, {
+               .name = "uartlite",
+               .mask = RT2880_GPIO_MODE_UART0,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "jtag",
+               .mask = RT2880_GPIO_MODE_JTAG,
+               .gpio_first = 17,
+               .gpio_last = 21,
+       }, {
+               .name = "mdio",
+               .mask = RT2880_GPIO_MODE_MDIO,
+               .gpio_first = 22,
+               .gpio_last = 23,
+       }, {
+               .name = "sdram",
+               .mask = RT2880_GPIO_MODE_SDRAM,
+               .gpio_first = 24,
+               .gpio_last = 39,
+       }, {
+               .name = "pci",
+               .mask = RT2880_GPIO_MODE_PCI,
+               .gpio_first = 40,
+               .gpio_last = 71,
+       }, {0}
+};
+
+static void rt288x_wdt_reset(void)
+{
+       u32 t;
+
+       /* enable WDT reset output on pin SRAM_CS_N */
+       t = rt_sysc_r32(SYSC_REG_CLKCFG);
+       t |= CLKCFG_SRAM_CS_N_WDT;
+       rt_sysc_w32(t, SYSC_REG_CLKCFG);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+       .mode = mode_mux,
+       .wdt_reset = rt288x_wdt_reset,
+};
+
+void __init ralink_clk_init(void)
+{
+       unsigned long cpu_rate;
+       u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+       t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
+
+       switch (t) {
+       case SYSTEM_CONFIG_CPUCLK_250:
+               cpu_rate = 250000000;
+               break;
+       case SYSTEM_CONFIG_CPUCLK_266:
+               cpu_rate = 266666667;
+               break;
+       case SYSTEM_CONFIG_CPUCLK_280:
+               cpu_rate = 280000000;
+               break;
+       case SYSTEM_CONFIG_CPUCLK_300:
+               cpu_rate = 300000000;
+               break;
+       }
+
+       ralink_clk_add("cpu", cpu_rate);
+       ralink_clk_add("300100.timer", cpu_rate / 2);
+       ralink_clk_add("300120.watchdog", cpu_rate / 2);
+       ralink_clk_add("300500.uart", cpu_rate / 2);
+       ralink_clk_add("300c00.uartlite", cpu_rate / 2);
+       ralink_clk_add("400000.ethernet", cpu_rate / 2);
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc");
+       rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE);
+       const char *name;
+       u32 n0;
+       u32 n1;
+       u32 id;
+
+       n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+       n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+       id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+       if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) {
+               soc_info->compatible = "ralink,rt2880-soc";
+               name = "RT2880";
+       } else {
+               panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1);
+       }
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "Ralink %s id:%u rev:%u",
+               name,
+               (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+               (id & CHIP_ID_REV_MASK));
+
+       soc_info->mem_base = RT2880_SDRAM_BASE;
+       soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
+       soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
+}
index 0a4bbdcf59d97d9d82b01fc8eb8ab9fffd5eba91..ca7ee3a33790fc074ed4788f8fb1e1412babe187 100644 (file)
@@ -22,7 +22,7 @@
 
 enum rt305x_soc_type rt305x_soc;
 
-struct ralink_pinmux_grp mode_mux[] = {
+static struct ralink_pinmux_grp mode_mux[] = {
        {
                .name = "i2c",
                .mask = RT305X_GPIO_MODE_I2C,
@@ -61,7 +61,7 @@ struct ralink_pinmux_grp mode_mux[] = {
        }, {0}
 };
 
-struct ralink_pinmux_grp uart_mux[] = {
+static struct ralink_pinmux_grp uart_mux[] = {
        {
                .name = "uartf",
                .mask = RT305X_GPIO_MODE_UARTF,
@@ -91,19 +91,19 @@ struct ralink_pinmux_grp uart_mux[] = {
                .name = "gpio uartf",
                .mask = RT305X_GPIO_MODE_GPIO_UARTF,
                .gpio_first = RT305X_GPIO_7,
-               .gpio_last = RT305X_GPIO_14,
+               .gpio_last = RT305X_GPIO_10,
        }, {
                .name = "gpio i2s",
                .mask = RT305X_GPIO_MODE_GPIO_I2S,
                .gpio_first = RT305X_GPIO_7,
-               .gpio_last = RT305X_GPIO_14,
+               .gpio_last = RT305X_GPIO_10,
        }, {
                .name = "gpio",
                .mask = RT305X_GPIO_MODE_GPIO,
        }, {0}
 };
 
-void rt305x_wdt_reset(void)
+static void rt305x_wdt_reset(void)
 {
        u32 t;
 
@@ -114,16 +114,53 @@ void rt305x_wdt_reset(void)
        rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
 }
 
-struct ralink_pinmux gpio_pinmux = {
+struct ralink_pinmux rt_gpio_pinmux = {
        .mode = mode_mux,
        .uart = uart_mux,
        .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
+       .uart_mask = RT305X_GPIO_MODE_UART0_MASK,
        .wdt_reset = rt305x_wdt_reset,
 };
 
+static unsigned long rt5350_get_mem_size(void)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+       unsigned long ret;
+       u32 t;
+
+       t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG);
+       t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
+               RT5350_SYSCFG0_DRAM_SIZE_MASK;
+
+       switch (t) {
+       case RT5350_SYSCFG0_DRAM_SIZE_2M:
+               ret = 2;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_8M:
+               ret = 8;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_16M:
+               ret = 16;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_32M:
+               ret = 32;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_64M:
+               ret = 64;
+               break;
+       default:
+               panic("rt5350: invalid DRAM size: %u", t);
+               break;
+       }
+
+       return ret;
+}
+
 void __init ralink_clk_init(void)
 {
        unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
+       unsigned long wmac_rate = 40000000;
+
        u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
 
        if (soc_is_rt305x() || soc_is_rt3350()) {
@@ -176,11 +213,21 @@ void __init ralink_clk_init(void)
                BUG();
        }
 
+       if (soc_is_rt3352() || soc_is_rt5350()) {
+               u32 val = rt_sysc_r32(RT3352_SYSC_REG_SYSCFG0);
+
+               if (!(val & RT3352_CLKCFG0_XTAL_SEL))
+                       wmac_rate = 20000000;
+       }
+
        ralink_clk_add("cpu", cpu_rate);
        ralink_clk_add("10000b00.spi", sys_rate);
        ralink_clk_add("10000100.timer", wdt_rate);
+       ralink_clk_add("10000120.watchdog", wdt_rate);
        ralink_clk_add("10000500.uart", uart_rate);
        ralink_clk_add("10000c00.uartlite", uart_rate);
+       ralink_clk_add("10100000.ethernet", sys_rate);
+       ralink_clk_add("10180000.wmac", wmac_rate);
 }
 
 void __init ralink_of_remap(void)
@@ -239,4 +286,15 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
                name,
                (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
                (id & CHIP_ID_REV_MASK));
+
+       soc_info->mem_base = RT305X_SDRAM_BASE;
+       if (soc_is_rt5350()) {
+               soc_info->mem_size = rt5350_get_mem_size();
+       } else if (soc_is_rt305x() || soc_is_rt3350()) {
+               soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
+               soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
+       } else if (soc_is_rt3352()) {
+               soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
+               soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
+       }
 }
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
new file mode 100644 (file)
index 0000000..b474ac2
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt3883.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+       {
+               .name = "i2c",
+               .mask = RT3883_GPIO_MODE_I2C,
+               .gpio_first = RT3883_GPIO_I2C_SD,
+               .gpio_last = RT3883_GPIO_I2C_SCLK,
+       }, {
+               .name = "spi",
+               .mask = RT3883_GPIO_MODE_SPI,
+               .gpio_first = RT3883_GPIO_SPI_CS0,
+               .gpio_last = RT3883_GPIO_SPI_MISO,
+       }, {
+               .name = "uartlite",
+               .mask = RT3883_GPIO_MODE_UART1,
+               .gpio_first = RT3883_GPIO_UART1_TXD,
+               .gpio_last = RT3883_GPIO_UART1_RXD,
+       }, {
+               .name = "jtag",
+               .mask = RT3883_GPIO_MODE_JTAG,
+               .gpio_first = RT3883_GPIO_JTAG_TDO,
+               .gpio_last = RT3883_GPIO_JTAG_TCLK,
+       }, {
+               .name = "mdio",
+               .mask = RT3883_GPIO_MODE_MDIO,
+               .gpio_first = RT3883_GPIO_MDIO_MDC,
+               .gpio_last = RT3883_GPIO_MDIO_MDIO,
+       }, {
+               .name = "ge1",
+               .mask = RT3883_GPIO_MODE_GE1,
+               .gpio_first = RT3883_GPIO_GE1_TXD0,
+               .gpio_last = RT3883_GPIO_GE1_RXCLK,
+       }, {
+               .name = "ge2",
+               .mask = RT3883_GPIO_MODE_GE2,
+               .gpio_first = RT3883_GPIO_GE2_TXD0,
+               .gpio_last = RT3883_GPIO_GE2_RXCLK,
+       }, {
+               .name = "pci",
+               .mask = RT3883_GPIO_MODE_PCI,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "lna a",
+               .mask = RT3883_GPIO_MODE_LNA_A,
+               .gpio_first = RT3883_GPIO_LNA_PE_A0,
+               .gpio_last = RT3883_GPIO_LNA_PE_A2,
+       }, {
+               .name = "lna g",
+               .mask = RT3883_GPIO_MODE_LNA_G,
+               .gpio_first = RT3883_GPIO_LNA_PE_G0,
+               .gpio_last = RT3883_GPIO_LNA_PE_G2,
+       }, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+       {
+               .name = "uartf",
+               .mask = RT3883_GPIO_MODE_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "pcm uartf",
+               .mask = RT3883_GPIO_MODE_PCM_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "pcm i2s",
+               .mask = RT3883_GPIO_MODE_PCM_I2S,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "i2s uartf",
+               .mask = RT3883_GPIO_MODE_I2S_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "pcm gpio",
+               .mask = RT3883_GPIO_MODE_PCM_GPIO,
+               .gpio_first = RT3883_GPIO_11,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "gpio uartf",
+               .mask = RT3883_GPIO_MODE_GPIO_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_10,
+       }, {
+               .name = "gpio i2s",
+               .mask = RT3883_GPIO_MODE_GPIO_I2S,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_10,
+       }, {
+               .name = "gpio",
+               .mask = RT3883_GPIO_MODE_GPIO,
+       }, {0}
+};
+
+static struct ralink_pinmux_grp pci_mux[] = {
+       {
+               .name = "pci-dev",
+               .mask = 0,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-host2",
+               .mask = 1,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-host1",
+               .mask = 2,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-fnc",
+               .mask = 3,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-gpio",
+               .mask = 7,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {0}
+};
+
+static void rt3883_wdt_reset(void)
+{
+       u32 t;
+
+       /* enable WDT reset output on GPIO 2 */
+       t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
+       t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
+       rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+       .mode = mode_mux,
+       .uart = uart_mux,
+       .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT,
+       .uart_mask = RT3883_GPIO_MODE_UART0_MASK,
+       .wdt_reset = rt3883_wdt_reset,
+       .pci = pci_mux,
+       .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT,
+       .pci_mask = RT3883_GPIO_MODE_PCI_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+       unsigned long cpu_rate, sys_rate;
+       u32 syscfg0;
+       u32 clksel;
+       u32 ddr2;
+
+       syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0);
+       clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) &
+               RT3883_SYSCFG0_CPUCLK_MASK);
+       ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2;
+
+       switch (clksel) {
+       case RT3883_SYSCFG0_CPUCLK_250:
+               cpu_rate = 250000000;
+               sys_rate = (ddr2) ? 125000000 : 83000000;
+               break;
+       case RT3883_SYSCFG0_CPUCLK_384:
+               cpu_rate = 384000000;
+               sys_rate = (ddr2) ? 128000000 : 96000000;
+               break;
+       case RT3883_SYSCFG0_CPUCLK_480:
+               cpu_rate = 480000000;
+               sys_rate = (ddr2) ? 160000000 : 120000000;
+               break;
+       case RT3883_SYSCFG0_CPUCLK_500:
+               cpu_rate = 500000000;
+               sys_rate = (ddr2) ? 166000000 : 125000000;
+               break;
+       }
+
+       ralink_clk_add("cpu", cpu_rate);
+       ralink_clk_add("10000100.timer", sys_rate);
+       ralink_clk_add("10000120.watchdog", sys_rate);
+       ralink_clk_add("10000500.uart", 40000000);
+       ralink_clk_add("10000b00.spi", sys_rate);
+       ralink_clk_add("10000c00.uartlite", 40000000);
+       ralink_clk_add("10100000.ethernet", sys_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc");
+       rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE);
+       const char *name;
+       u32 n0;
+       u32 n1;
+       u32 id;
+
+       n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3);
+       n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7);
+       id = __raw_readl(sysc + RT3883_SYSC_REG_REVID);
+
+       if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) {
+               soc_info->compatible = "ralink,rt3883-soc";
+               name = "RT3883";
+       } else {
+               panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1);
+       }
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "Ralink %s ver:%u eco:%u",
+               name,
+               (id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK,
+               (id & RT3883_REVID_ECO_ID_MASK));
+
+       soc_info->mem_base = RT3883_SDRAM_BASE;
+       soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
+       soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
+}
index 1d1919a44e88c350685866d8420b24fa9e168dc9..7a53b1e28a93dfa512da4e5a6dff6f49e9ae9baa 100644 (file)
@@ -114,7 +114,7 @@ void __init replicate_kernel_text()
  * data structures on the first couple of pages of the first slot of each
  * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
  */
-pfn_t node_getfirstfree(cnodeid_t cnode)
+unsigned long node_getfirstfree(cnodeid_t cnode)
 {
        unsigned long loadbase = REP_BASE;
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
index 5f2bddb1860e11c1b32035593b3376b758c37570..1230f56429d7334ea418f41921e709a9b737586a 100644 (file)
@@ -255,14 +255,14 @@ static void __init dump_topology(void)
        }
 }
 
-static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
+static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
 {
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
 
-       return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+       return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
 }
 
-static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
+static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
 {
        nasid_t nasid;
        lboard_t *brd;
@@ -353,7 +353,7 @@ static void __init mlreset(void)
 
 static void __init szmem(void)
 {
-       pfn_t slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
+       unsigned long slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
        int slot;
        cnodeid_t node;
 
@@ -390,10 +390,10 @@ static void __init szmem(void)
 
 static void __init node_mem_init(cnodeid_t node)
 {
-       pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
-       pfn_t slot_freepfn = node_getfirstfree(node);
+       unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
+       unsigned long slot_freepfn = node_getfirstfree(node);
        unsigned long bootmap_size;
-       pfn_t start_pfn, end_pfn;
+       unsigned long start_pfn, end_pfn;
 
        get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
@@ -467,7 +467,7 @@ void __init paging_init(void)
        pagetable_init();
 
        for_each_online_node(node) {
-               pfn_t start_pfn, end_pfn;
+               unsigned long start_pfn, end_pfn;
 
                get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
index fff58ac176f36328914c06801234f1f31c38389c..2e21b761cb9c771586797a50f5eda5666368a81e 100644 (file)
@@ -69,7 +69,7 @@ static void rt_set_mode(enum clock_event_mode mode,
        /* Nothing to do ...  */
 }
 
-int rt_timer_irq;
+unsigned int rt_timer_irq;
 
 static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
 static DEFINE_PER_CPU(char [11], hub_rt_name);
index 5364aabc21027951532bc5d0e7d97898344f701a..681e7f86c08000f29884ce369efafab86afe1b29 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/irq.h>
 #include <asm/bootinfo.h>
+#include <asm/idle.h>
 #include <asm/time.h>
 #include <asm/reboot.h>
 #include <asm/r4kcache.h>
index 70a3f90131d82af9d67bac9ff2d8d206812dde6b..d7f755833c3f43431b48f38b296da4df34d993e6 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/idle.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/reboot.h>
index cc5474b24f0657f6b51091d6f2779427a03d3e78..80beb188ed476ebb2775db8a89673bd1de95f631 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 
 #include <asm/cacheflush.h>
+#include <asm/idle.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
 
index 8137c25c4e15912841f702a655b05677d0c90a04..6f31cc0f1a878139dc4ac233d89f8f1d22f93379 100644 (file)
@@ -103,4 +103,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
        return channel ? 15 : 14;
 }
 
+#include <asm-generic/pci_iomap.h>
+
 #endif /* _ASM_PCI_H */
index 68fcab8f8f6f5eadebd5a18715034d6b33c8d781..222152a3f75195b429a6794e036c279cdb2e6160 100644 (file)
@@ -60,6 +60,7 @@ ENTRY(ret_from_kernel_thread)
        mov     (REG_D0,fp),d0
        mov     (REG_A0,fp),a0
        calls   (a0)
+       GET_THREAD_INFO a2              # A2 must be set on return from sys_exit()
        clr     d0
        mov     d0,(REG_D0,fp)
        jmp     syscall_exit
@@ -107,10 +108,10 @@ syscall_exit_work:
        and     EPSW_nSL,d0
        beq     resume_kernel           # returning to supervisor mode
 
-       btst    _TIF_SYSCALL_TRACE,d2
-       beq     work_pending
        LOCAL_IRQ_ENABLE                # could let syscall_trace_exit() call
                                        # schedule() instead
+       btst    _TIF_SYSCALL_TRACE,d2
+       beq     work_pending
        mov     fp,d0
        call    syscall_trace_exit[],0  # do_syscall_trace(regs)
        jmp     resume_userspace
@@ -123,6 +124,7 @@ work_pending:
 work_resched:
        call    schedule[],0
 
+resume_userspace:
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
        LOCAL_IRQ_DISABLE
@@ -131,6 +133,8 @@ work_resched:
        mov     (TI_flags,a2),d2
        btst    _TIF_WORK_MASK,d2
        beq     restore_all
+
+       LOCAL_IRQ_ENABLE
        btst    _TIF_NEED_RESCHED,d2
        bne     work_resched
 
@@ -169,17 +173,6 @@ ret_from_intr:
        and     EPSW_nSL,d0
        beq     resume_kernel           # returning to supervisor mode
 
-ENTRY(resume_userspace)
-       # make sure we don't miss an interrupt setting need_resched or
-       # sigpending between sampling and the rti
-       LOCAL_IRQ_DISABLE
-
-       # is there any work to be done on int/exception return?
-       mov     (TI_flags,a2),d2
-       btst    _TIF_WORK_MASK,d2
-       bne     work_pending
-       jmp     restore_all
-
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
        LOCAL_IRQ_DISABLE
index 1adcf024bb9a4c2221ae93bce8558d85c0b64abb..e37fac0461f3351ae6284ca011f3fcb0d80cc906 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <asm/io.h>
+#include <asm/irq.h>
 #include "pci-asb2305.h"
 
 unsigned int pci_probe = 1;
index cad060f288cf51e089bfb9cf7b6e879064bc0989..6507dabdd5ddc1a758e68f7476eb197fac9ec7b3 100644 (file)
@@ -245,7 +245,7 @@ config SMP
 
 config IRQSTACKS
        bool "Use separate kernel stacks when processing interrupts"
-       default n
+       default y
        help
          If you say Y here the kernel will use separate kernel stacks
          for handling hard and soft interrupts.  This can help avoid
index 2f967cc6649e0cab325136624a614af4f0784281..96ec3982be8d37271b6e3236d00d3a0f3f5ce4d1 100644 (file)
@@ -23,24 +23,21 @@ NM          = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS     += -D__hppa__=1
 LIBGCC         = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
-MACHINE                := $(shell uname -m)
-NATIVE         := $(if $(filter parisc%,$(MACHINE)),1,0)
-
 ifdef CONFIG_64BIT
 UTS_MACHINE    := parisc64
 CHECKFLAGS     += -D__LP64__=1 -m64
-WIDTH          := 64
+CC_ARCHES      = hppa64
 else # 32-bit
-WIDTH          :=
+CC_ARCHES      = hppa hppa2.0 hppa1.1
 endif
 
-# attempt to help out folks who are cross-compiling
-ifeq ($(NATIVE),1)
-CROSS_COMPILE  := hppa$(WIDTH)-linux-
-else
- ifeq ($(CROSS_COMPILE),)
- CROSS_COMPILE := hppa$(WIDTH)-linux-gnu-
- endif
+ifneq ($(SUBARCH),$(UTS_MACHINE))
+       ifeq ($(CROSS_COMPILE),)
+               CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
+               CROSS_COMPILE := $(call cc-cross-prefix, \
+                       $(foreach a,$(CC_ARCHES), \
+                       $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
      endif
 endif
 
 OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
@@ -69,7 +66,7 @@ KBUILD_CFLAGS_KERNEL += -mlong-calls
 endif
 
 # select which processor to optimise for
-cflags-$(CONFIG_PA7100)                += -march=1.1 -mschedule=7100
+cflags-$(CONFIG_PA7000)                += -march=1.1 -mschedule=7100
 cflags-$(CONFIG_PA7200)                += -march=1.1 -mschedule=7200
 cflags-$(CONFIG_PA7100LC)      += -march=1.1 -mschedule=7100LC
 cflags-$(CONFIG_PA7300LC)      += -march=1.1 -mschedule=7300
index 89fb40005e3f81e875ed0111952e7941282832cf..0da848232344fc41d9583cc10f8510a54a7d6a16 100644 (file)
        SAVE_SP  (%sr4, PT_SR4 (\regs))
        SAVE_SP  (%sr5, PT_SR5 (\regs))
        SAVE_SP  (%sr6, PT_SR6 (\regs))
-       SAVE_SP  (%sr7, PT_SR7 (\regs))
 
        SAVE_CR  (%cr17, PT_IASQ0(\regs))
        mtctl    %r0,   %cr17
index 12373c4dababec920c0ec42f120064011567d4b6..241c34518465d9404f56b786a86554a1e89824a3 100644 (file)
 #include <linux/threads.h>
 #include <linux/irq.h>
 
+#ifdef CONFIG_IRQSTACKS
+#define __ARCH_HAS_DO_SOFTIRQ
+#endif
+
 typedef struct {
        unsigned int __softirq_pending;
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
        unsigned int kernel_stack_usage;
-#endif
+       unsigned int irq_stack_usage;
 #ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
 #endif
+       unsigned int irq_unaligned_count;
+       unsigned int irq_fpassist_count;
        unsigned int irq_tlb_count;
 } ____cacheline_aligned irq_cpustat_t;
 
@@ -28,6 +33,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 #define __ARCH_IRQ_STAT
 #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
 #define inc_irq_stat(member)   this_cpu_inc(irq_stat.member)
+#define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member)
 #define local_softirq_pending()        this_cpu_read(irq_stat.__softirq_pending)
 
 #define __ARCH_SET_SOFTIRQ_PENDING
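
The new __inc_irq_stat() above is the non-preempt-safe sibling of inc_irq_stat(); it is intended for paths that already run with preemption or interrupts off, which is how the trap handlers later in this series use it. A usage sketch, assuming counters from the struct above:

    /* From a context that cannot migrate (trap or IRQ handler): */
    __inc_irq_stat(irq_unaligned_count);    /* __this_cpu_inc(), no preempt guard */

    /* From preemptible code the existing helper would be used instead: */
    inc_irq_stat(irq_resched_count);        /* this_cpu_inc(), preempt-safe */
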
index 0e625ab9aaec90df20e3a5059ad792cd8f293a10..cc50d33b7b881b7c57f7a46c125a56860840e514 100644 (file)
@@ -39,17 +39,14 @@ extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
 static inline int pfn_to_nid(unsigned long pfn)
 {
        unsigned int i;
-       unsigned char r;
 
        if (unlikely(pfn_is_io(pfn)))
                return 0;
 
        i = pfn >> PFNNID_SHIFT;
        BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
-       r = pfnnid_map[i];
-       BUG_ON(r == 0xff);
 
-       return (int)r;
+       return (int)pfnnid_map[i];
 }
 
 static inline int pfn_valid(int pfn)
index 064015547d1e32be0a968ba946bc54f8db362895..cc2290a3cace1e0d65f8e69be0138c0c09cc8b2a 100644 (file)
@@ -17,7 +17,6 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
-
 #endif /* __ASSEMBLY__ */
 
 /*
 
 #ifndef __ASSEMBLY__
 
-/*
- * IRQ STACK - used for irq handler
- */
-#ifdef __KERNEL__
-
-#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
-
-union irq_stack_union {
-       unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
-};
-
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
-
-void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
-
-#endif /* __KERNEL__ */
-
 /*
  * Data detected about CPUs at boot time which is the same for all CPU's.
  * HP boxes are SMP - ie identical processors.
index 5709c5e59be82c14c544bd06ad693c071a31c434..14285caec71a4360b5b061f87a43077675f0ce1e 100644 (file)
@@ -394,7 +394,7 @@ EXPORT_SYMBOL(print_pci_hwpath);
 static void setup_bus_id(struct parisc_device *padev)
 {
        struct hardware_path path;
-       char name[20];
+       char name[28];
        char *output = name;
        int i;
 
index 4bb96ad9b0b14ddd1cb066bc21b304d861059f38..e8f07dd2840186a934632104fdc3116fa7293506 100644 (file)
        rsm     PSW_SM_I, %r0   /* barrier for "Relied upon Translation */
        mtsp    %r0, %sr4
        mtsp    %r0, %sr5
-       mfsp    %sr7, %r1
-       or,=    %r0,%r1,%r0     /* Only save sr7 in sr3 if sr7 != 0 */
-       mtsp    %r1, %sr3
+       mtsp    %r0, %sr6
        tovirt_r1 %r29
        load32  KERNEL_PSW, %r1
 
        rsm     PSW_SM_QUIET,%r0        /* second "heavy weight" ctl op */
-       mtsp    %r0, %sr6
-       mtsp    %r0, %sr7
        mtctl   %r0, %cr17      /* Clear IIASQ tail */
        mtctl   %r0, %cr17      /* Clear IIASQ head */
        mtctl   %r1, %ipsw
 
        /* we save the registers in the task struct */
 
+       copy    %r30, %r17
        mfctl   %cr30, %r1
+       ldo     THREAD_SZ_ALGN(%r1), %r30
+       mtsp    %r0,%sr7
+       mtsp    %r16,%sr3
        tophys  %r1,%r9
        LDREG   TI_TASK(%r9), %r1       /* thread_info -> task_struct */
        tophys  %r1,%r9
        ldo     TASK_REGS(%r9),%r9
-       STREG   %r30, PT_GR30(%r9)
+       STREG   %r17,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
+       STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
-       mfctl   %cr30, %r1
-       ldo     THREAD_SZ_ALGN(%r1), %r30
        .endm
 
        .macro  get_stack_use_r30
        /* we put a struct pt_regs on the stack and save the registers there */
 
        tophys  %r30,%r9
-       STREG   %r30,PT_GR30(%r9)
+       copy    %r30,%r1
        ldo     PT_SZ_ALGN(%r30),%r30
+       STREG   %r1,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
+       STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
        .endm
 
        L2_ptep         \pgd,\pte,\index,\va,\fault
        .endm
 
+       /* Acquire pa_dbit_lock lock. */
+       .macro          dbit_lock       spc,tmp,tmp1
+#ifdef CONFIG_SMP
+       cmpib,COND(=),n 0,\spc,2f
+       load32          PA(pa_dbit_lock),\tmp
+1:     LDCW            0(\tmp),\tmp1
+       cmpib,COND(=)   0,\tmp1,1b
+       nop
+2:
+#endif
+       .endm
+
+       /* Release pa_dbit_lock lock without reloading lock address. */
+       .macro          dbit_unlock0    spc,tmp
+#ifdef CONFIG_SMP
+       or,COND(=)      %r0,\spc,%r0
+       stw             \spc,0(\tmp)
+#endif
+       .endm
+
+       /* Release pa_dbit_lock lock. */
+       .macro          dbit_unlock1    spc,tmp
+#ifdef CONFIG_SMP
+       load32          PA(pa_dbit_lock),\tmp
+       dbit_unlock0    \spc,\tmp
+#endif
+       .endm
+
        /* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
         * don't needlessly dirty the cache line if it was already set */
-       .macro          update_ptep     ptep,pte,tmp,tmp1
+       .macro          update_ptep     spc,ptep,pte,tmp,tmp1
+#ifdef CONFIG_SMP
+       or,COND(=)      %r0,\spc,%r0
+       LDREG           0(\ptep),\pte
+#endif
        ldi             _PAGE_ACCESSED,\tmp1
        or              \tmp1,\pte,\tmp
        and,COND(<>)    \tmp1,\pte,%r0
 
        /* Set the dirty bit (and accessed bit).  No need to be
         * clever, this is only used from the dirty fault */
-       .macro          update_dirty    ptep,pte,tmp
+       .macro          update_dirty    spc,ptep,pte,tmp
+#ifdef CONFIG_SMP
+       or,COND(=)      %r0,\spc,%r0
+       LDREG           0(\ptep),\pte
+#endif
        ldi             _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
        or              \tmp,\pte,\pte
        STREG           \pte,0(\ptep)
@@ -1111,11 +1148,13 @@ dtlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,dtlb_check_alias_20w
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
        
        idtlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1135,11 +1174,13 @@ nadtlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,nadtlb_check_alias_20w
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        idtlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1161,7 +1202,8 @@ dtlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_11
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1172,6 +1214,7 @@ dtlb_miss_11:
        idtlbp          prot,(%sr1,va)
 
        mtsp            t0, %sr1        /* Restore sr1 */
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1192,7 +1235,8 @@ nadtlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_11
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1204,6 +1248,7 @@ nadtlb_miss_11:
        idtlbp          prot,(%sr1,va)
 
        mtsp            t0, %sr1        /* Restore sr1 */
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1224,13 +1269,15 @@ dtlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_20
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t0
 
        idtlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1250,13 +1297,15 @@ nadtlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_20
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t0
        
         idtlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1357,11 +1406,13 @@ itlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,itlb_fault
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
        
        iitlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1379,11 +1430,13 @@ naitlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,naitlb_check_alias_20w
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        iitlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1405,7 +1458,8 @@ itlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,itlb_fault
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1416,6 +1470,7 @@ itlb_miss_11:
        iitlbp          prot,(%sr1,va)
 
        mtsp            t0, %sr1        /* Restore sr1 */
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1427,7 +1482,8 @@ naitlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_11
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1438,6 +1494,7 @@ naitlb_miss_11:
        iitlbp          prot,(%sr1,va)
 
        mtsp            t0, %sr1        /* Restore sr1 */
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1459,13 +1516,15 @@ itlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,itlb_fault
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t0  
 
        iitlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1477,13 +1536,15 @@ naitlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_20
 
-       update_ptep     ptp,pte,t0,t1
+       dbit_lock       spc,t0,t1
+       update_ptep     spc,ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t0
 
        iitlbt          pte,prot
+       dbit_unlock1    spc,t0
 
        rfir
        nop
@@ -1507,29 +1568,13 @@ dbit_trap_20w:
 
        L3_ptep         ptp,pte,t0,va,dbit_fault
 
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nolock_20w
-       load32          PA(pa_dbit_lock),t0
-
-dbit_spin_20w:
-       LDCW            0(t0),t1
-       cmpib,COND(=)         0,t1,dbit_spin_20w
-       nop
-
-dbit_nolock_20w:
-#endif
-       update_dirty    ptp,pte,t1
+       dbit_lock       spc,t0,t1
+       update_dirty    spc,ptp,pte,t1
 
        make_insert_tlb spc,pte,prot
                
        idtlbt          pte,prot
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nounlock_20w
-       ldi             1,t1
-       stw             t1,0(t0)
-
-dbit_nounlock_20w:
-#endif
+       dbit_unlock0    spc,t0
 
        rfir
        nop
@@ -1543,18 +1588,8 @@ dbit_trap_11:
 
        L2_ptep         ptp,pte,t0,va,dbit_fault
 
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nolock_11
-       load32          PA(pa_dbit_lock),t0
-
-dbit_spin_11:
-       LDCW            0(t0),t1
-       cmpib,=         0,t1,dbit_spin_11
-       nop
-
-dbit_nolock_11:
-#endif
-       update_dirty    ptp,pte,t1
+       dbit_lock       spc,t0,t1
+       update_dirty    spc,ptp,pte,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1565,13 +1600,7 @@ dbit_nolock_11:
        idtlbp          prot,(%sr1,va)
 
        mtsp            t1, %sr1     /* Restore sr1 */
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nounlock_11
-       ldi             1,t1
-       stw             t1,0(t0)
-
-dbit_nounlock_11:
-#endif
+       dbit_unlock0    spc,t0
 
        rfir
        nop
@@ -1583,32 +1612,15 @@ dbit_trap_20:
 
        L2_ptep         ptp,pte,t0,va,dbit_fault
 
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nolock_20
-       load32          PA(pa_dbit_lock),t0
-
-dbit_spin_20:
-       LDCW            0(t0),t1
-       cmpib,=         0,t1,dbit_spin_20
-       nop
-
-dbit_nolock_20:
-#endif
-       update_dirty    ptp,pte,t1
+       dbit_lock       spc,t0,t1
+       update_dirty    spc,ptp,pte,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t1
        
         idtlbt          pte,prot
-
-#ifdef CONFIG_SMP
-       cmpib,COND(=),n        0,spc,dbit_nounlock_20
-       ldi             1,t1
-       stw             t1,0(t0)
-
-dbit_nounlock_20:
-#endif
+       dbit_unlock0    spc,t0
 
        rfir
        nop
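
As background for the dbit_lock/dbit_unlock macros introduced above: PA-RISC's ldcw loads a word and atomically clears it, so a non-zero result means the lock was free and is now held. A minimal, self-contained C11 model of that protocol (illustrative only, not the kernel's implementation):

    #include <stdatomic.h>

    static atomic_uint pa_dbit_lock_model = 1;          /* 1 = free, 0 = held */

    static void dbit_lock_model(void)
    {
            /* ldcw returns the old value and leaves 0 behind; spin while it
             * reads 0, i.e. while someone else holds the lock. */
            while (atomic_exchange(&pa_dbit_lock_model, 0) == 0)
                    ;
    }

    static void dbit_unlock_model(void)
    {
            atomic_store(&pa_dbit_lock_model, 1);        /* plain store releases */
    }
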
index f7752f6af29e090e559d74e0d8d1206bd50a72ef..9e2d2e408529f744b7bf5b7830cec15ad48a6012 100644 (file)
@@ -222,6 +222,7 @@ static struct hp_hardware hp_hardware_list[] = {
        {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
        {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
        {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
+       {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? (rp5470)"},
        {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
        {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
        {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
index e255db0bb7619cf92e8581cae017f01f20c4718d..2e6443b1e9228426ba94d8602c8956c5daec93c2 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
-#include <linux/spinlock.h>
 #include <linux/types.h>
 #include <asm/io.h>
 
 #include <asm/smp.h>
+#include <asm/ldcw.h>
 
 #undef PARISC_IRQ_CR16_COUNTS
 
@@ -166,22 +166,36 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        seq_printf(p, "%*s: ", prec, "STK");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
-       seq_printf(p, "  Kernel stack usage\n");
+       seq_puts(p, "  Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+       seq_printf(p, "%*s: ", prec, "IST");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+       seq_puts(p, "  Interrupt stack usage\n");
+# endif
 #endif
 #ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
-       seq_printf(p, "  Rescheduling interrupts\n");
+       seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
-       seq_printf(p, "  Function call interrupts\n");
+       seq_puts(p, "  Function call interrupts\n");
 #endif
+       seq_printf(p, "%*s: ", prec, "UAH");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
+       seq_puts(p, "  Unaligned access handler traps\n");
+       seq_printf(p, "%*s: ", prec, "FPA");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
+       seq_puts(p, "  Floating point assist traps\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
-       seq_printf(p, "  TLB shootdowns\n");
+       seq_puts(p, "  TLB shootdowns\n");
        return 0;
 }
 
@@ -366,6 +380,24 @@ static inline int eirr_to_irq(unsigned long eirr)
        return (BITS_PER_LONG - bit) + TIMER_IRQ;
 }
 
+#ifdef CONFIG_IRQSTACKS
+/*
+ * IRQ STACK - used for irq handler
+ */
+#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
+
+union irq_stack_union {
+       unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+       volatile unsigned int slock[4];
+       volatile unsigned int lock[1];
+};
+
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+               .slock = { 1,1,1,1 },
+       };
+#endif
+
+
 int sysctl_panic_on_stackoverflow = 1;
 
 static inline void stack_overflow_check(struct pt_regs *regs)
@@ -378,6 +410,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
        unsigned long sp = regs->gr[30];
        unsigned long stack_usage;
        unsigned int *last_usage;
+       int cpu = smp_processor_id();
 
        /* if sr7 != 0, we interrupted a userspace process which we do not want
         * to check for stack overflow. We will only check the kernel stack. */
@@ -386,7 +419,31 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 
        /* calculate kernel stack usage */
        stack_usage = sp - stack_start;
-       last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+#ifdef CONFIG_IRQSTACKS
+       if (likely(stack_usage <= THREAD_SIZE))
+               goto check_kernel_stack; /* found kernel stack */
+
+       /* check irq stack usage */
+       stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+       stack_usage = sp - stack_start;
+
+       last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+       if (unlikely(stack_usage > *last_usage))
+               *last_usage = stack_usage;
+
+       if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+               return;
+
+       pr_emerg("stackcheck: %s will most likely overflow irq stack "
+                "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+               current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+       goto panic_check;
+
+check_kernel_stack:
+#endif
+
+       /* check kernel stack usage */
+       last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
 
        if (unlikely(stack_usage > *last_usage))
                *last_usage = stack_usage;
@@ -398,31 +455,66 @@ static inline void stack_overflow_check(struct pt_regs *regs)
                 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
                current->comm, sp, stack_start, stack_start + THREAD_SIZE);
 
+#ifdef CONFIG_IRQSTACKS
+panic_check:
+#endif
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
 #endif
 }
 
 #ifdef CONFIG_IRQSTACKS
-DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
+/* in entry.S: */
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
 
 static void execute_on_irq_stack(void *func, unsigned long param1)
 {
-       unsigned long *irq_stack_start;
+       union irq_stack_union *union_ptr;
        unsigned long irq_stack;
-       int cpu = smp_processor_id();
+       volatile unsigned int *irq_stack_in_use;
+
+       union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
+       irq_stack = (unsigned long) &union_ptr->stack;
+       irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
+                        64); /* align for stack frame usage */
 
-       irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0];
-       irq_stack = (unsigned long) irq_stack_start;
-       irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */
+       /* We may be called recursively. If we are already using the irq stack,

+        * just continue to use it. Use spinlocks to serialize
+        * the irq stack usage.
+        */
+       irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
+       if (!__ldcw(irq_stack_in_use)) {
+               void (*direct_call)(unsigned long p1) = func;
 
-       BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */
-       *irq_stack_start = 1;
+               /* We are using the IRQ stack already.
+                * Do direct call on current stack. */
+               direct_call(param1);
+               return;
+       }
 
        /* This is where we switch to the IRQ stack. */
        call_on_stack(param1, func, irq_stack);
 
-       *irq_stack_start = 0;
+       /* free up irq stack usage. */
+       *irq_stack_in_use = 1;
+}
+
+asmlinkage void do_softirq(void)
+{
+       __u32 pending;
+       unsigned long flags;
+
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+
+       pending = local_softirq_pending();
+
+       if (pending)
+               execute_on_irq_stack(__do_softirq, 0);
+
+       local_irq_restore(flags);
 }
 #endif /* CONFIG_IRQSTACKS */
 
index 5e1de6072be57f0c92ce04950284fe7f10c5bf4c..36d7f402e48edb8b8cb13dea9e302d1e84edfdf9 100644 (file)
@@ -605,14 +605,14 @@ ENTRY(copy_user_page_asm)
        convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
        convert_phys_for_tlb_insert20 %r23      /* convert phys addr to tlb insert format */
        depd            %r24,63,22, %r28        /* Form aliased virtual address 'to' */
-       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
+       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
        copy            %r28, %r29
        depdi           1, 41,1, %r29           /* Form aliased virtual address 'from' */
 #else
        extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        extrw,u         %r23, 24,25, %r23       /* convert phys addr to tlb insert format */
        depw            %r24, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,12, %r28          /* Clear any offset bits */
+       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
        copy            %r28, %r29
        depwi           1, 9,1, %r29            /* Form aliased virtual address 'from' */
 #endif
@@ -762,7 +762,7 @@ ENTRY(clear_user_page_asm)
 #else
        extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        depw            %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,12, %r28          /* Clear any offset bits */
+       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
 #endif
 
        /* Purge any old translation */
@@ -846,7 +846,7 @@ ENTRY(flush_dcache_page_asm)
 #else
        extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        depw            %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,12, %r28          /* Clear any offset bits */
+       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
 #endif
 
        /* Purge any old translation */
@@ -918,11 +918,11 @@ ENTRY(flush_icache_page_asm)
 #endif
        convert_phys_for_tlb_insert20 %r26      /* convert phys addr to tlb insert format */
        depd            %r25, 63,22, %r28       /* Form aliased virtual address 'to' */
-       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
+       depdi           0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
 #else
        extrw,u         %r26, 24,25, %r26       /* convert phys addr to tlb insert format */
        depw            %r25, 31,22, %r28       /* Form aliased virtual address 'to' */
-       depwi           0, 31,12, %r28          /* Clear any offset bits */
+       depwi           0, 31,PAGE_SHIFT, %r28  /* Clear any offset bits */
 #endif
 
        /* Purge any old translation */
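
The repeated depdi/depwi changes in this file all encode the same operation, now expressed with PAGE_SHIFT instead of a hard-coded 12. In C terms (the constant below is an assumption for illustration, matching 4 KB pages) it is simply:

    #define PAGE_SHIFT_MODEL 12                  /* 4 KB pages assumed here */

    static unsigned long clear_offset_bits(unsigned long vaddr)
    {
            /* keep the page-frame part, drop the in-page offset bits */
            return vaddr & ~((1UL << PAGE_SHIFT_MODEL) - 1);
    }
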
index 76b63e726a539ee912bea1077d00370603e1f538..1e95b2000ce85650903e85a705206ed113540670 100644 (file)
@@ -69,7 +69,8 @@ void __init setup_cmdline(char **cmdline_p)
                /* called from hpux boot loader */
                boot_command_line[0] = '\0';
        } else {
-               strcpy(boot_command_line, (char *)__va(boot_args[1]));
+               strlcpy(boot_command_line, (char *)__va(boot_args[1]),
+                       COMMAND_LINE_SIZE);
 
 #ifdef CONFIG_BLK_DEV_INITRD
                if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
index f517e08e7f0d4727b771db06429875b140180722..a134ff4da12ee1e4ad39a78e514328f578573ca3 100644 (file)
@@ -59,11 +59,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
        current->comm, current->pid, r20);
     return -ENOSYS;
 }
-
-asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
-                                        u32 mask_lo, int fd,
-                                        const char __user *pathname)
-{
-       return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo,
-                                fd, pathname);
-}
index fe41a98043bbcf287e3ee3562972d6b693e140d0..04e47c6a45626347aa261d3725005cdafb9385ad 100644 (file)
@@ -646,6 +646,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
        case 14:
                /* Assist Exception Trap, i.e. floating point exception. */
                die_if_kernel("Floating point exception", regs, 0); /* quiet */
+               __inc_irq_stat(irq_fpassist_count);
                handle_fpe(regs);
                return;
 
index 234e3682cf0900ef0e55e18e168d1ad5324afd5f..d7c0acb35ec248c51329a5cb4594189bd5f5040f 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/signal.h>
 #include <linux/ratelimit.h>
 #include <asm/uaccess.h>
+#include <asm/hardirq.h>
 
 /* #define DEBUG_UNALIGNED 1 */
 
@@ -454,6 +455,8 @@ void handle_unaligned(struct pt_regs *regs)
        struct siginfo si;
        register int flop=0;    /* true if this is a flop */
 
+       __inc_irq_stat(irq_unaligned_count);
+
        /* log a message with pacing */
        if (user_mode(regs)) {
                if (current->thread.flags & PARISC_UAC_SIGBUS) {
index ce939ac8622b84b7278f9e979e68a56354267adf..1c965642068b48d6b7102a5b6adb365f321f73e6 100644 (file)
@@ -1069,7 +1069,7 @@ void flush_tlb_all(void)
 {
        int do_recycle;
 
-       inc_irq_stat(irq_tlb_count);
+       __inc_irq_stat(irq_tlb_count);
        do_recycle = 0;
        spin_lock(&sid_lock);
        if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1090,7 +1090,7 @@ void flush_tlb_all(void)
 #else
 void flush_tlb_all(void)
 {
-       inc_irq_stat(irq_tlb_count);
+       __inc_irq_stat(irq_tlb_count);
        spin_lock(&sid_lock);
        flush_tlb_all_local(NULL);
        recycle_sids();
index 5416e28a753871ec02c75503e9583e8ff5abfbe9..863d877e0b5f7444f31239a2412bdec68165abea 100644 (file)
@@ -262,8 +262,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI
          Select this to enable early debugging for the PowerNV platform
          using an "hvsi" console
 
+config PPC_EARLY_DEBUG_MEMCONS
+       bool "In memory console"
+       help
+         Select this to enable early debugging using an in memory console.
+         This console provides input and output buffers stored within the
+         kernel BSS and should be safe to select on any system. A debugger
+         can then be used to read kernel output or send input to the console.
 endchoice
 
+config PPC_MEMCONS_OUTPUT_SIZE
+       int "In memory console output buffer size"
+       depends on PPC_EARLY_DEBUG_MEMCONS
+       default 4096
+       help
+         Selects the size of the output buffer (in bytes) of the in memory
+         console.
+
+config PPC_MEMCONS_INPUT_SIZE
+       int "In memory console input buffer size"
+       depends on PPC_EARLY_DEBUG_MEMCONS
+       default 128
+       help
+         Selects the size of the input buffer (in bytes) of the in memory
+         console.
+
 config PPC_EARLY_DEBUG_OPAL
        def_bool y
        depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI
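
To make the new PPC_EARLY_DEBUG_MEMCONS options above concrete, a buffer of the shape the help text describes might look roughly like the sketch below; the struct and field names are purely illustrative assumptions, not the driver's actual layout.

    /* Illustrative only: statically allocated (BSS) console buffers that a
     * debugger could peek/poke, sized by the two new Kconfig options. */
    struct memcons_model {
            char         output[4096];   /* CONFIG_PPC_MEMCONS_OUTPUT_SIZE default */
            unsigned int output_pos;     /* next byte the kernel will write */
            char         input[128];     /* CONFIG_PPC_MEMCONS_INPUT_SIZE default */
            unsigned int input_pos;      /* next byte the kernel will consume */
    };
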
index f79196232917140f0afef3085f53d63e32f5e5d7..139a8308070c8fee09665b81d168ef092e584d3e 100644 (file)
@@ -136,7 +136,6 @@ CONFIG_HID_SMARTJOYPLUS=m
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=m
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_SUSPEND=y
 CONFIG_USB_MON=m
 CONFIG_USB_EHCI_HCD=m
 # CONFIG_USB_EHCI_HCD_PPC_OF is not set
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
new file mode 100644 (file)
index 0000000..b6f5a33
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
+#define _ASM_POWERPC_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#define SCHEDULE_USER bl       .schedule_user
+#else
+#define SCHEDULE_USER bl       .schedule
+#endif
+
+#endif
index 0df54646f9689d867ff86d3b8f515af2dfa1bfe6..681bc0314b6bb999070d59a3f86955d2bf42dd15 100644 (file)
@@ -52,6 +52,7 @@
 #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
 #define FW_FEATURE_PRRN                ASM_CONST(0x0000000200000000)
+#define FW_FEATURE_OPALv3      ASM_CONST(0x0000000400000000)
 
 #ifndef __ASSEMBLY__
 
@@ -69,7 +70,8 @@ enum {
                FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
                FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
        FW_FEATURE_PSERIES_ALWAYS = 0,
-       FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
+       FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
+               FW_FEATURE_OPALv3,
        FW_FEATURE_POWERNV_ALWAYS = 0,
        FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
        FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
index cf4df8e2139af0a1a0c083ba3ad2618b2efc57c4..0c7f2bfcf1348100fb10c4cd6f74dcee34e42756 100644 (file)
 #define H_GET_MPP              0x2D4
 #define H_HOME_NODE_ASSOCIATIVITY 0x2EC
 #define H_BEST_ENERGY          0x2F4
+#define H_XIRR_X               0x2FC
 #define H_RANDOM               0x300
 #define H_COP                  0x304
 #define H_GET_MPP_X            0x314
index d615b28dda82ff7e9afed97ddf51815f5d170cca..ba713f166fa57fb2f9b08002a8a5cf8c0162d196 100644 (file)
@@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void)
 #endif
 
 #define hard_irq_disable()     do {                    \
+       u8 _was_enabled = get_paca()->soft_enabled;     \
        __hard_irq_disable();                           \
-       if (local_paca->soft_enabled)                   \
-               trace_hardirqs_off();                   \
        get_paca()->soft_enabled = 0;                   \
        get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;  \
+       if (_was_enabled)                               \
+               trace_hardirqs_off();                   \
 } while(0)
 
 static inline bool lazy_irq_pending(void)
index b6c8b58b1d764c5d1d8255fbc62df86b6b88c83e..cbb9305ab15affb2035f723be351e3a7431e2955 100644 (file)
@@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType {
 
 enum OpalThreadStatus {
        OPAL_THREAD_INACTIVE = 0x0,
-       OPAL_THREAD_STARTED = 0x1
+       OPAL_THREAD_STARTED = 0x1,
+       OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
 };
 
 enum OpalPciBusCompare {
@@ -563,6 +564,8 @@ extern void opal_nvram_init(void);
 
 extern int opal_machine_check(struct pt_regs *regs);
 
+extern void opal_shutdown(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_H */
index 8b11b5bd9938b847a5439eb5e13851653af9fe25..2c1d8cb9b26562a295ab6b6016261b0d692df30b 100644 (file)
@@ -174,6 +174,8 @@ struct pci_dn {
 /* Get the pointer to a device_node's pci_dn */
 #define PCI_DN(dn)     ((struct pci_dn *) (dn)->data)
 
+extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
+
 extern void * update_dn_pci_info(struct device_node *dn, void *data);
 
 static inline int pci_device_from_OF_node(struct device_node *np,
index 91acb12bac9280a2a8068cc9cd46176f5cbd1243..b66ae722a8e9c2d9aacb9ef7eb49e553d9ad9dde 100644 (file)
@@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 
 static inline pgtable_t pmd_pgtable(pmd_t pmd)
 {
-       return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE);
+       return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
index cea8496091ffbd1cb2437061cb1f7772f3b8bbc3..2f1b6c5f8174f4a4759086f21668dda5c8477b1d 100644 (file)
@@ -523,6 +523,17 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
 #define PPC440EP_ERR42
 #endif
 
+/* The following stops all load and store data streams associated with stream
+ * ID (i.e. streams created explicitly).  The embedded and server mnemonics for
+ * dcbt are different, so we use machine "power4" here explicitly.
+ */
+#define DCBT_STOP_ALL_STREAM_IDS(scratch)      \
+.machine push ;                                        \
+.machine "power4" ;                            \
+       lis     scratch,0x60000000@h;           \
+       dcbt    r0,scratch,0b01010;             \
+.machine pop
+
 /*
  * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
  * keep the address intact to be compatible with code shared with
index d7e67ca8b4a6fb48f26b7a778be4db8873862012..14a658363698ee1d58e0b400b33b3bbc13475dda 100644 (file)
@@ -284,6 +284,12 @@ struct thread_struct {
        unsigned long   ebbrr;
        unsigned long   ebbhr;
        unsigned long   bescr;
+       unsigned long   siar;
+       unsigned long   sdar;
+       unsigned long   sier;
+       unsigned long   mmcr0;
+       unsigned long   mmcr2;
+       unsigned long   mmcra;
 #endif
 };
 
@@ -403,21 +409,16 @@ static inline void prefetchw(const void *x)
 #endif
 
 #ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 {
-       unsigned long sp;
-
        if (is_32)
-               sp = regs->gpr[1] & 0x0ffffffffUL;
-       else
-               sp = regs->gpr[1];
-
+               return sp & 0x0ffffffffUL;
        return sp;
 }
 #else
-static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
 {
-       return regs->gpr[1];
+       return sp;
 }
 #endif
 
index 3e13e23e4fdf8a4e3fc84e07ef091cffdfc0492f..d836d945068d032cb072013cf027f7a43e36b88a 100644 (file)
@@ -47,7 +47,7 @@
  * generic accessors and iterators here
  */
 #define __real_pte(e,p)        ((real_pte_t) { \
-                       (e), ((e) & _PAGE_COMBO) ? \
+                       (e), (pte_val(e) & _PAGE_COMBO) ? \
                                (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index)        ((pte_val((r).pte) & _PAGE_COMBO) ? \
         (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
index a6136515c7f2952d82843eb47586901b3747f22f..4a9e408644fe6ae0403d49821d48516092e51640 100644 (file)
 #define MSR_TM_TRANSACTIONAL(x)        (((x) & MSR_TS_MASK) == MSR_TS_T)
 #define MSR_TM_SUSPENDED(x)    (((x) & MSR_TS_MASK) == MSR_TS_S)
 
-/* Reason codes describing kernel causes for transaction aborts.  By
-   convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
-   the failure is persistent.
-*/
-#define TM_CAUSE_RESCHED       0xfe
-#define TM_CAUSE_TLBI          0xfc
-#define TM_CAUSE_FAC_UNAV      0xfa
-#define TM_CAUSE_SYSCALL       0xf9 /* Persistent */
-#define TM_CAUSE_MISC          0xf6
-#define TM_CAUSE_SIGNAL                0xf4
-
 #if defined(CONFIG_PPC_BOOK3S_64)
 #define MSR_64BIT      MSR_SF
 
index a8bc2bb4adc97147b8a4d95b07ea54bef1016567..34fd70488d83f09ebb947680763ddbcf4ce62fad 100644 (file)
@@ -264,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex);
 extern void rtas_initialize(void);
 extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
 extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
 extern int rtas_ibm_suspend_me(struct rtas_args *);
 
 struct rtc_time;
index fbe66c463891417b846864077732eda44eed68f6..9322c28aebd2b89ac193c8470ca41c33b62730e3 100644 (file)
@@ -3,5 +3,8 @@
 
 #define __ARCH_HAS_SA_RESTORER
 #include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+extern unsigned long get_tm_stackpointer(struct pt_regs *regs);
 
 #endif /* _ASM_POWERPC_SIGNAL_H */
index 8ceea14d6fe44a20d0d807e28a4ca63477f18863..ba7b1973866e1933486f47ed59e3aa0efe799f34 100644 (file)
@@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_PERFMON_CTXSW      6       /* perfmon needs ctxsw calls */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SINGLESTEP         8       /* singlestepping active */
-#define TIF_MEMDIE             9       /* is terminating due to OOM killer */
+#define TIF_NOHZ               9       /* in adaptive nohz mode */
 #define TIF_SECCOMP            10      /* secure computing */
 #define TIF_RESTOREALL         11      /* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR            12      /* Force successful syscall return */
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_TRACEPOINT 15      /* syscall tracepoint instrumentation */
 #define TIF_EMULATE_STACK_STORE        16      /* Is an instruction emulation
                                                for stack store? */
+#define TIF_MEMDIE             17      /* is terminating due to OOM killer */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -124,8 +125,10 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_UPROBE            (1<<TIF_UPROBE)
 #define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_EMULATE_STACK_STORE       (1<<TIF_EMULATE_STACK_STORE)
+#define _TIF_NOHZ              (1<<TIF_NOHZ)
 #define _TIF_SYSCALL_T_OR_A    (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                                _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
+                                _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+                                _TIF_NOHZ)
 
 #define _TIF_USER_WORK_MASK    (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
                                 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
index 4b4449abf3f854d8a29a1bed14fb273bf94f612b..9dfbc34bdbf5e7e86eda005d4e974f5fe3ebd12a 100644 (file)
@@ -5,6 +5,8 @@
  * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
  */
 
+#include <uapi/asm/tm.h>
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 extern void do_load_up_transact_fpu(struct thread_struct *thread);
 extern void do_load_up_transact_altivec(struct thread_struct *thread);
index 5a7510e9d09d3865619b7a874efa7584074b5f7a..dc590919f8eb370d1568d54f7c002d58f63b2dad 100644 (file)
@@ -52,6 +52,7 @@ extern void __init udbg_init_40x_realmode(void);
 extern void __init udbg_init_cpm(void);
 extern void __init udbg_init_usbgecko(void);
 extern void __init udbg_init_wsp(void);
+extern void __init udbg_init_memcons(void);
 extern void __init udbg_init_ehv_bc(void);
 extern void __init udbg_init_ps3gelic(void);
 extern void __init udbg_init_debug_opal_raw(void);
index f7bca6370745b49133a771d9aff43a742116b229..5182c8622b54eea94ba18cfad7236872ae056865 100644 (file)
@@ -40,6 +40,7 @@ header-y += statfs.h
 header-y += swab.h
 header-y += termbits.h
 header-y += termios.h
+header-y += tm.h
 header-y += types.h
 header-y += ucontext.h
 header-y += unistd.h
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
new file mode 100644 (file)
index 0000000..85059a0
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _ASM_POWERPC_TM_H
+#define _ASM_POWERPC_TM_H
+
+/* Reason codes describing kernel causes for transaction aborts.  By
+ * convention, bit 0 is copied to TEXASR[56] (IBM bit 7), which is set if
+ * the failure is persistent.  PAPR reserves 0xff-0xe0 for the hypervisor.
+ */
+#define TM_CAUSE_PERSISTENT    0x01
+#define TM_CAUSE_RESCHED       0xde
+#define TM_CAUSE_TLBI          0xdc
+#define TM_CAUSE_FAC_UNAV      0xda
+#define TM_CAUSE_SYSCALL       0xd8  /* future use */
+#define TM_CAUSE_MISC          0xd6  /* future use */
+#define TM_CAUSE_SIGNAL                0xd4
+#define TM_CAUSE_ALIGNMENT     0xd2
+#define TM_CAUSE_EMULATE       0xd0
+
+#endif
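
Given the convention stated in the new header's comment, a consumer could classify abort causes with a one-line check. This is a hedged illustration, not an API from the patch; the _MODEL constant mirrors TM_CAUSE_PERSISTENT above.

    #include <stdbool.h>

    #define TM_CAUSE_PERSISTENT_MODEL 0x01

    static bool tm_cause_is_persistent(unsigned int cause)
    {
            /* bit 0 of the cause byte is the one copied to TEXASR[56] */
            return (cause & TM_CAUSE_PERSISTENT_MODEL) != 0;
    }
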
index b51a97cfedf88ff2b7884ef19485ff081d18cf78..6f16ffafa6f01542d54ccc684c3f8d5d7d6add5c 100644 (file)
@@ -127,6 +127,12 @@ int main(void)
        DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr));
        DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr));
        DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr));
+       DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar));
+       DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar));
+       DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
+       DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
+       DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
+       DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra));
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
index a283b6442b266544235e11bc061d33d58f321b05..18b5b9cf8e3730f4608ca6ff997810d5cc47df9a 100644 (file)
@@ -135,8 +135,12 @@ __init_HFSCR:
        blr
 
 __init_TLB:
-       /* Clear the TLB */
-       li      r6,128
+       /*
+        * Clear the TLB using the "IS 3" form of the tlbiel instruction
+        * (invalidate by congruence class). P7 has 128 CCs, P8 has 512,
+        * so we just always do 512.
+        */
+       li      r6,512
        mtctr   r6
        li      r7,0xc00        /* IS field = 0b11 */
        ptesync
index c60bbec25c1fe98ca73581ef576d9785c2561a73..1f0937d7d4b546a1f8e90269075fec63a9b7df88 100644 (file)
@@ -453,7 +453,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .icache_bsize           = 128,
                .dcache_bsize           = 128,
                .oprofile_type          = PPC_OPROFILE_POWER4,
-               .oprofile_cpu_type      = "ppc64/ibm-compat-v1",
+               .oprofile_cpu_type      = 0,
                .cpu_setup              = __setup_cpu_power8,
                .cpu_restore            = __restore_cpu_power8,
                .platform               = "power8",
@@ -482,7 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .cpu_name               = "POWER7+ (raw)",
                .cpu_features           = CPU_FTRS_POWER7,
                .cpu_user_features      = COMMON_USER_POWER7,
-               .cpu_user_features      = COMMON_USER2_POWER7,
+               .cpu_user_features2     = COMMON_USER2_POWER7,
                .mmu_features           = MMU_FTRS_POWER7,
                .icache_bsize           = 128,
                .dcache_bsize           = 128,
@@ -506,7 +506,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .dcache_bsize           = 128,
                .num_pmcs               = 6,
                .pmc_type               = PPC_PMC_IBM,
-               .oprofile_cpu_type      = "ppc64/power8",
+               .oprofile_cpu_type      = 0,
                .oprofile_type          = PPC_OPROFILE_POWER4,
                .cpu_setup              = __setup_cpu_power8,
                .cpu_restore            = __restore_cpu_power8,
index e514de57a125333a4ce174cb399070b6cc62e3b4..22b45a4955cd8f03a26c89a7fb2ab79252f2b6d4 100644 (file)
@@ -439,8 +439,6 @@ ret_from_fork:
 ret_from_kernel_thread:
        REST_NVGPRS(r1)
        bl      schedule_tail
-       li      r3,0
-       stw     r3,0(r1)
        mtlr    r14
        mr      r3,r15
        PPC440EP_ERR42
@@ -851,7 +849,7 @@ resume_kernel:
        /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r8,TI_FLAGS(r9)
-       andis.  r8,r8,_TIF_EMULATE_STACK_STORE@h
+       andis.  r0,r8,_TIF_EMULATE_STACK_STORE@h
        beq+    1f
 
        addi    r8,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
index 3fe5259e2fea872be5da2e333427a935f8a012d0..246b11c4fe7eda803a4c076c078c5afdcff04590 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/irqflags.h>
 #include <asm/ftrace.h>
 #include <asm/hw_irq.h>
+#include <asm/context_tracking.h>
 
 /*
  * System calls.
@@ -150,7 +151,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
        CURRENT_THREAD_INFO(r11, r1)
        ld      r10,TI_FLAGS(r11)
        andi.   r11,r10,_TIF_SYSCALL_T_OR_A
-       bne-    syscall_dotrace
+       bne     syscall_dotrace
 .Lsyscall_dotrace_cont:
        cmpldi  0,r0,NR_syscalls
        bge-    syscall_enosys
@@ -376,8 +377,6 @@ _GLOBAL(ret_from_fork)
 _GLOBAL(ret_from_kernel_thread)
        bl      .schedule_tail
        REST_NVGPRS(r1)
-       li      r3,0
-       std     r3,0(r1)
        ld      r14, 0(r14)
        mtlr    r14
        mr      r3,r15
@@ -466,6 +465,20 @@ BEGIN_FTR_SECTION
        std     r0, THREAD_EBBHR(r3)
        mfspr   r0, SPRN_EBBRR
        std     r0, THREAD_EBBRR(r3)
+
+       /* PMU registers made user read/(write) by EBB */
+       mfspr   r0, SPRN_SIAR
+       std     r0, THREAD_SIAR(r3)
+       mfspr   r0, SPRN_SDAR
+       std     r0, THREAD_SDAR(r3)
+       mfspr   r0, SPRN_SIER
+       std     r0, THREAD_SIER(r3)
+       mfspr   r0, SPRN_MMCR0
+       std     r0, THREAD_MMCR0(r3)
+       mfspr   r0, SPRN_MMCR2
+       std     r0, THREAD_MMCR2(r3)
+       mfspr   r0, SPRN_MMCRA
+       std     r0, THREAD_MMCRA(r3)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 #endif
 
@@ -488,6 +501,13 @@ BEGIN_FTR_SECTION
        ldarx   r6,0,r1
 END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
 
+#ifdef CONFIG_PPC_BOOK3S
+/* Cancel all explicit user streams as they will have no use after context
+ * switch and will stop the HW from creating streams itself.
+ */
+       DCBT_STOP_ALL_STREAM_IDS(r6)
+#endif
+
        addi    r6,r4,-THREAD   /* Convert THREAD to 'current' */
        std     r6,PACACURRENT(r13)     /* Set new 'current' */
 
@@ -561,6 +581,20 @@ BEGIN_FTR_SECTION
        ld      r0, THREAD_EBBRR(r4)
        mtspr   SPRN_EBBRR, r0
 
+       /* PMU registers made user read/(write) by EBB */
+       ld      r0, THREAD_SIAR(r4)
+       mtspr   SPRN_SIAR, r0
+       ld      r0, THREAD_SDAR(r4)
+       mtspr   SPRN_SDAR, r0
+       ld      r0, THREAD_SIER(r4)
+       mtspr   SPRN_SIER, r0
+       ld      r0, THREAD_MMCR0(r4)
+       mtspr   SPRN_MMCR0, r0
+       ld      r0, THREAD_MMCR2(r4)
+       mtspr   SPRN_MMCR2, r0
+       ld      r0, THREAD_MMCRA(r4)
+       mtspr   SPRN_MMCRA, r0
+
        ld      r0,THREAD_TAR(r4)
        mtspr   SPRN_TAR,r0
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
@@ -634,7 +668,7 @@ _GLOBAL(ret_from_except_lite)
        andi.   r0,r4,_TIF_NEED_RESCHED
        beq     1f
        bl      .restore_interrupts
-       bl      .schedule
+       SCHEDULE_USER
        b       .ret_from_except_lite
 
 1:     bl      .save_nvgprs
index 42a756eec9ff7ea6a240bbef197e6519b1abd008..645170a07ada1da7abf4fddc814fcbaad9c071c9 100644 (file)
@@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
         */
 
        mfspr   r14,SPRN_DBSR           /* check single-step/branch taken */
-       andis.  r15,r14,DBSR_IC@h
+       andis.  r15,r14,(DBSR_IC|DBSR_BT)@h
        beq+    1f
 
        LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -500,7 +500,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        bge+    cr1,1f
 
        /* here it looks like we got an inappropriate debug exception. */
-       lis     r14,DBSR_IC@h           /* clear the IC event */
+       lis     r14,(DBSR_IC|DBSR_BT)@h         /* clear the event */
        rlwinm  r11,r11,0,~MSR_DE       /* clear DE in the CSRR1 value */
        mtspr   SPRN_DBSR,r14
        mtspr   SPRN_CSRR1,r11
@@ -555,7 +555,7 @@ kernel_dbg_exc:
         */
 
        mfspr   r14,SPRN_DBSR           /* check single-step/branch taken */
-       andis.  r15,r14,DBSR_IC@h
+       andis.  r15,r14,(DBSR_IC|DBSR_BT)@h
        beq+    1f
 
        LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -566,7 +566,7 @@ kernel_dbg_exc:
        bge+    cr1,1f
 
        /* here it looks like we got an inappropriate debug exception. */
-       lis     r14,DBSR_IC@h           /* clear the IC event */
+       lis     r14,(DBSR_IC|DBSR_BT)@h         /* clear the event */
        rlwinm  r11,r11,0,~MSR_DE       /* clear DE in the DSRR1 value */
        mtspr   SPRN_DBSR,r14
        mtspr   SPRN_DSRR1,r11
index 466a2908bb634506e034cf63ab1da77b2036d95c..611acdf30096643a1d94bc3111b685e28d290453 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/cpu.h>
+#include <linux/hardirq.h>
 
 #include <asm/page.h>
 #include <asm/current.h>
@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image)
        pr_debug("kexec: Starting switchover sequence.\n");
 
        /* switch to a staticly allocated stack.  Based on irq stack code.
+        * We set up preempt_count to avoid using VMX in memcpy.
         * XXX: the task struct will likely be invalid once we do the copy!
         */
        kexec_stack.thread_info.task = current_thread_info()->task;
        kexec_stack.thread_info.flags = 0;
+       kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
+       kexec_stack.thread_info.cpu = current_thread_info()->cpu;
 
        /* We need a static PACA, too; copy this CPU's PACA over and switch to
         * it.  Also poison per_cpu_offset to catch anyone using non-static
index 19e096bd0e73d8b9a530159a4b9b440efc25403f..e469f30e6eeb88b4668e11a86d287e99b67aeae2 100644 (file)
@@ -657,6 +657,17 @@ _GLOBAL(__ucmpdi2)
        li      r3,2
        blr
 
+_GLOBAL(__bswapdi2)
+       rotlwi  r9,r4,8
+       rotlwi  r10,r3,8
+       rlwimi  r9,r4,24,0,7
+       rlwimi  r10,r3,24,0,7
+       rlwimi  r9,r4,24,16,23
+       rlwimi  r10,r3,24,16,23
+       mr      r3,r9
+       mr      r4,r10
+       blr
+
 _GLOBAL(abs)
        srawi   r4,r3,31
        xor     r3,r3,r4
index 5cfa8008693b83aaae96dd4f5d91a7e8190d234b..6820e45f557b73b848fda82c512332752b760dc2 100644 (file)
@@ -234,6 +234,17 @@ _GLOBAL(__flush_dcache_icache)
        isync
        blr
 
+_GLOBAL(__bswapdi2)
+       srdi    r8,r3,32
+       rlwinm  r7,r3,8,0xffffffff
+       rlwimi  r7,r3,24,0,7
+       rlwinm  r9,r8,8,0xffffffff
+       rlwimi  r7,r3,24,16,23
+       rlwimi  r9,r8,24,0,7
+       rlwimi  r9,r8,24,16,23
+       sldi    r7,r7,32
+       or      r3,r7,r9
+       blr
 
 #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
 /*
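
Both __bswapdi2 helpers added above (the 32-bit register-pair version and the 64-bit version) compute the same thing; a portable C reference, useful for checking the rotate/insert sequences, is:

    static unsigned long long bswapdi2_model(unsigned long long x)
    {
            /* reverse the byte order of a 64-bit value */
            return ((x & 0x00000000000000ffULL) << 56) |
                   ((x & 0x000000000000ff00ULL) << 40) |
                   ((x & 0x0000000000ff0000ULL) << 24) |
                   ((x & 0x00000000ff000000ULL) <<  8) |
                   ((x & 0x000000ff00000000ULL) >>  8) |
                   ((x & 0x0000ff0000000000ULL) >> 24) |
                   ((x & 0x00ff000000000000ULL) >> 40) |
                   ((x & 0xff00000000000000ULL) >> 56);
    }
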
index f5c5c90799a763eb2a188316ea95e5d761ffe232..7f2273cc3c7d8a104100a9e3b47657b94fb23067 100644 (file)
@@ -359,7 +359,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
                                      enum pci_mmap_state mmap_state,
                                      int write_combine)
 {
-       unsigned long prot = pgprot_val(protection);
 
        /* Write combine is always 0 on non-memory space mappings. On
         * memory space, if the user didn't pass 1, we check for a
@@ -376,9 +375,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
 
        /* XXX would be nice to have a way to ask for write-through */
        if (write_combine)
-               return pgprot_noncached_wc(prot);
+               return pgprot_noncached_wc(protection);
        else
-               return pgprot_noncached(prot);
+               return pgprot_noncached(protection);
 }
 
 /*
@@ -658,15 +657,6 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
  *     ranges. However, some machines (thanks Apple !) tend to split their
  *     space into lots of small contiguous ranges. So we have to coalesce.
  *
- *   - We can only cope with all memory ranges having the same offset
- *     between CPU addresses and PCI addresses. Unfortunately, some bridges
- *     are setup for a large 1:1 mapping along with a small "window" which
- *     maps PCI address 0 to some arbitrary high address of the CPU space in
- *     order to give access to the ISA memory hole.
- *     The way out of here that I've chosen for now is to always set the
- *     offset based on the first resource found, then override it if we
- *     have a different offset and the previous was set by an ISA hole.
- *
  *   - Some busses have IO space not starting at 0, which causes trouble with
  *     the way we do our IO resource renumbering. The code somewhat deals with
  *     it for 64 bits but I would expect problems on 32 bits.
@@ -681,10 +671,9 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
        int rlen;
        int pna = of_n_addr_cells(dev);
        int np = pna + 5;
-       int memno = 0, isa_hole = -1;
+       int memno = 0;
        u32 pci_space;
        unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
-       unsigned long long isa_mb = 0;
        struct resource *res;
 
        printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
@@ -778,8 +767,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                        }
                        /* Handles ISA memory hole space here */
                        if (pci_addr == 0) {
-                               isa_mb = cpu_addr;
-                               isa_hole = memno;
                                if (primary || isa_mem_base == 0)
                                        isa_mem_base = cpu_addr;
                                hose->isa_mem_phys = cpu_addr;
@@ -1521,9 +1508,10 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
        for (i = 0; i < 3; ++i) {
                res = &hose->mem_resources[i];
                if (!res->flags) {
-                       printk(KERN_ERR "PCI: Memory resource 0 not set for "
-                              "host bridge %s (domain %d)\n",
-                              hose->dn->full_name, hose->global_number);
+                       if (i == 0)
+                               printk(KERN_ERR "PCI: Memory resource 0 not set for "
+                                      "host bridge %s (domain %d)\n",
+                                      hose->dn->full_name, hose->global_number);
                        continue;
                }
                offset = hose->mem_offset[i];
index 873050d268406e4a343d2e597c80ec7d99795552..2e8629654ca872443e89e30455b05f54c1850eb0 100644 (file)
@@ -266,3 +266,13 @@ int pcibus_to_node(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibus_to_node);
 #endif
+
+static void quirk_radeon_32bit_msi(struct pci_dev *dev)
+{
+       struct pci_dn *pdn = pci_get_pdn(dev);
+
+       if (pdn)
+               pdn->force_32bit_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
index e7af165f8b9d402add808fc5ce3760b9674d7c7a..df038442548a1397fb3cf5c9458fc39379a381a2 100644 (file)
 #include <asm/ppc-pci.h>
 #include <asm/firmware.h>
 
+struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
+{
+       struct device_node *dn = pci_device_to_OF_node(pdev);
+       if (!dn)
+               return NULL;
+       return PCI_DN(dn);
+}
+
 /*
  * Traverse_func that inits the PCI fields of the device node.
  * NOTE: this *must* be done before read/write config to the device.
index 78b8766fd79e4605f96a103c6562ff62caa37c5e..c29666586998f27dd0f4d5aa8db721320a82cc2d 100644 (file)
@@ -143,7 +143,8 @@ EXPORT_SYMBOL(__lshrdi3);
 int __ucmpdi2(unsigned long long, unsigned long long);
 EXPORT_SYMBOL(__ucmpdi2);
 #endif
-
+long long __bswapdi2(long long);
+EXPORT_SYMBOL(__bswapdi2);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memmove);
index ceb4e7b62cf441ed6237499d11ef15cc6e2e4d50..a902723fdc69b0857d3d8658e32790614983c626 100644 (file)
@@ -339,6 +339,13 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 
 static void prime_debug_regs(struct thread_struct *thread)
 {
+       /*
+        * We could have inherited MSR_DE from userspace, since
+        * it doesn't get cleared on exception entry.  Make sure
+        * MSR_DE is clear before we enable any debug events.
+        */
+       mtmsr(mfmsr() & ~MSR_DE);
+
        mtspr(SPRN_IAC1, thread->iac1);
        mtspr(SPRN_IAC2, thread->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
@@ -971,6 +978,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
         * do some house keeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
+       ((unsigned long *)sp)[0] = 0;
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
index 3b14d320e69f81737b447b59bcef4046edde0b5a..98c2fc198712aabe3f90055e24d653df604be78e 100644 (file)
@@ -32,6 +32,7 @@
 #include <trace/syscall.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/perf_event.h>
+#include <linux/context_tracking.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -1788,6 +1789,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 {
        long ret = 0;
 
+       user_exit();
+
        secure_computing_strict(regs->gpr[0]);
 
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
@@ -1832,4 +1835,6 @@ void do_syscall_trace_leave(struct pt_regs *regs)
        step = test_thread_flag(TIF_SINGLESTEP);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, step);
+
+       user_enter();
 }
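
The changes to this file bracket syscall tracing with user_exit()/user_enter() so that context tracking (the RCU user extended quiescent state) knows the task has re-entered the kernel. A minimal sketch of the general pattern, with placeholder hook names (arch_trace_hook_enter/exit are not real kernel symbols):

#include <linux/context_tracking.h>

/* Placeholder entry hook: leave the "user" context-tracking state
 * before doing any kernel-side tracing work. */
static void arch_trace_hook_enter(void)
{
        user_exit();
        /* ... syscall-entry tracing, secure computing checks, etc. ... */
}

/* Placeholder exit hook: do the tracing work first, then tell context
 * tracking we are about to return to user mode. */
static void arch_trace_hook_exit(void)
{
        /* ... syscall-exit tracing ... */
        user_enter();
}
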
index 1fd6e7b2f390b41dac642f58e781baf735c04be4..52add6f3e201e1c196ea493f40a8fccc2cc922bc 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/capability.h>
 #include <linux/delay.h>
+#include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
@@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info)
        __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
 }
 
+enum rtas_cpu_state {
+       DOWN,
+       UP,
+};
+
+#ifndef CONFIG_SMP
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+                               cpumask_var_t cpus)
+{
+       if (!cpumask_empty(cpus)) {
+               cpumask_clear(cpus);
+               return -EINVAL;
+       } else
+               return 0;
+}
+#else
+/* On return the cpumask will be altered to indicate which CPUs changed state:
+ * CPUs whose state was changed remain set in the mask,
+ * CPUs whose state was unchanged are cleared from the mask. */
+static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+                               cpumask_var_t cpus)
+{
+       int cpu;
+       int cpuret = 0;
+       int ret = 0;
+
+       if (cpumask_empty(cpus))
+               return 0;
+
+       for_each_cpu(cpu, cpus) {
+               switch (state) {
+               case DOWN:
+                       cpuret = cpu_down(cpu);
+                       break;
+               case UP:
+                       cpuret = cpu_up(cpu);
+                       break;
+               }
+               if (cpuret) {
+                       pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+                                       __func__,
+                                       ((state == UP) ? "up" : "down"),
+                                       cpu, cpuret);
+                       if (!ret)
+                               ret = cpuret;
+                       if (state == UP) {
+                               /* clear bits for unchanged cpus, return */
+                               cpumask_shift_right(cpus, cpus, cpu);
+                               cpumask_shift_left(cpus, cpus, cpu);
+                               break;
+                       } else {
+                               /* clear bit for unchanged cpu, continue */
+                               cpumask_clear_cpu(cpu, cpus);
+                       }
+               }
+       }
+
+       return ret;
+}
+#endif
+
+int rtas_online_cpus_mask(cpumask_var_t cpus)
+{
+       int ret;
+
+       ret = rtas_cpu_state_change_mask(UP, cpus);
+
+       if (ret) {
+               cpumask_var_t tmp_mask;
+
+               if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
+                       return ret;
+
+               /* Use tmp_mask to preserve cpus mask from first failure */
+               cpumask_copy(tmp_mask, cpus);
+               rtas_offline_cpus_mask(tmp_mask);
+               free_cpumask_var(tmp_mask);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(rtas_online_cpus_mask);
+
+int rtas_offline_cpus_mask(cpumask_var_t cpus)
+{
+       return rtas_cpu_state_change_mask(DOWN, cpus);
+}
+EXPORT_SYMBOL(rtas_offline_cpus_mask);
+
 int rtas_ibm_suspend_me(struct rtas_args *args)
 {
        long state;
@@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        struct rtas_suspend_me_data data;
        DECLARE_COMPLETION_ONSTACK(done);
+       cpumask_var_t offline_mask;
+       int cpuret;
 
        if (!rtas_service_present("ibm,suspend-me"))
                return -ENOSYS;
@@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
                return 0;
        }
 
+       if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+               return -ENOMEM;
+
        atomic_set(&data.working, 0);
        atomic_set(&data.done, 0);
        atomic_set(&data.error, 0);
        data.token = rtas_token("ibm,suspend-me");
        data.complete = &done;
+
+       /* All present CPUs must be online */
+       cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+       cpuret = rtas_online_cpus_mask(offline_mask);
+       if (cpuret) {
+               pr_err("%s: Could not bring present CPUs online.\n", __func__);
+               atomic_set(&data.error, cpuret);
+               goto out;
+       }
+
        stop_topology_update();
 
        /* Call function on all CPUs.  One of us will make the
@@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
 
        start_topology_update();
 
+       /* Take down CPUs not online prior to suspend */
+       cpuret = rtas_offline_cpus_mask(offline_mask);
+       if (cpuret)
+               pr_warn("%s: Could not restore CPUs to offline state.\n",
+                               __func__);
+
+out:
+       free_cpumask_var(offline_mask);
        return atomic_read(&data.error);
 }
 #else /* CONFIG_PPC_PSERIES */
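
The new rtas_online_cpus_mask()/rtas_offline_cpus_mask() helpers are used by rtas_ibm_suspend_me() above to force every present CPU online around the suspend call and then restore the previous state. A sketch of the same pattern for a generic caller follows; the wrapper name and the do_firmware_op() callback are illustrative, and the helper declarations are assumed to live in <asm/rtas.h>:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/printk.h>
#include <asm/rtas.h>           /* rtas_online_cpus_mask() etc. (assumed) */

static int with_all_present_cpus_online(int (*do_firmware_op)(void))
{
        cpumask_var_t was_offline;
        int rc, cpuret;

        if (!alloc_cpumask_var(&was_offline, GFP_KERNEL))
                return -ENOMEM;

        /* CPUs that are present but not currently online */
        cpumask_andnot(was_offline, cpu_present_mask, cpu_online_mask);

        rc = rtas_online_cpus_mask(was_offline);
        if (rc)
                goto out;       /* helper already rolled back partial onlining */

        rc = do_firmware_op();

        /* Return the temporarily-onlined CPUs to their previous state */
        cpuret = rtas_offline_cpus_mask(was_offline);
        if (cpuret)
                pr_warn("%s: could not restore CPUs to offline state\n",
                        __func__);
out:
        free_cpumask_var(was_offline);
        return rc;
}
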
index 5b3022470126ce1787bb259c89b319d7f7e9770f..2f3cdb01506de3d7791712ecd6ffeaf1fcd36352 100644 (file)
@@ -89,6 +89,7 @@
 
 /* Array sizes */
 #define VALIDATE_BUF_SIZE 4096    
+#define VALIDATE_MSG_LEN  256
 #define RTAS_MSG_MAXLEN   64
 
 /* Quirk - RTAS requires 4k list length and block size */
@@ -466,7 +467,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf)
 }
 
 static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, 
-                                  char *msg)
+                                  char *msg, int msglen)
 {
        int n;
 
@@ -474,7 +475,8 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
                n = sprintf(msg, "%d\n", args_buf->update_results);
                if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) ||
                    (args_buf->update_results == VALIDATE_TMP_UPDATE))
-                       n += sprintf(msg + n, "%s\n", args_buf->buf);
+                       n += snprintf(msg + n, msglen - n, "%s\n",
+                                       args_buf->buf);
        } else {
                n = sprintf(msg, "%d\n", args_buf->status);
        }
@@ -486,11 +488,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
 {
        struct rtas_validate_flash_t *const args_buf =
                &rtas_validate_flash_data;
-       char msg[RTAS_MSG_MAXLEN];
+       char msg[VALIDATE_MSG_LEN];
        int msglen;
 
        mutex_lock(&rtas_validate_flash_mutex);
-       msglen = get_validate_flash_msg(args_buf, msg);
+       msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN);
        mutex_unlock(&rtas_validate_flash_mutex);
 
        return simple_read_from_buffer(buf, count, ppos, msg, msglen);
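
The hunk above switches the second write to snprintf() so the appended buffer contents can never overflow the enlarged VALIDATE_MSG_LEN message. A small stand-alone illustration of the same append-with-remaining-length pattern (all names here are invented for the example):

#include <stdio.h>

#define MSG_LEN 256

/* Write a status line and an optional detail string into a fixed-size
 * buffer, bounding every write by the space that remains. */
static int format_msg(char *msg, int msglen, int status, const char *detail)
{
        int n = snprintf(msg, msglen, "%d\n", status);

        if (detail && n < msglen)
                n += snprintf(msg + n, msglen - n, "%s\n", detail);
        return n < msglen ? n : msglen - 1;     /* bytes actually stored */
}

int main(void)
{
        char msg[MSG_LEN];
        int len = format_msg(msg, MSG_LEN, 0, "image is valid for this system");

        fwrite(msg, 1, len, stdout);
        return 0;
}
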
index cf12eae02de5a694e2ba0e5298bfd8f082b688f9..457e97aa29455e6894257f443f1a223583483685 100644 (file)
 #include <linux/signal.h>
 #include <linux/uprobes.h>
 #include <linux/key.h>
+#include <linux/context_tracking.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/debug.h>
+#include <asm/tm.h>
 
 #include "signal.h"
 
  * through debug.exception-trace sysctl.
  */
 
-int show_unhandled_signals = 0;
+int show_unhandled_signals = 1;
 
 /*
  * Allocate space for the signal frame
  */
-void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
                           size_t frame_size, int is_32)
 {
         unsigned long oldsp, newsp;
 
         /* Default to using normal stack */
-        oldsp = get_clean_sp(regs, is_32);
+        oldsp = get_clean_sp(sp, is_32);
 
        /* Check for alt stack */
        if ((ka->sa.sa_flags & SA_ONSTACK) &&
@@ -159,6 +161,8 @@ static int do_signal(struct pt_regs *regs)
 
 void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 {
+       user_exit();
+
        if (thread_info_flags & _TIF_UPROBE)
                uprobe_notify_resume(regs);
 
@@ -169,4 +173,41 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
        }
+
+       user_enter();
+}
+
+unsigned long get_tm_stackpointer(struct pt_regs *regs)
+{
+       /* When in an active transaction that takes a signal, we need to be
+        * careful with the stack.  It's possible that the stack has moved back
+        * up after the tbegin.  The obvious case here is when the tbegin is
+        * called inside a function that returns before a tend.  In this case,
+        * the stack is part of the checkpointed transactional memory state.
+        * If we write over this non transactionally or in suspend, we are in
+        * trouble because if we get a tm abort, the program counter and stack
+        * pointer will be back at the tbegin but our in memory stack won't be
+        * valid anymore.
+        *
+        * To avoid this, when taking a signal in an active transaction, we
+        * need to use the stack pointer from the checkpointed state, rather
+        * than the speculated state.  This ensures that the signal context
+        * (written tm suspended) will be written below the stack required for
+        * the rollback.  The transaction is aborted because of the treclaim,
+        * so any memory written between the tbegin and the signal will be
+        * rolled back anyway.
+        *
+        * For signals taken in non-TM or suspended mode, we use the
+        * normal/non-checkpointed stack pointer.
+        */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       if (MSR_TM_ACTIVE(regs->msr)) {
+               tm_enable();
+               tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+               if (MSR_TM_TRANSACTIONAL(regs->msr))
+                       return current->thread.ckpt_regs.gpr[1];
+       }
+#endif
+       return regs->gpr[1];
 }
index ec84c901ceabb754e7e64b6efbf33fda04b7fe22..c69b9aeb9f236646c06ef3f2cfc320ecc00cdcd9 100644 (file)
@@ -12,7 +12,7 @@
 
 extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
 
-extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp,
                                  size_t frame_size, int is_32);
 
 extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
index 95068bf569adc17cf51d7e32bd650daaba234b17..201385c3a1ae186f9de8102d0f5a8d645c36a6d4 100644 (file)
@@ -503,12 +503,6 @@ static int save_tm_user_regs(struct pt_regs *regs,
 {
        unsigned long msr = regs->msr;
 
-       /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs,
-        * thread.transact_fpr[], thread.transact_vr[], etc.
-        */
-       tm_enable();
-       tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
        /* Make sure floating point registers are stored in regs */
        flush_fp_to_thread(current);
 
@@ -965,7 +959,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 
        /* Set up Signal Frame */
        /* Put a Real Time Context onto stack */
-       rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
+       rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
        addr = rt_sf;
        if (unlikely(rt_sf == NULL))
                goto badframe;
@@ -1403,7 +1397,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
        unsigned long tramp;
 
        /* Set up Signal Frame */
-       frame = get_sigframe(ka, regs, sizeof(*frame), 1);
+       frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
        if (unlikely(frame == NULL))
                goto badframe;
        sc = (struct sigcontext __user *) &frame->sctx;
index c1794286098ca2f78c5e32f4c01b0891ca04fc78..345947367ec00a4fa440e162005864d9708eae6a 100644 (file)
@@ -154,11 +154,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
  * As above, but Transactional Memory is in use, so deliver sigcontexts
  * containing checkpointed and transactional register states.
  *
- * To do this, we treclaim to gather both sets of registers and set up the
- * 'normal' sigcontext registers with rolled-back register values such that a
- * simple signal handler sees a correct checkpointed register state.
- * If interested, a TM-aware sighandler can examine the transactional registers
- * in the 2nd sigcontext to determine the real origin of the signal.
+ * To do this, we treclaim (done before entering here) to gather both sets of
+ * registers and set up the 'normal' sigcontext registers with rolled-back
+ * register values such that a simple signal handler sees a correct
+ * checkpointed register state.  If interested, a TM-aware sighandler can
+ * examine the transactional registers in the 2nd sigcontext to determine the
+ * real origin of the signal.
  */
 static long setup_tm_sigcontexts(struct sigcontext __user *sc,
                                 struct sigcontext __user *tm_sc,
@@ -184,16 +185,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
 
        BUG_ON(!MSR_TM_ACTIVE(regs->msr));
 
-       /* tm_reclaim rolls back all reg states, saving checkpointed (older)
-        * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
-        * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
-        * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
-        * thread.fr[]/vr[]s.  The transactional (newer) GPRs are on the
-        * stack, in *regs.
-        */
-       tm_enable();
-       tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);
-
        flush_fp_to_thread(current);
 
 #ifdef CONFIG_ALTIVEC
@@ -711,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
        unsigned long newsp = 0;
        long err = 0;
 
-       frame = get_sigframe(ka, regs, sizeof(*frame), 0);
+       frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
        if (unlikely(frame == NULL))
                goto badframe;
 
index cd6e19d263b32ca403f49fa4409ca3fab6f88411..8a285876aef8a8e667091b5560ec1aa8756f5bc7 100644 (file)
@@ -126,11 +126,3 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
 
        return sys_sync_file_range(fd, offset, nbytes, flags);
 }
-
-asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
-                                        unsigned mask_hi, unsigned mask_lo,
-                                        int dfd, const char __user *pathname)
-{
-       u64 mask = ((u64)mask_hi << 32) | mask_lo;
-       return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
-}
index 83efa2f7d9266ced97726aa6b26c1739076a6ec1..f18c79c324eff539a25f8a5f10d13566a68eb573 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/kdebug.h>
 #include <linux/debugfs.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
 
 #include <asm/emulated_ops.h>
 #include <asm/pgtable.h>
@@ -52,6 +53,7 @@
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
 #include <asm/processor.h>
+#include <asm/tm.h>
 #endif
 #include <asm/kexec.h>
 #include <asm/ppc-opcode.h>
@@ -667,6 +669,7 @@ int machine_check_generic(struct pt_regs *regs)
 
 void machine_check_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
        int recover = 0;
 
        __get_cpu_var(irq_stat).mce_exceptions++;
@@ -683,7 +686,7 @@ void machine_check_exception(struct pt_regs *regs)
                recover = cur_cpu_spec->machine_check(regs);
 
        if (recover > 0)
-               return;
+               goto bail;
 
 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
        /* the qspan pci read routines can cause machine checks -- Cort
@@ -693,20 +696,23 @@ void machine_check_exception(struct pt_regs *regs)
         * -- BenH
         */
        bad_page_fault(regs, regs->dar, SIGBUS);
-       return;
+       goto bail;
 #endif
 
        if (debugger_fault_handler(regs))
-               return;
+               goto bail;
 
        if (check_io_access(regs))
-               return;
+               goto bail;
 
        die("Machine check", regs, SIGBUS);
 
        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable Machine check");
+
+bail:
+       exception_exit(prev_state);
 }
 
 void SMIException(struct pt_regs *regs)
@@ -716,20 +722,29 @@ void SMIException(struct pt_regs *regs)
 
 void unknown_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
+
        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
               regs->nip, regs->msr, regs->trap);
 
        _exception(SIGTRAP, regs, 0, 0);
+
+       exception_exit(prev_state);
 }
 
 void instruction_breakpoint_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
+
        if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
                                        5, SIGTRAP) == NOTIFY_STOP)
-               return;
+               goto bail;
        if (debugger_iabr_match(regs))
-               return;
+               goto bail;
        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+
+bail:
+       exception_exit(prev_state);
 }
 
 void RunModeException(struct pt_regs *regs)
@@ -739,15 +754,20 @@ void RunModeException(struct pt_regs *regs)
 
 void __kprobes single_step_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
+
        clear_single_step(regs);
 
        if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                                        5, SIGTRAP) == NOTIFY_STOP)
-               return;
+               goto bail;
        if (debugger_sstep(regs))
-               return;
+               goto bail;
 
        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+
+bail:
+       exception_exit(prev_state);
 }
 
 /*
@@ -913,6 +933,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword)
        return 0;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static inline bool tm_abort_check(struct pt_regs *regs, int cause)
+{
+        /* If we're emulating a load/store in an active transaction, we cannot
+         * emulate it as the kernel operates in transaction suspended context.
+         * We need to abort the transaction.  This creates a persistent TM
+         * abort so tell the user what caused it with a new code.
+        */
+       if (MSR_TM_TRANSACTIONAL(regs->msr)) {
+               tm_enable();
+               tm_abort(cause);
+               return true;
+       }
+       return false;
+}
+#else
+static inline bool tm_abort_check(struct pt_regs *regs, int reason)
+{
+       return false;
+}
+#endif
+
 static int emulate_instruction(struct pt_regs *regs)
 {
        u32 instword;
@@ -952,6 +994,9 @@ static int emulate_instruction(struct pt_regs *regs)
 
        /* Emulate load/store string insn. */
        if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
+               if (tm_abort_check(regs,
+                                  TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
+                       return -EINVAL;
                PPC_WARN_EMULATED(string, regs);
                return emulate_string_inst(regs, instword);
        }
@@ -1005,6 +1050,7 @@ int is_valid_bugaddr(unsigned long addr)
 
 void __kprobes program_check_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
        unsigned int reason = get_reason(regs);
        extern int do_mathemu(struct pt_regs *regs);
 
@@ -1014,26 +1060,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
        if (reason & REASON_FP) {
                /* IEEE FP exception */
                parse_fpe(regs);
-               return;
+               goto bail;
        }
        if (reason & REASON_TRAP) {
                /* Debugger is first in line to stop recursive faults in
                 * rcu_lock, notify_die, or atomic_notifier_call_chain */
                if (debugger_bpt(regs))
-                       return;
+                       goto bail;
 
                /* trap exception */
                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
                                == NOTIFY_STOP)
-                       return;
+                       goto bail;
 
                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
-                       return;
+                       goto bail;
                }
                _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-               return;
+               goto bail;
        }
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (reason & REASON_TM) {
@@ -1049,7 +1095,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
                if (!user_mode(regs) &&
                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
-                       return;
+                       goto bail;
                }
                /* If usermode caused this, it's done something illegal and
                 * gets a SIGILL slap on the wrist.  We call it an illegal
@@ -1059,7 +1105,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
                 */
                if (user_mode(regs)) {
                        _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
-                       return;
+                       goto bail;
                } else {
                        printk(KERN_EMERG "Unexpected TM Bad Thing exception "
                               "at %lx (msr 0x%x)\n", regs->nip, reason);
@@ -1083,16 +1129,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
        switch (do_mathemu(regs)) {
        case 0:
                emulate_single_step(regs);
-               return;
+               goto bail;
        case 1: {
                        int code = 0;
                        code = __parse_fpscr(current->thread.fpscr.val);
                        _exception(SIGFPE, regs, code, regs->nip);
-                       return;
+                       goto bail;
                }
        case -EFAULT:
                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-               return;
+               goto bail;
        }
        /* fall through on any other errors */
 #endif /* CONFIG_MATH_EMULATION */
@@ -1103,10 +1149,10 @@ void __kprobes program_check_exception(struct pt_regs *regs)
                case 0:
                        regs->nip += 4;
                        emulate_single_step(regs);
-                       return;
+                       goto bail;
                case -EFAULT:
                        _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-                       return;
+                       goto bail;
                }
        }
 
@@ -1114,16 +1160,23 @@ void __kprobes program_check_exception(struct pt_regs *regs)
                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
        else
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+
+bail:
+       exception_exit(prev_state);
 }
 
 void alignment_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
        int sig, code, fixed = 0;
 
        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();
 
+       if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
+               goto bail;
+
        /* we don't implement logging of alignment exceptions */
        if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
                fixed = fix_alignment(regs);
@@ -1131,7 +1184,7 @@ void alignment_exception(struct pt_regs *regs)
        if (fixed == 1) {
                regs->nip += 4; /* skip over emulated instruction */
                emulate_single_step(regs);
-               return;
+               goto bail;
        }
 
        /* Operand address was bad */
@@ -1146,6 +1199,9 @@ void alignment_exception(struct pt_regs *regs)
                _exception(sig, regs, code, regs->dar);
        else
                bad_page_fault(regs, regs->dar, sig);
+
+bail:
+       exception_exit(prev_state);
 }
 
 void StackOverflow(struct pt_regs *regs)
@@ -1174,23 +1230,32 @@ void trace_syscall(struct pt_regs *regs)
 
 void kernel_fp_unavailable_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
+
        printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
                          "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
+
+       exception_exit(prev_state);
 }
 
 void altivec_unavailable_exception(struct pt_regs *regs)
 {
+       enum ctx_state prev_state = exception_enter();
+
        if (user_mode(regs)) {
                /* A user program has executed an altivec instruction,
                   but this kernel doesn't support altivec. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-               return;
+               goto bail;
        }
 
        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
                        "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
+
+bail:
+       exception_exit(prev_state);
 }
 
 void vsx_unavailable_exception(struct pt_regs *regs)
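
Most handlers in this file now follow the same shape: exception_enter() at the top, every early return converted into a goto bail, and exception_exit() at the single exit point. A condensed sketch of that shape (the handler and the handled_early() helper below are illustrative, not kernel symbols):

#include <linux/types.h>
#include <linux/context_tracking.h>
#include <asm/ptrace.h>

/* Illustrative fast-path check standing in for notify_die()/debugger hooks */
static bool handled_early(struct pt_regs *regs)
{
        return false;
}

static void example_exception_handler(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        if (handled_early(regs))
                goto bail;

        /* ... main handler work, raising signals or dying as needed ... */

bail:
        exception_exit(prev_state);
}
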
index 13b867093499be5abccb014677371f8787390c7f..9d3fdcd66290b79c6200cd853a04eea73be1a3d0 100644 (file)
@@ -64,6 +64,9 @@ void __init udbg_early_init(void)
        udbg_init_usbgecko();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
        udbg_init_wsp();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
+       /* In memory console */
+       udbg_init_memcons();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC)
        udbg_init_ehv_bc();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC)
index 9de24f8e03c71b44e0407b65bbe137c3506650e3..550f5928b394f6cb4c1da71978031a1ce1f96b79 100644 (file)
@@ -562,6 +562,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        case H_CPPR:
        case H_EOI:
        case H_IPI:
+       case H_IPOLL:
+       case H_XIRR_X:
                if (kvmppc_xics_enabled(vcpu)) {
                        ret = kvmppc_xics_hcall(vcpu, req);
                        break;
index b24309c6c2d507d3f2c008358c3c0728d450a7cd..da0e0bc268bd4bce1322b696dd25972c24bc740e 100644 (file)
@@ -257,6 +257,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
        case H_CPPR:
        case H_EOI:
        case H_IPI:
+       case H_IPOLL:
+       case H_XIRR_X:
                if (kvmppc_xics_enabled(vcpu))
                        return kvmppc_h_pr_xics_hcall(vcpu, cmd);
                break;
index f7a103756618e530014d0dc03508a91ff00ded2b..94c1dd46b83d54e98a96c6af3b2e682072f027e6 100644 (file)
@@ -650,6 +650,23 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
        return H_SUCCESS;
 }
 
+static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+       union kvmppc_icp_state state;
+       struct kvmppc_icp *icp;
+
+       icp = vcpu->arch.icp;
+       if (icp->server_num != server) {
+               icp = kvmppc_xics_find_server(vcpu->kvm, server);
+               if (!icp)
+                       return H_PARAMETER;
+       }
+       state = ACCESS_ONCE(icp->state);
+       kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
+       kvmppc_set_gpr(vcpu, 5, state.mfrr);
+       return H_SUCCESS;
+}
+
 static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
        union kvmppc_icp_state old_state, new_state;
@@ -787,6 +804,18 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
        if (!xics || !vcpu->arch.icp)
                return H_HARDWARE;
 
+       /* These requests don't have real-mode implementations at present */
+       switch (req) {
+       case H_XIRR_X:
+               res = kvmppc_h_xirr(vcpu);
+               kvmppc_set_gpr(vcpu, 4, res);
+               kvmppc_set_gpr(vcpu, 5, get_tb());
+               return rc;
+       case H_IPOLL:
+               rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+               return rc;
+       }
+
        /* Check for real mode returning too hard */
        if (xics->real_mode)
                return kvmppc_xics_rm_complete(vcpu, req);
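
kvmppc_h_ipoll() above returns the polled ICP state in two registers: r4 holds CPPR in its top byte with XISR in the low 24 bits, and r5 holds MFRR, as the kvmppc_set_gpr() calls show. A small sketch of unpacking that layout on the consumer side (the struct and function names are invented for the example):

#include <stdint.h>

struct ipoll_state {
        uint8_t  cppr;  /* current processor priority */
        uint32_t xisr;  /* pending interrupt source (low 24 bits of r4) */
        uint8_t  mfrr;  /* most favoured request register */
};

/* Decode the registers as packed by kvmppc_h_ipoll():
 * r4 = (cppr << 24) | xisr, r5 = mfrr. */
static struct ipoll_state decode_h_ipoll(uint64_t r4, uint64_t r5)
{
        struct ipoll_state s;

        s.cppr = (r4 >> 24) & 0xff;
        s.xisr = r4 & 0x00ffffff;
        s.mfrr = r5 & 0xff;
        return s;
}
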
index 0ef75bf0695cee25185842625526e1d46a4e7026..395c594722a223e339944905ddeb675670c9085e 100644 (file)
@@ -28,13 +28,14 @@ _GLOBAL(copypage_power7)
         * aligned we don't need to clear the bottom 7 bits of either
         * address.
         */
-       ori     r9,r3,1         /* stream=1 */
+       ori     r9,r3,1         /* stream=1 => to */
 
 #ifdef CONFIG_PPC_64K_PAGES
-       lis     r7,0x0E01       /* depth=7, units=512 */
+       lis     r7,0x0E01       /* depth=7
+                                * units/cachelines=512 */
 #else
        lis     r7,0x0E00       /* depth=7 */
-       ori     r7,r7,0x1000    /* units=32 */
+       ori     r7,r7,0x1000    /* units/cachelines=32 */
 #endif
        ori     r10,r7,1        /* stream=1 */
 
@@ -43,12 +44,14 @@ _GLOBAL(copypage_power7)
 
 .machine push
 .machine "power4"
-       dcbt    r0,r4,0b01000
-       dcbt    r0,r7,0b01010
-       dcbtst  r0,r9,0b01000
-       dcbtst  r0,r10,0b01010
+       /* setup read stream 0  */
+       dcbt    r0,r4,0b01000   /* addr from */
+       dcbt    r0,r7,0b01010   /* length and depth from */
+       /* setup write stream 1 */
+       dcbtst  r0,r9,0b01000   /* addr to */
+       dcbtst  r0,r10,0b01010  /* length and depth to */
        eieio
-       dcbt    r0,r8,0b01010   /* GO */
+       dcbt    r0,r8,0b01010   /* all streams GO */
 .machine pop
 
 #ifdef CONFIG_ALTIVEC
index 0d24ff15f5f6fb197af85a4d9e537eb8880f099c..d1f11795a7ad64bd6bd05beb522e07e44eb46498 100644 (file)
@@ -318,12 +318,14 @@ err1;     stb     r0,0(r3)
 
 .machine push
 .machine "power4"
-       dcbt    r0,r6,0b01000
-       dcbt    r0,r7,0b01010
-       dcbtst  r0,r9,0b01000
-       dcbtst  r0,r10,0b01010
+       /* setup read stream 0 */
+       dcbt    r0,r6,0b01000   /* addr from */
+       dcbt    r0,r7,0b01010   /* length and depth from */
+       /* setup write stream 1 */
+       dcbtst  r0,r9,0b01000   /* addr to */
+       dcbtst  r0,r10,0b01010  /* length and depth to */
        eieio
-       dcbt    r0,r8,0b01010   /* GO */
+       dcbt    r0,r8,0b01010   /* all streams GO */
 .machine pop
 
        beq     cr1,.Lunwind_stack_nonvmx_copy
index 229951ffc35137beb826657c826813b9733eba9b..8726779e1409b5da36c1e1ea852276cf47d4252c 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/perf_event.h>
 #include <linux/magic.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
 int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                            unsigned long error_code)
 {
+       enum ctx_state prev_state = exception_enter();
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        int trap = TRAP(regs);
        int is_exec = trap == 0x400;
        int fault;
+       int rc = 0;
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        /*
@@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
         * look at it
         */
        if (error_code & ICSWX_DSI_UCT) {
-               int rc = acop_handle_fault(regs, address, error_code);
+               rc = acop_handle_fault(regs, address, error_code);
                if (rc)
-                       return rc;
+                       goto bail;
        }
 #endif /* CONFIG_PPC_ICSWX */
 
        if (notify_page_fault(regs))
-               return 0;
+               goto bail;
 
        if (unlikely(debugger_fault_handler(regs)))
-               return 0;
+               goto bail;
 
        /* On a kernel SLB miss we can only check for a valid exception entry */
-       if (!user_mode(regs) && (address >= TASK_SIZE))
-               return SIGSEGV;
+       if (!user_mode(regs) && (address >= TASK_SIZE)) {
+               rc = SIGSEGV;
+               goto bail;
+       }
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
                             defined(CONFIG_PPC_BOOK3S_64))
        if (error_code & DSISR_DABRMATCH) {
                /* breakpoint match */
                do_break(regs, address, error_code);
-               return 0;
+               goto bail;
        }
 #endif
 
@@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                local_irq_enable();
 
        if (in_atomic() || mm == NULL) {
-               if (!user_mode(regs))
-                       return SIGSEGV;
+               if (!user_mode(regs)) {
+                       rc = SIGSEGV;
+                       goto bail;
+               }
                /* in_atomic() in user mode is really bad,
                   as is current->mm == NULL. */
                printk(KERN_EMERG "Page fault in user mode with "
@@ -417,9 +424,11 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
         */
        fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
-               int rc = mm_fault_error(regs, address, fault);
+               rc = mm_fault_error(regs, address, fault);
                if (rc >= MM_FAULT_RETURN)
-                       return rc;
+                       goto bail;
+               else
+                       rc = 0;
        }
 
        /*
@@ -454,7 +463,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        }
 
        up_read(&mm->mmap_sem);
-       return 0;
+       goto bail;
 
 bad_area:
        up_read(&mm->mmap_sem);
@@ -463,7 +472,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
                _exception(SIGSEGV, regs, code, address);
-               return 0;
+               goto bail;
        }
 
        if (is_exec && (error_code & DSISR_PROTFAULT))
@@ -471,7 +480,11 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                                   " page (%lx) - exploit attempt? (uid: %d)\n",
                                   address, from_kuid(&init_user_ns, current_uid()));
 
-       return SIGSEGV;
+       rc = SIGSEGV;
+
+bail:
+       exception_exit(prev_state);
+       return rc;
 
 }
 
index 6a2aead5b0e5e1e78c5c742c12a97a83755859bc..4c122c3f1623c7525e682338f555f73678263f72 100644 (file)
@@ -336,11 +336,18 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 
        hpte_v = hptep->v;
        actual_psize = hpte_actual_psize(hptep, psize);
+       /*
+        * We need to invalidate the TLB always because hpte_remove doesn't do
+        * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+        * random entry from it. When we do that we don't invalidate the TLB
+        * (hpte_remove) because we assume the old translation is still
+        * technically "valid".
+        */
        if (actual_psize < 0) {
-               native_unlock_hpte(hptep);
-               return -1;
+               actual_psize = psize;
+               ret = -1;
+               goto err_out;
        }
-       /* Even if we miss, we need to invalidate the TLB */
        if (!HPTE_V_COMPARE(hpte_v, want_v)) {
                DBG_LOW(" -> miss\n");
                ret = -1;
@@ -350,6 +357,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
                        (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
        }
+err_out:
        native_unlock_hpte(hptep);
 
        /* Ensure it is out of the tlb too. */
@@ -409,7 +417,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
        hptep = htab_address + slot;
        actual_psize = hpte_actual_psize(hptep, psize);
        if (actual_psize < 0)
-               return;
+               actual_psize = psize;
 
        /* Update the HPTE */
        hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
@@ -437,21 +445,27 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
        hpte_v = hptep->v;
 
        actual_psize = hpte_actual_psize(hptep, psize);
+       /*
+        * We need to invalidate the TLB always because hpte_remove doesn't do
+        * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+        * random entry from it. When we do that we don't invalidate the TLB
+        * (hpte_remove) because we assume the old translation is still
+        * technically "valid".
+        */
        if (actual_psize < 0) {
+               actual_psize = psize;
                native_unlock_hpte(hptep);
-               local_irq_restore(flags);
-               return;
+               goto err_out;
        }
-       /* Even if we miss, we need to invalidate the TLB */
        if (!HPTE_V_COMPARE(hpte_v, want_v))
                native_unlock_hpte(hptep);
        else
                /* Invalidate the hpte. NOTE: this also unlocks it */
                hptep->v = 0;
 
+err_out:
        /* Invalidate the TLB */
        tlbie(vpn, psize, actual_psize, ssize, local);
-
        local_irq_restore(flags);
 }
 
index 88ac0eeaadde69bfeceb82d44527754638ed351b..e303a6d74e3a72ca2f1db230073f7ec9896a9895 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/memblock.h>
+#include <linux/context_tracking.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -954,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
  */
 int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 {
+       enum ctx_state prev_state = exception_enter();
        pgd_t *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
@@ -973,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                mm = current->mm;
                if (! mm) {
                        DBG_LOW(" user region with no mm !\n");
-                       return 1;
+                       rc = 1;
+                       goto bail;
                }
                psize = get_slice_psize(mm, ea);
                ssize = user_segment_size(ea);
@@ -992,19 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                /* Not a valid range
                 * Send the problem up to do_page_fault 
                 */
-               return 1;
+               rc = 1;
+               goto bail;
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
        /* Bad address. */
        if (!vsid) {
                DBG_LOW("Bad address!\n");
-               return 1;
+               rc = 1;
+               goto bail;
        }
        /* Get pgdir */
        pgdir = mm->pgd;
-       if (pgdir == NULL)
-               return 1;
+       if (pgdir == NULL) {
+               rc = 1;
+               goto bail;
+       }
 
        /* Check CPU locality */
        tmp = cpumask_of(smp_processor_id());
@@ -1027,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
        if (ptep == NULL || !pte_present(*ptep)) {
                DBG_LOW(" no PTE !\n");
-               return 1;
+               rc = 1;
+               goto bail;
        }
 
        /* Add _PAGE_PRESENT to the required access perm */
@@ -1038,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
         */
        if (access & ~pte_val(*ptep)) {
                DBG_LOW(" no access !\n");
-               return 1;
+               rc = 1;
+               goto bail;
        }
 
 #ifdef CONFIG_HUGETLB_PAGE
-       if (hugeshift)
-               return __hash_page_huge(ea, access, vsid, ptep, trap, local,
+       if (hugeshift) {
+               rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
                                        ssize, hugeshift, psize);
+               goto bail;
+       }
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #ifndef CONFIG_PPC_64K_PAGES
@@ -1124,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                pte_val(*(ptep + PTRS_PER_PTE)));
 #endif
        DBG_LOW(" -> rc=%d\n", rc);
+
+bail:
+       exception_exit(prev_state);
        return rc;
 }
 EXPORT_SYMBOL_GPL(hash_page);
@@ -1259,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local)
  */
 void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
 {
+       enum ctx_state prev_state = exception_enter();
+
        if (user_mode(regs)) {
 #ifdef CONFIG_PPC_SUBPAGE_PROT
                if (rc == -2)
@@ -1268,6 +1284,8 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
                        _exception(SIGBUS, regs, BUS_ADRERR, address);
        } else
                bad_page_fault(regs, address, SIGBUS);
+
+       exception_exit(prev_state);
 }
 
 long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
index c2787bf779ca0f8730f23b542f54c2657fe77471..a90b9c4589908078f139d953707b6ce13b6860d0 100644 (file)
@@ -215,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long phys)
 {
        int  mapped = htab_bolt_mapping(start, start + page_size, phys,
-                                       PAGE_KERNEL, mmu_vmemmap_psize,
+                                       pgprot_val(PAGE_KERNEL),
+                                       mmu_vmemmap_psize,
                                        mmu_kernel_ssize);
        BUG_ON(mapped < 0);
 }
index c627843c5b2eff6084cfd3d23972e2cbd5735d7d..845c867444e6ff27bffc1a203790bce5543c5e12 100644 (file)
 #include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 #include <asm/reg.h>
 #include <asm/pmc.h>
 #include <asm/machdep.h>
 #include <asm/firmware.h>
 #include <asm/ptrace.h>
+#include <asm/code-patching.h>
 
 #define BHRB_MAX_ENTRIES       32
 #define BHRB_TARGET            0x0000000000000002
@@ -100,11 +102,15 @@ static inline int siar_valid(struct pt_regs *regs)
        return 1;
 }
 
+static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
+static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
+void power_pmu_flush_branch_stack(void) {}
+static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
 #endif /* CONFIG_PPC32 */
 
 static bool regs_use_siar(struct pt_regs *regs)
 {
-       return !!(regs->result & 1);
+       return !!regs->result;
 }
 
 /*
@@ -130,22 +136,30 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  * If we're not doing instruction sampling, give them the SDAR
  * (sampled data address).  If we are doing instruction sampling, then
  * only give them the SDAR if it corresponds to the instruction
- * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
- * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
+ * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
  */
 static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 {
        unsigned long mmcra = regs->dsisr;
-       unsigned long sdsync;
+       bool sdar_valid;
 
-       if (ppmu->flags & PPMU_SIAR_VALID)
-               sdsync = POWER7P_MMCRA_SDAR_VALID;
-       else if (ppmu->flags & PPMU_ALT_SIPR)
-               sdsync = POWER6_MMCRA_SDSYNC;
-       else
-               sdsync = MMCRA_SDSYNC;
+       if (ppmu->flags & PPMU_HAS_SIER)
+               sdar_valid = regs->dar & SIER_SDAR_VALID;
+       else {
+               unsigned long sdsync;
+
+               if (ppmu->flags & PPMU_SIAR_VALID)
+                       sdsync = POWER7P_MMCRA_SDAR_VALID;
+               else if (ppmu->flags & PPMU_ALT_SIPR)
+                       sdsync = POWER6_MMCRA_SDSYNC;
+               else
+                       sdsync = MMCRA_SDSYNC;
 
-       if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+               sdar_valid = mmcra & sdsync;
+       }
+
+       if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
                *addrp = mfspr(SPRN_SDAR);
 }
 
@@ -175,11 +189,6 @@ static bool regs_sipr(struct pt_regs *regs)
        return !!(regs->dsisr & sipr);
 }
 
-static bool regs_no_sipr(struct pt_regs *regs)
-{
-       return !!(regs->result & 2);
-}
-
 static inline u32 perf_flags_from_msr(struct pt_regs *regs)
 {
        if (regs->msr & MSR_PR)
@@ -202,7 +211,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
         * SIAR which should give slightly more reliable
         * results
         */
-       if (regs_no_sipr(regs)) {
+       if (ppmu->flags & PPMU_NO_SIPR) {
                unsigned long siar = mfspr(SPRN_SIAR);
                if (siar >= PAGE_OFFSET)
                        return PERF_RECORD_MISC_KERNEL;
@@ -233,22 +242,9 @@ static inline void perf_read_regs(struct pt_regs *regs)
        int use_siar;
 
        regs->dsisr = mmcra;
-       regs->result = 0;
-
-       if (ppmu->flags & PPMU_NO_SIPR)
-               regs->result |= 2;
-
-       /*
-        * On power8 if we're in random sampling mode, the SIER is updated.
-        * If we're in continuous sampling mode, we don't have SIPR.
-        */
-       if (ppmu->flags & PPMU_HAS_SIER) {
-               if (marked)
-                       regs->dar = mfspr(SPRN_SIER);
-               else
-                       regs->result |= 2;
-       }
 
+       if (ppmu->flags & PPMU_HAS_SIER)
+               regs->dar = mfspr(SPRN_SIER);
 
        /*
         * If this isn't a PMU exception (eg a software event) the SIAR is
@@ -273,12 +269,12 @@ static inline void perf_read_regs(struct pt_regs *regs)
                use_siar = 1;
        else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
                use_siar = 0;
-       else if (!regs_no_sipr(regs) && regs_sipr(regs))
+       else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
                use_siar = 0;
        else
                use_siar = 1;
 
-       regs->result |= use_siar;
+       regs->result = use_siar;
 }
 
 /*
@@ -302,12 +298,170 @@ static inline int siar_valid(struct pt_regs *regs)
        unsigned long mmcra = regs->dsisr;
        int marked = mmcra & MMCRA_SAMPLE_ENABLE;
 
-       if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
-               return mmcra & POWER7P_MMCRA_SIAR_VALID;
+       if (marked) {
+               if (ppmu->flags & PPMU_HAS_SIER)
+                       return regs->dar & SIER_SIAR_VALID;
+
+               if (ppmu->flags & PPMU_SIAR_VALID)
+                       return mmcra & POWER7P_MMCRA_SIAR_VALID;
+       }
 
        return 1;
 }
 
+
+/* Reset all possible BHRB entries */
+static void power_pmu_bhrb_reset(void)
+{
+       asm volatile(PPC_CLRBHRB);
+}
+
+static void power_pmu_bhrb_enable(struct perf_event *event)
+{
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+       if (!ppmu->bhrb_nr)
+               return;
+
+       /* Clear BHRB if we changed task context to avoid data leaks */
+       if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
+               power_pmu_bhrb_reset();
+               cpuhw->bhrb_context = event->ctx;
+       }
+       cpuhw->bhrb_users++;
+}
+
+static void power_pmu_bhrb_disable(struct perf_event *event)
+{
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+       if (!ppmu->bhrb_nr)
+               return;
+
+       cpuhw->bhrb_users--;
+       WARN_ON_ONCE(cpuhw->bhrb_users < 0);
+
+       if (!cpuhw->disabled && !cpuhw->bhrb_users) {
+               /* BHRB cannot be turned off when other
+                * events are active on the PMU.
+                */
+
+               /* avoid stale pointer */
+               cpuhw->bhrb_context = NULL;
+       }
+}
+
+/* Called from ctxsw to prevent one process's branch entries from
+ * mingling with the other process's entries during a context switch.
+ */
+void power_pmu_flush_branch_stack(void)
+{
+       if (ppmu->bhrb_nr)
+               power_pmu_bhrb_reset();
+}
+/* Calculate the to address for a branch */
+static __u64 power_pmu_bhrb_to(u64 addr)
+{
+       unsigned int instr;
+       int ret;
+       __u64 target;
+
+       if (is_kernel_addr(addr))
+               return branch_target((unsigned int *)addr);
+
+       /* Userspace: need to copy the instruction here, then translate it */
+       pagefault_disable();
+       ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
+       if (ret) {
+               pagefault_enable();
+               return 0;
+       }
+       pagefault_enable();
+
+       target = branch_target(&instr);
+       if ((!target) || (instr & BRANCH_ABSOLUTE))
+               return target;
+
+       /* Translate relative branch target from kernel to user address */
+       return target - (unsigned long)&instr + addr;
+}
+
+/* Processing BHRB entries */
+void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+{
+       u64 val;
+       u64 addr;
+       int r_index, u_index, pred;
+
+       r_index = 0;
+       u_index = 0;
+       while (r_index < ppmu->bhrb_nr) {
+               /* Assembly read function */
+               val = read_bhrb(r_index++);
+               if (!val)
+                       /* Terminal marker: End of valid BHRB entries */
+                       break;
+               else {
+                       addr = val & BHRB_EA;
+                       pred = val & BHRB_PREDICTION;
+
+                       if (!addr)
+                               /* invalid entry */
+                               continue;
+
+                       /* Branches are read most recent first (ie. mfbhrb 0 is
+                        * the most recent branch).
+                        * There are two types of valid entries:
+                        * 1) a target entry which is the to address of a
+                        *    computed goto like a blr,bctr,btar.  The next
+                        *    entry read from the bhrb will be branch
+                        *    corresponding to this target (ie. the actual
+                        *    blr/bctr/btar instruction).
+                        * 2) a from address which is an actual branch.  If a
+                        *    target entry precedes this, then this is the
+                        *    matching branch for that target.  If this is not
+                        *    following a target entry, then this is a branch
+                        *    where the target is given as an immediate field
+                        *    in the instruction (ie. an i or b form branch).
+                        *    In this case we need to read the instruction from
+                        *    memory to determine the target/to address.
+                        */
+
+                       if (val & BHRB_TARGET) {
+                               /* Target branches use two entries
+                                * (ie. computed gotos/XL form)
+                                */
+                               cpuhw->bhrb_entries[u_index].to = addr;
+                               cpuhw->bhrb_entries[u_index].mispred = pred;
+                               cpuhw->bhrb_entries[u_index].predicted = ~pred;
+
+                               /* Get from address in next entry */
+                               val = read_bhrb(r_index++);
+                               addr = val & BHRB_EA;
+                               if (val & BHRB_TARGET) {
+                                       /* Shouldn't have two targets in a
+                                          row.. Reset index and try again */
+                                       r_index--;
+                                       addr = 0;
+                               }
+                               cpuhw->bhrb_entries[u_index].from = addr;
+                       } else {
+                               /* Branches to immediate field 
+                                  (ie I or B form) */
+                               cpuhw->bhrb_entries[u_index].from = addr;
+                               cpuhw->bhrb_entries[u_index].to =
+                                       power_pmu_bhrb_to(addr);
+                               cpuhw->bhrb_entries[u_index].mispred = pred;
+                               cpuhw->bhrb_entries[u_index].predicted = ~pred;
+                       }
+                       u_index++;
+
+               }
+       }
+       cpuhw->bhrb_stack.nr = u_index;
+       return;
+}
+
 #endif /* CONFIG_PPC64 */
 
 static void perf_event_interrupt(struct pt_regs *regs);
@@ -904,47 +1058,6 @@ static int collect_events(struct perf_event *group, int max_count,
        return n;
 }
 
-/* Reset all possible BHRB entries */
-static void power_pmu_bhrb_reset(void)
-{
-       asm volatile(PPC_CLRBHRB);
-}
-
-void power_pmu_bhrb_enable(struct perf_event *event)
-{
-       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
-       if (!ppmu->bhrb_nr)
-               return;
-
-       /* Clear BHRB if we changed task context to avoid data leaks */
-       if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
-               power_pmu_bhrb_reset();
-               cpuhw->bhrb_context = event->ctx;
-       }
-       cpuhw->bhrb_users++;
-}
-
-void power_pmu_bhrb_disable(struct perf_event *event)
-{
-       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
-       if (!ppmu->bhrb_nr)
-               return;
-
-       cpuhw->bhrb_users--;
-       WARN_ON_ONCE(cpuhw->bhrb_users < 0);
-
-       if (!cpuhw->disabled && !cpuhw->bhrb_users) {
-               /* BHRB cannot be turned off when other
-                * events are active on the PMU.
-                */
-
-               /* avoid stale pointer */
-               cpuhw->bhrb_context = NULL;
-       }
-}
-
 /*
  * Add an event to the PMU.
  * If all events are not already frozen, then we disable and
@@ -1180,15 +1293,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
        return 0;
 }
 
-/* Called from ctxsw to prevent one process's branch entries to
- * mingle with the other process's entries during context switch.
- */
-void power_pmu_flush_branch_stack(void)
-{
-       if (ppmu->bhrb_nr)
-               power_pmu_bhrb_reset();
-}
-
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
@@ -1458,77 +1562,6 @@ struct pmu power_pmu = {
        .flush_branch_stack = power_pmu_flush_branch_stack,
 };
 
-/* Processing BHRB entries */
-void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
-{
-       u64 val;
-       u64 addr;
-       int r_index, u_index, target, pred;
-
-       r_index = 0;
-       u_index = 0;
-       while (r_index < ppmu->bhrb_nr) {
-               /* Assembly read function */
-               val = read_bhrb(r_index);
-
-               /* Terminal marker: End of valid BHRB entries */
-               if (val == 0) {
-                       break;
-               } else {
-                       /* BHRB field break up */
-                       addr = val & BHRB_EA;
-                       pred = val & BHRB_PREDICTION;
-                       target = val & BHRB_TARGET;
-
-                       /* Probable Missed entry: Not applicable for POWER8 */
-                       if ((addr == 0) && (target == 0) && (pred == 1)) {
-                               r_index++;
-                               continue;
-                       }
-
-                       /* Real Missed entry: Power8 based missed entry */
-                       if ((addr == 0) && (target == 1) && (pred == 1)) {
-                               r_index++;
-                               continue;
-                       }
-
-                       /* Reserved condition: Not a valid entry  */
-                       if ((addr == 0) && (target == 1) && (pred == 0)) {
-                               r_index++;
-                               continue;
-                       }
-
-                       /* Is a target address */
-                       if (val & BHRB_TARGET) {
-                               /* First address cannot be a target address */
-                               if (r_index == 0) {
-                                       r_index++;
-                                       continue;
-                               }
-
-                               /* Update target address for the previous entry */
-                               cpuhw->bhrb_entries[u_index - 1].to = addr;
-                               cpuhw->bhrb_entries[u_index - 1].mispred = pred;
-                               cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
-
-                               /* Dont increment u_index */
-                               r_index++;
-                       } else {
-                               /* Update address, flags for current entry */
-                               cpuhw->bhrb_entries[u_index].from = addr;
-                               cpuhw->bhrb_entries[u_index].mispred = pred;
-                               cpuhw->bhrb_entries[u_index].predicted = ~pred;
-
-                               /* Successfully popullated one entry */
-                               u_index++;
-                               r_index++;
-                       }
-               }
-       }
-       cpuhw->bhrb_stack.nr = u_index;
-       return;
-}
-
 /*
  * A counter has overflowed; update its count and record
  * things if requested.  Note that interrupts are hard-disabled
index a881232a3cce1bdafee6f5f2a6536a89afe188ab..b62aab3e22ecd0447caf0db7ff97c2c16bc12644 100644 (file)
@@ -128,7 +128,7 @@ config PPC_RTAS_DAEMON
 
 config RTAS_PROC
        bool "Proc interface to RTAS"
-       depends on PPC_RTAS
+       depends on PPC_RTAS && PROC_FS
        default y
 
 config RTAS_FLASH
index d3e840d643af5135dc0b4e0acb6cec114967264a..c24684c818ab015cc9140d9870f26dc97bfb162a 100644 (file)
@@ -6,6 +6,7 @@ config PPC_POWERNV
        select PPC_ICP_NATIVE
        select PPC_P7_NAP
        select PPC_PCI_CHOICE if EMBEDDED
+       select EPAPR_BOOT
        default y
 
 config POWERNV_MSI
index ade4463226c612fd338b5cbaed1cb941d14881a9..628c564ceadbb32b1c96287f3ba5cb43390cc56a 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 #include <asm/opal.h>
 #include <asm/firmware.h>
 
@@ -28,6 +29,8 @@ struct opal {
 static struct device_node *opal_node;
 static DEFINE_SPINLOCK(opal_write_lock);
 extern u64 opal_mc_secondary_handler[];
+static unsigned int *opal_irqs;
+static unsigned int opal_irq_count;
 
 int __init early_init_dt_scan_opal(unsigned long node,
                                   const char *uname, int depth, void *data)
@@ -53,7 +56,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
                 opal.entry, entryp, entrysz);
 
        powerpc_firmware_features |= FW_FEATURE_OPAL;
-       if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
+       if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
+               powerpc_firmware_features |= FW_FEATURE_OPALv2;
+               powerpc_firmware_features |= FW_FEATURE_OPALv3;
+               printk("OPAL V3 detected !\n");
+       } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
                powerpc_firmware_features |= FW_FEATURE_OPALv2;
                printk("OPAL V2 detected !\n");
        } else {
@@ -144,6 +151,13 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
                                rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
                len = total_len;
                rc = opal_console_write(vtermno, &len, data);
+
+               /* Closed or other error drop */
+               if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
+                   rc != OPAL_BUSY_EVENT) {
+                       written = total_len;
+                       break;
+               }
                if (rc == OPAL_SUCCESS) {
                        total_len -= len;
                        data += len;
@@ -316,6 +330,8 @@ static int __init opal_init(void)
        irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
        pr_debug("opal: Found %d interrupts reserved for OPAL\n",
                 irqs ? (irqlen / 4) : 0);
+       opal_irq_count = irqlen / 4;
+       opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
        for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
                unsigned int hwirq = be32_to_cpup(irqs);
                unsigned int irq = irq_create_mapping(NULL, hwirq);
@@ -327,7 +343,19 @@ static int __init opal_init(void)
                if (rc)
                        pr_warning("opal: Error %d requesting irq %d"
                                   " (0x%x)\n", rc, irq, hwirq);
+               opal_irqs[i] = irq;
        }
        return 0;
 }
 subsys_initcall(opal_init);
+
+void opal_shutdown(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < opal_irq_count; i++) {
+               if (opal_irqs[i])
+                       free_irq(opal_irqs[i], 0);
+               opal_irqs[i] = 0;
+       }
+}
index 1da578b7c1bfc61d80902df4fdf3e2148fc98d8e..9c9d15e4cdf2700f803471667f83720430844c68 100644 (file)
@@ -68,16 +68,6 @@ define_pe_printk_level(pe_err, KERN_ERR);
 define_pe_printk_level(pe_warn, KERN_WARNING);
 define_pe_printk_level(pe_info, KERN_INFO);
 
-static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
-{
-       struct device_node *np;
-
-       np = pci_device_to_OF_node(dev);
-       if (!np)
-               return NULL;
-       return PCI_DN(np);
-}
-
 static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
 {
        unsigned long pe;
@@ -110,7 +100,7 @@ static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
 {
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        struct pnv_phb *phb = hose->private_data;
-       struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+       struct pci_dn *pdn = pci_get_pdn(dev);
 
        if (!pdn)
                return NULL;
@@ -173,7 +163,7 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 
        /* Add to all parents PELT-V */
        while (parent) {
-               struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
+               struct pci_dn *pdn = pci_get_pdn(parent);
                if (pdn && pdn->pe_number != IODA_INVALID_PE) {
                        rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
                                                pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
@@ -252,7 +242,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 {
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        struct pnv_phb *phb = hose->private_data;
-       struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+       struct pci_dn *pdn = pci_get_pdn(dev);
        struct pnv_ioda_pe *pe;
        int pe_num;
 
@@ -323,7 +313,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
        struct pci_dev *dev;
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
-               struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
+               struct pci_dn *pdn = pci_get_pdn(dev);
 
                if (pdn == NULL) {
                        pr_warn("%s: No device node associated with device !\n",
@@ -436,7 +426,7 @@ static void pnv_pci_ioda_setup_PEs(void)
 
 static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
 {
-       struct pci_dn *pdn = pnv_ioda_get_pdn(pdev);
+       struct pci_dn *pdn = pci_get_pdn(pdev);
        struct pnv_ioda_pe *pe;
 
        /*
@@ -768,6 +758,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                                  unsigned int is_64, struct msi_msg *msg)
 {
        struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
+       struct pci_dn *pdn = pci_get_pdn(dev);
        struct irq_data *idata;
        struct irq_chip *ichip;
        unsigned int xive_num = hwirq - phb->msi_base;
@@ -783,6 +774,10 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
        if (pe->mve_number < 0)
                return -ENXIO;
 
+       /* Force 32-bit MSI on some broken devices */
+       if (pdn && pdn->force_32bit_msi)
+               is_64 = 0;
+
        /* Assign XIVE to PE */
        rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
        if (rc) {
@@ -1035,7 +1030,7 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev)
        if (!phb->initialized)
                return 0;
 
-       pdn = pnv_ioda_get_pdn(dev);
+       pdn = pci_get_pdn(dev);
        if (!pdn || pdn->pe_number == IODA_INVALID_PE)
                return -EINVAL;
 
@@ -1048,6 +1043,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
        return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
 }
 
+static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
+{
+       opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
+                      OPAL_ASSERT_RESET);
+}
+
 void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
 {
        struct pci_controller *hose;
@@ -1178,6 +1179,9 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
        /* Setup TCEs */
        phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
 
+       /* Setup shutdown function for kexec */
+       phb->shutdown = pnv_pci_ioda_shutdown;
+
        /* Setup MSI support */
        pnv_pci_init_ioda_msis(phb);
 
index 55dfca844ddf0f8f38cbb5cb1c6863b71e217110..277343cc6a3d7f87966408f088a33b61f502ecda 100644 (file)
@@ -47,6 +47,10 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
 {
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
+       struct pci_dn *pdn = pci_get_pdn(pdev);
+
+       if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
+               return -ENODEV;
 
        return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
 }
@@ -367,7 +371,7 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
        while (npages--)
                *(tcep++) = 0;
 
-       if (tbl->it_type & TCE_PCI_SWINV_CREATE)
+       if (tbl->it_type & TCE_PCI_SWINV_FREE)
                pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
 }
 
@@ -450,6 +454,18 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
                pnv_pci_dma_fallback_setup(hose, pdev);
 }
 
+void pnv_pci_shutdown(void)
+{
+       struct pci_controller *hose;
+
+       list_for_each_entry(hose, &hose_list, list_node) {
+               struct pnv_phb *phb = hose->private_data;
+
+               if (phb && phb->shutdown)
+                       phb->shutdown(phb);
+       }
+}
+
 /* Fixup wrong class code in p7ioc and p8 root complex */
 static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
 {
index 48dc4bb856a14932878f29b917b5d7551897de66..25d76c4df50b27711c6bd1bb40481216a7009eb6 100644 (file)
@@ -86,6 +86,7 @@ struct pnv_phb {
        void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
        void (*fixup_phb)(struct pci_controller *hose);
        u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
+       void (*shutdown)(struct pnv_phb *phb);
 
        union {
                struct {
@@ -158,4 +159,5 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np);
 extern void pnv_pci_init_ioda2_phb(struct device_node *np);
 extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
                                        u64 *startp, u64 *endp);
+
 #endif /* __POWERNV_PCI_H */
index 8a9df7f9667ede1a97abb5aee79a14843aa5a6ab..a1c6f83fc3916efab1a092e1c7b102ff4a3c788a 100644 (file)
@@ -9,8 +9,10 @@ static inline void pnv_smp_init(void) { }
 
 #ifdef CONFIG_PCI
 extern void pnv_pci_init(void);
+extern void pnv_pci_shutdown(void);
 #else
 static inline void pnv_pci_init(void) { }
+static inline void pnv_pci_shutdown(void) { }
 #endif
 
 #endif /* _POWERNV_H */
index db1ad1c8f68fd1ed18a91fbc542a55c9e31d60b1..d4459bfc92f76a7bd45c6714395023786f200cc9 100644 (file)
@@ -78,7 +78,9 @@ static void pnv_show_cpuinfo(struct seq_file *m)
        if (root)
                model = of_get_property(root, "model", NULL);
        seq_printf(m, "machine\t\t: PowerNV %s\n", model);
-       if (firmware_has_feature(FW_FEATURE_OPALv2))
+       if (firmware_has_feature(FW_FEATURE_OPALv3))
+               seq_printf(m, "firmware\t: OPAL v3\n");
+       else if (firmware_has_feature(FW_FEATURE_OPALv2))
                seq_printf(m, "firmware\t: OPAL v2\n");
        else if (firmware_has_feature(FW_FEATURE_OPAL))
                seq_printf(m, "firmware\t: OPAL v1\n");
@@ -126,6 +128,17 @@ static void pnv_progress(char *s, unsigned short hex)
 {
 }
 
+static void pnv_shutdown(void)
+{
+       /* Let the PCI code clear up IODA tables */
+       pnv_pci_shutdown();
+
+       /* And unregister all OPAL interrupts so they don't fire
+        * up while we kexec
+        */
+       opal_shutdown();
+}
+
 #ifdef CONFIG_KEXEC
 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
@@ -187,6 +200,7 @@ define_machine(powernv) {
        .init_IRQ               = pnv_init_IRQ,
        .show_cpuinfo           = pnv_show_cpuinfo,
        .progress               = pnv_progress,
+       .machine_shutdown       = pnv_shutdown,
        .power_save             = power7_idle,
        .calibrate_decr         = generic_calibrate_decr,
 #ifdef CONFIG_KEXEC
index 6a3ecca5b7253e5dafe06f488f393eeb7c439186..88c9459c3e07121a64765e4304bcbb4dd7a1b2a4 100644 (file)
@@ -71,18 +71,68 @@ int pnv_smp_kick_cpu(int nr)
 
        BUG_ON(nr < 0 || nr >= NR_CPUS);
 
-       /* On OPAL v2 the CPU are still spinning inside OPAL itself,
-        * get them back now
+       /*
+        * If we already started or OPALv2 is not supported, we just
+        * kick the CPU via the PACA
         */
-       if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) {
-               pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
-               rc = opal_start_cpu(pcpu, start_here);
+       if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
+               goto kick;
+
+       /*
+        * At this point, the CPU can either be spinning on the way in
+        * from kexec or be inside OPAL waiting to be started for the
+        * first time. OPAL v3 allows us to query OPAL to know if it
+        * has the CPUs, so we do that
+        */
+       if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+               uint8_t status;
+
+               rc = opal_query_cpu_status(pcpu, &status);
                if (rc != OPAL_SUCCESS) {
-                       pr_warn("OPAL Error %ld starting CPU %d\n",
+                       pr_warn("OPAL Error %ld querying CPU %d state\n",
                                rc, nr);
                        return -ENODEV;
                }
+
+               /*
+                * Already started, just kick it, probably coming from
+                * kexec and spinning
+                */
+               if (status == OPAL_THREAD_STARTED)
+                       goto kick;
+
+               /*
+                * Available/inactive, let's kick it
+                */
+               if (status == OPAL_THREAD_INACTIVE) {
+                       pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
+                                nr, pcpu);
+                       rc = opal_start_cpu(pcpu, start_here);
+                       if (rc != OPAL_SUCCESS) {
+                               pr_warn("OPAL Error %ld starting CPU %d\n",
+                                       rc, nr);
+                               return -ENODEV;
+                       }
+               } else {
+                       /*
+                        * An unavailable CPU (or any other unknown status)
+                        * shouldn't be started. It should also
+                        * not be in the possible map, but currently it can
+                        * happen.
+                        */
+                       pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
+                                " (status %d)...\n", nr, pcpu, status);
+                       return -ENODEV;
+               }
+       } else {
+               /*
+                * On OPAL v2, we just kick it and hope for the best;
+                * we must not test the error from opal_start_cpu() or
+                * we would fail to get CPUs from kexec.
+                */
+               opal_start_cpu(pcpu, start_here);
        }
+ kick:
        return smp_generic_kick_cpu(nr);
 }
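Condensed for illustration only, the decision the comments in this hunk describe boils down to roughly the following sketch; the helper names come from the hunk itself, but the wrapper, its signature and the trimmed error handling are assumptions.

static int kick_cpu_sketch(int nr, int pcpu, unsigned long start_here)
{
	uint8_t status;

	/* Already running, or no OPAL v2: the PACA kick alone is enough */
	if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
		return smp_generic_kick_cpu(nr);

	if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
		/* OPAL v2: fire and forget, the CPU may be spinning from kexec */
		opal_start_cpu(pcpu, start_here);
		return smp_generic_kick_cpu(nr);
	}

	/* OPAL v3: ask firmware whether it still owns the thread */
	if (opal_query_cpu_status(pcpu, &status) != OPAL_SUCCESS)
		return -ENODEV;
	if (status == OPAL_THREAD_INACTIVE &&
	    opal_start_cpu(pcpu, start_here) != OPAL_SUCCESS)
		return -ENODEV;
	if (status != OPAL_THREAD_STARTED && status != OPAL_THREAD_INACTIVE)
		return -ENODEV;		/* unavailable or unknown state */

	return smp_generic_kick_cpu(nr);
}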
 
index 9a0941bc4d31b30ed78cd2ff0527861ce1570b8a..4459eff7a75ad6b3f4135f015591e2b9575dcec4 100644 (file)
@@ -18,6 +18,9 @@ config PPC_PSERIES
        select PPC_PCI_CHOICE if EXPERT
        select ZLIB_DEFLATE
        select PPC_DOORBELL
+       select HAVE_CONTEXT_TRACKING
+       select HOTPLUG if SMP
+       select HOTPLUG_CPU if SMP
        default y
 
 config PPC_SPLPAR
index 420524e6f8c95c6118462cefb80723312d213437..6d2f0abce6fae652d207b499ac99a47e20a500f2 100644 (file)
@@ -26,26 +26,6 @@ static int query_token, change_token;
 #define RTAS_CHANGE_MSIX_FN    4
 #define RTAS_CHANGE_32MSI_FN   5
 
-static struct pci_dn *get_pdn(struct pci_dev *pdev)
-{
-       struct device_node *dn;
-       struct pci_dn *pdn;
-
-       dn = pci_device_to_OF_node(pdev);
-       if (!dn) {
-               dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n");
-               return NULL;
-       }
-
-       pdn = PCI_DN(dn);
-       if (!pdn) {
-               dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n");
-               return NULL;
-       }
-
-       return pdn;
-}
-
 /* RTAS Helpers */
 
 static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
@@ -91,7 +71,7 @@ static void rtas_disable_msi(struct pci_dev *pdev)
 {
        struct pci_dn *pdn;
 
-       pdn = get_pdn(pdev);
+       pdn = pci_get_pdn(pdev);
        if (!pdn)
                return;
 
@@ -152,7 +132,7 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
        struct pci_dn *pdn;
        const u32 *req_msi;
 
-       pdn = get_pdn(pdev);
+       pdn = pci_get_pdn(pdev);
        if (!pdn)
                return -ENODEV;
 
@@ -394,6 +374,23 @@ static int check_msix_entries(struct pci_dev *pdev)
        return 0;
 }
 
+static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
+{
+       u32 addr_hi, addr_lo;
+
+       /*
+        * We should only get in here for IODA1 configs. This is based on the
+        * fact that we are using RTAS for MSIs, we don't have the 32 bit MSI RTAS
+        * support, and we are in a PCIe Gen2 slot.
+        */
+       dev_info(&pdev->dev,
+                "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n");
+       pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi);
+       addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
+       pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo);
+       pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0);
+}
+
 static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
 {
        struct pci_dn *pdn;
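A quick worked example of the address fold in rtas_hack_32bit_msi_gen2() above, using a purely hypothetical register value (plain arithmetic, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical upper dword of the device's 64-bit MSI address */
	uint32_t addr_hi = 0x00010000;

	/* Same fold as the hack: take bits 63:48 of the 64-bit address,
	 * shift them up by 4 and park them over a 0xffff0000 base. */
	uint32_t addr_lo = 0xffff0000u | ((addr_hi >> (48 - 32)) << 4);

	printf("addr_lo = 0x%08x\n", addr_lo);	/* prints 0xffff0010 */
	return 0;
}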
@@ -401,8 +398,9 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
        struct msi_desc *entry;
        struct msi_msg msg;
        int nvec = nvec_in;
+       int use_32bit_msi_hack = 0;
 
-       pdn = get_pdn(pdev);
+       pdn = pci_get_pdn(pdev);
        if (!pdn)
                return -ENODEV;
 
@@ -428,15 +426,31 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
         */
 again:
        if (type == PCI_CAP_ID_MSI) {
-               if (pdn->force_32bit_msi)
+               if (pdn->force_32bit_msi) {
                        rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
-               else
+                       if (rc < 0) {
+                               /*
+                                * We only want to run the 32 bit MSI hack below if
+                                * the max bus speed is Gen2 speed
+                                */
+                               if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT)
+                                       return rc;
+
+                               use_32bit_msi_hack = 1;
+                       }
+               } else
+                       rc = -1;
+
+               if (rc < 0)
                        rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
 
-               if (rc < 0 && !pdn->force_32bit_msi) {
+               if (rc < 0) {
                        pr_debug("rtas_msi: trying the old firmware call.\n");
                        rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
                }
+
+               if (use_32bit_msi_hack && rc > 0)
+                       rtas_hack_32bit_msi_gen2(pdev);
        } else
                rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
 
@@ -518,12 +532,3 @@ static int rtas_msi_init(void)
 }
 arch_initcall(rtas_msi_init);
 
-static void quirk_radeon(struct pci_dev *dev)
-{
-       struct pci_dn *pdn = get_pdn(dev);
-
-       if (pdn)
-               pdn->force_32bit_msi = 1;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon);
index 47226e04126d10e930a7a4a1bf42aa1c9abedb7a..5f997e79d570f8fb704cf9ec79ef49cb538b3b28 100644 (file)
@@ -16,6 +16,7 @@
   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   */
 
+#include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/suspend.h>
 #include <linux/stat.h>
@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
 {
+       cpumask_var_t offline_mask;
        int rc;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+               return -ENOMEM;
+
        stream_id = simple_strtoul(buf, NULL, 16);
 
        do {
@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
        } while (rc == -EAGAIN);
 
        if (!rc) {
+               /* All present CPUs must be online */
+               cpumask_andnot(offline_mask, cpu_present_mask,
+                               cpu_online_mask);
+               rc = rtas_online_cpus_mask(offline_mask);
+               if (rc) {
+                       pr_err("%s: Could not bring present CPUs online.\n",
+                                       __func__);
+                       goto out;
+               }
+
                stop_topology_update();
                rc = pm_suspend(PM_SUSPEND_MEM);
                start_topology_update();
+
+               /* Take down CPUs not online prior to suspend */
+               if (!rtas_offline_cpus_mask(offline_mask))
+                       pr_warn("%s: Could not restore CPUs to offline "
+                                       "state.\n", __func__);
        }
 
        stream_id = 0;
 
        if (!rc)
                rc = count;
+out:
+       free_cpumask_var(offline_mask);
        return rc;
 }
 
index 97fe82ee863334664eb2864f18f0b2a7fbd1dc56..2d3b1dd9571da71aec4b11ab97ef5a0cc106be4f 100644 (file)
@@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
        xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
        wsp_ics_set_xive(ics, hw_irq, xive);
 
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip wsp_irq_chip = {
index b0a518e9759978a7255a334388d53c79a387ba61..99464a7bdb3b1b0da09493dd4601d6b02951e13e 100644 (file)
@@ -64,6 +64,8 @@ endif
 
 obj-$(CONFIG_PPC_SCOM)         += scom.o
 
+obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS)  += udbg_memcons.o
+
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 
 obj-$(CONFIG_PPC_XICS)         += xics/
index 6e0e1005227f934930982fd4d4827b464d05180d..9cd0e60716fef0f37b068d14f2ad38222595221e 100644 (file)
@@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
        ev_int_set_config(src, config, prio, cpuid);
        spin_unlock_irqrestore(&ehv_pic_lock, flags);
 
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 
 static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
index ee21b5e71aecd6379d7e93a4e5b299cb8901cce6..3cc2f9159ab1185f9f8f00ccefbcac14995ae6b1 100644 (file)
@@ -54,7 +54,7 @@ static DEFINE_RAW_SPINLOCK(mpic_lock);
 
 #ifdef CONFIG_PPC32    /* XXX for now */
 #ifdef CONFIG_IRQ_ALL_CPUS
-#define distribute_irqs        (!(mpic->flags & MPIC_SINGLE_DEST_CPU))
+#define distribute_irqs        (1)
 #else
 #define distribute_irqs        (0)
 #endif
@@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
                               mpic_physmask(mask));
        }
 
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 
 static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
@@ -1703,7 +1703,7 @@ void mpic_setup_this_cpu(void)
         * it differently, then we should make sure we also change the default
         * values of irq_desc[].affinity in irq.c.
         */
-       if (distribute_irqs) {
+       if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
                for (i = 0; i < mpic->num_sources ; i++)
                        mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
                                mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
new file mode 100644 (file)
index 0000000..ce5a7b4
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * A udbg backend which logs messages and reads input from in-memory
+ * buffers.
+ *
+ * The console output can be read from memcons_output which is a
+ * circular buffer whose next write position is stored in memcons.output_pos.
+ *
+ * Input may be passed by writing into the memcons_input buffer when it is
+ * empty. The input buffer is empty when both input_pos == input_start and
+ * *input_start == '\0'.
+ *
+ * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp
+ * Copyright (C) 2013 Alistair Popple, IBM Corp
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/barrier.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/udbg.h>
+
+struct memcons {
+       char *output_start;
+       char *output_pos;
+       char *output_end;
+       char *input_start;
+       char *input_pos;
+       char *input_end;
+};
+
+static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE];
+static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE];
+
+struct memcons memcons = {
+       .output_start = memcons_output,
+       .output_pos = memcons_output,
+       .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE],
+       .input_start = memcons_input,
+       .input_pos = memcons_input,
+       .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
+};
+
+void memcons_putc(char c)
+{
+       char *new_output_pos;
+
+       *memcons.output_pos = c;
+       wmb();
+       new_output_pos = memcons.output_pos + 1;
+       if (new_output_pos >= memcons.output_end)
+               new_output_pos = memcons.output_start;
+
+       memcons.output_pos = new_output_pos;
+}
+
+int memcons_getc_poll(void)
+{
+       char c;
+       char *new_input_pos;
+
+       if (*memcons.input_pos) {
+               c = *memcons.input_pos;
+
+               new_input_pos = memcons.input_pos + 1;
+               if (new_input_pos >= memcons.input_end)
+                       new_input_pos = memcons.input_start;
+               else if (*new_input_pos == '\0')
+                       new_input_pos = memcons.input_start;
+
+               *memcons.input_pos = '\0';
+               wmb();
+               memcons.input_pos = new_input_pos;
+               return c;
+       }
+
+       return -1;
+}
+
+int memcons_getc(void)
+{
+       int c;
+
+       while (1) {
+               c = memcons_getc_poll();
+               if (c == -1)
+                       cpu_relax();
+               else
+                       break;
+       }
+
+       return c;
+}
+
+void udbg_init_memcons(void)
+{
+       udbg_putc = memcons_putc;
+       udbg_getc = memcons_getc;
+       udbg_getc_poll = memcons_getc_poll;
+}
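As a usage illustration only: a polling consumer of the input side might look like the sketch below. memcons_getc_poll() and cpu_relax() are the real calls from this file; the line-reading helper and its buffer are assumptions.

/* Hypothetical helper: collect one line of console input by polling. */
static void memcons_read_line_sketch(char *buf, int len)
{
	int i = 0, c;

	while (i < len - 1) {
		c = memcons_getc_poll();
		if (c == -1) {			/* input buffer currently empty */
			cpu_relax();
			continue;
		}
		if (c == '\n' || c == '\r')
			break;
		buf[i++] = c;
	}
	buf[i] = '\0';
}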
index f7e8609df0d5d059ea9230f22c9c9cccf7d3a801..39d72212655e3706ef5ca09dacd689156bc34399 100644 (file)
@@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
                       __func__, d->irq, hw_irq, server, rc);
                return -1;
        }
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip ics_opal_irq_chip = {
index 2c9789da0e249ffae45e5714ea505d13822efd2f..da183c5a103ce1df1da3617bbde788011f0351d3 100644 (file)
@@ -98,7 +98,6 @@ config S390
        select CLONE_BACKWARDS2
        select GENERIC_CLOCKEVENTS
        select GENERIC_CPU_DEVICES if !SMP
-       select GENERIC_KERNEL_THREAD
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_TIME_VSYSCALL_OLD
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
index bae0f402bf2ab7eb3c8c501c23bbd7e8f81e364a..87a22092b68f8b152a7df862d4c47604e10edee5 100644 (file)
@@ -212,7 +212,9 @@ appldata_timer_handler(ctl_table *ctl, int write,
                return 0;
        }
        if (!write) {
-               len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
+               strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
+                       ARRAY_SIZE(buf));
+               len = strnlen(buf, ARRAY_SIZE(buf));
                if (len > *lenp)
                        len = *lenp;
                if (copy_to_user(buffer, buf, len))
@@ -317,7 +319,8 @@ appldata_generic_handler(ctl_table *ctl, int write,
                return 0;
        }
        if (!write) {
-               len = sprintf(buf, ops->active ? "1\n" : "0\n");
+               strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
+               len = strnlen(buf, ARRAY_SIZE(buf));
                if (len > *lenp)
                        len = *lenp;
                if (copy_to_user(buffer, buf, len)) {
index 9411db653baca1153c7701046ec2980d5d67b38e..886ac7d4937a85c8cbe25d9ca52fd67d8cd28795 100644 (file)
@@ -71,8 +71,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 {
        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-       dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+       dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
 }
 
 #endif /* _ASM_S390_DMA_MAPPING_H */
index b7931faaef6d76add44dd3bcb5c461cec5237a94..bf246dae1367333c109f7f28a60789c2f97deb84 100644 (file)
@@ -9,11 +9,6 @@ struct dyn_arch_ftrace { };
 
 #define MCOUNT_ADDR ((long)_mcount)
 
-#ifdef CONFIG_64BIT
-#define MCOUNT_INSN_SIZE  12
-#else
-#define MCOUNT_INSN_SIZE  20
-#endif
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
@@ -21,4 +16,11 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 }
 
 #endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_64BIT
+#define MCOUNT_INSN_SIZE  12
+#else
+#define MCOUNT_INSN_SIZE  22
+#endif
+
 #endif /* _ASM_S390_FTRACE_H */
index 379d96e2105ea1d60f79a4cd881593a188a61c01..fd9be010f9b2614c4b4e75f00e9fefad18cf0855 100644 (file)
@@ -36,6 +36,7 @@ static inline void * phys_to_virt(unsigned long address)
 }
 
 void *xlate_dev_mem_ptr(unsigned long phys);
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
 void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 /*
index 75ce9b065f9f321f6b0ef3eb3c010c10e58802e6..5d64fb7619ccfc41047c3b1bb6ad45ade2704d1d 100644 (file)
@@ -32,7 +32,7 @@
 
 void storage_key_init_range(unsigned long start, unsigned long end);
 
-static unsigned long pfmf(unsigned long function, unsigned long address)
+static inline unsigned long pfmf(unsigned long function, unsigned long address)
 {
        asm volatile(
                "       .insn   rre,0xb9af0000,%[function],%[address]"
@@ -44,17 +44,13 @@ static unsigned long pfmf(unsigned long function, unsigned long address)
 
 static inline void clear_page(void *page)
 {
-       if (MACHINE_HAS_PFMF) {
-               pfmf(0x10000, (unsigned long)page);
-       } else {
-               register unsigned long reg1 asm ("1") = 0;
-               register void *reg2 asm ("2") = page;
-               register unsigned long reg3 asm ("3") = 4096;
-               asm volatile(
-                       "       mvcl    2,0"
-                       : "+d" (reg2), "+d" (reg3) : "d" (reg1)
-                       : "memory", "cc");
-       }
+       register unsigned long reg1 asm ("1") = 0;
+       register void *reg2 asm ("2") = page;
+       register unsigned long reg3 asm ("3") = 4096;
+       asm volatile(
+               "       mvcl    2,0"
+               : "+d" (reg2), "+d" (reg3) : "d" (reg1)
+               : "memory", "cc");
 }
 
 static inline void copy_page(void *to, void *from)
index 4105b8221fddfd3c180caf78ef65eb9350c93193..ac01463038f1e9b8236a32d50d0a37024b882a19 100644 (file)
@@ -306,7 +306,7 @@ extern unsigned long MODULES_END;
 #define RCP_HC_BIT     0x00200000UL
 #define RCP_GR_BIT     0x00040000UL
 #define RCP_GC_BIT     0x00020000UL
-#define RCP_IN_BIT     0x00008000UL    /* IPTE notify bit */
+#define RCP_IN_BIT     0x00002000UL    /* IPTE notify bit */
 
 /* User dirty / referenced bit for KVM's migration feature */
 #define KVM_UR_BIT     0x00008000UL
@@ -374,7 +374,7 @@ extern unsigned long MODULES_END;
 #define RCP_HC_BIT     0x0020000000000000UL
 #define RCP_GR_BIT     0x0004000000000000UL
 #define RCP_GC_BIT     0x0002000000000000UL
-#define RCP_IN_BIT     0x0000800000000000UL    /* IPTE notify bit */
+#define RCP_IN_BIT     0x0000200000000000UL    /* IPTE notify bit */
 
 /* User dirty / referenced bit for KVM's migration feature */
 #define KVM_UR_BIT     0x0000800000000000UL
@@ -646,7 +646,7 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
        unsigned long address, bits;
        unsigned char skey;
 
-       if (!pte_present(*ptep))
+       if (pte_val(*ptep) & _PAGE_INVALID)
                return pgste;
        address = pte_val(*ptep) & PAGE_MASK;
        skey = page_get_storage_key(address);
@@ -680,7 +680,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 #ifdef CONFIG_PGSTE
        int young;
 
-       if (!pte_present(*ptep))
+       if (pte_val(*ptep) & _PAGE_INVALID)
                return pgste;
        /* Get referenced bit from storage key */
        young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
@@ -706,7 +706,7 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
        unsigned long address;
        unsigned long okey, nkey;
 
-       if (!pte_present(entry))
+       if (pte_val(entry) & _PAGE_INVALID)
                return;
        address = pte_val(entry) & PAGE_MASK;
        okey = nkey = page_get_storage_key(address);
@@ -1098,6 +1098,9 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
        pte = *ptep;
        if (!mm_exclusive(mm))
                __ptep_ipte(address, ptep);
+
+       if (mm_has_pgste(mm))
+               pgste = pgste_update_all(&pte, pgste);
        return pte;
 }
 
@@ -1105,9 +1108,13 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                           unsigned long address,
                                           pte_t *ptep, pte_t pte)
 {
+       pgste_t pgste;
+
        if (mm_has_pgste(mm)) {
+               pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
+               pgste_set_key(ptep, pgste, pte);
                pgste_set_pte(ptep, pte);
-               pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
+               pgste_set_unlock(ptep, pgste);
        } else
                *ptep = pte;
 }
index 2d72d9e96c153b360ae5eb13d68c7d0370b9139e..9cb1b975b3532f3cc6237ab1cd168146f99a9904 100644 (file)
@@ -793,10 +793,6 @@ ENTRY(sys32_stime_wrapper)
        llgtr   %r2,%r2                 # long *
        jg      compat_sys_stime        # branch to system call
 
-ENTRY(sys32_sysctl_wrapper)
-       llgtr   %r2,%r2                 # struct compat_sysctl_args *
-       jg      compat_sys_sysctl
-
 ENTRY(sys32_fstat64_wrapper)
        llgfr   %r2,%r2                 # unsigned long
        llgtr   %r3,%r3                 # struct stat64 *
@@ -1349,15 +1345,6 @@ ENTRY(sys_fanotify_init_wrapper)
        llgfr   %r3,%r3                 # unsigned int
        jg      sys_fanotify_init       # branch to system call
 
-ENTRY(sys_fanotify_mark_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgfr   %r3,%r3                 # unsigned int
-       sllg    %r4,%r4,32              # get high word of 64bit mask
-       lr      %r4,%r5                 # get low word of 64bit mask
-       llgfr   %r5,%r6                 # unsigned int
-       llgt    %r6,164(%r15)           # char *
-       jg      sys_fanotify_mark       # branch to system call
-
 ENTRY(sys_prlimit64_wrapper)
        lgfr    %r2,%r2                 # pid_t
        llgfr   %r3,%r3                 # unsigned int
index 7f4a4a8c847c7a3807fd6d913f0ca93fe5b874b8..be87d3e05a5be69265a6100f87afe2fa60d51137 100644 (file)
@@ -1862,6 +1862,8 @@ void print_fn_code(unsigned char *code, unsigned long len)
        while (len) {
                ptr = buffer;
                opsize = insn_length(*code);
+               if (opsize > len)
+                       break;
                ptr += sprintf(ptr, "%p: ", code);
                for (i = 0; i < opsize; i++)
                        ptr += sprintf(ptr, "%02x", code[i]);
index 78bdf0e5dff77999c72b2aae1a7df71347e35053..e3043aef87a96d17d62defc3c14a828625668507 100644 (file)
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
 
-#ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 12
-#else
-#define MCOUNT_OFFSET_RET 22
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 void ftrace_disable_code(void);
@@ -155,9 +149,10 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
+       ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
        if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
-       trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
+       trace.func = ip;
        /* Only trace if the calling function expects to. */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
index 4567ce20d900c128a23d27a6b8668b72d921149f..08dcf21cb8dfc040e627b23c1ee279b8822110df 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
 
        .section .kprobes.text, "ax"
 
@@ -33,6 +34,7 @@ ENTRY(ftrace_caller)
        la      %r2,0(%r14)
        st      %r0,__SF_BACKCHAIN(%r15)
        la      %r3,0(%r3)
+       ahi     %r2,-MCOUNT_INSN_SIZE
        l       %r14,0b-0b(%r1)
        l       %r14,0(%r14)
        basr    %r14,%r14
index 11332193db30552a7d028de8debf6af8e019531b..1c52eae3396a0845f3176ba8305607ef049f2608 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
 
        .section .kprobes.text, "ax"
 
@@ -29,6 +30,7 @@ ENTRY(ftrace_caller)
        stg     %r1,__SF_BACKCHAIN(%r15)
        lgr     %r2,%r14
        lg      %r3,168(%r15)
+       aghi    %r2,-MCOUNT_INSN_SIZE
        larl    %r14,ftrace_trace_function
        lg      %r14,0(%r14)
        basr    %r14,%r14
index 8074cb4b7cbf9ea7988160131cf2481241a5115b..4f977d0d25c2d1b1af674314a0b3da263d602b5a 100644 (file)
@@ -428,34 +428,27 @@ void smp_stop_cpu(void)
  * This is the main routine where commands issued by other
  * cpus are handled.
  */
-static void do_ext_call_interrupt(struct ext_code ext_code,
-                                 unsigned int param32, unsigned long param64)
+static void smp_handle_ext_call(void)
 {
        unsigned long bits;
-       int cpu;
-
-       cpu = smp_processor_id();
-       if (ext_code.code == 0x1202)
-               inc_irq_stat(IRQEXT_EXC);
-       else
-               inc_irq_stat(IRQEXT_EMS);
-       /*
-        * handle bit signal external calls
-        */
-       bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
 
+       /* handle bit signal external calls */
+       bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
        if (test_bit(ec_stop_cpu, &bits))
                smp_stop_cpu();
-
        if (test_bit(ec_schedule, &bits))
                scheduler_ipi();
-
        if (test_bit(ec_call_function, &bits))
                generic_smp_call_function_interrupt();
-
        if (test_bit(ec_call_function_single, &bits))
                generic_smp_call_function_single_interrupt();
+}
 
+static void do_ext_call_interrupt(struct ext_code ext_code,
+                                 unsigned int param32, unsigned long param64)
+{
+       inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
+       smp_handle_ext_call();
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -645,7 +638,7 @@ static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
                        continue;
                pcpu = pcpu_devices + cpu;
                pcpu->address = info->cpu[i].address;
-               pcpu->state = (cpu >= info->configured) ?
+               pcpu->state = (i >= info->configured) ?
                        CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
                smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                set_cpu_present(cpu, true);
@@ -760,6 +753,8 @@ int __cpu_disable(void)
 {
        unsigned long cregs[16];
 
+       /* Handle possible pending IPIs */
+       smp_handle_ext_call();
        set_cpu_online(smp_processor_id(), false);
        /* Disable pseudo page faults on this cpu. */
        pfault_fini();
index 9f214e992eed2bbbdfbd2bb77a278a92de2dddf5..913410bd74a335c0693637ccb69e49cb89fd63c1 100644 (file)
@@ -157,7 +157,7 @@ SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper)               /* 145 */
 SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
 SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
 SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
-SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper)
+SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
 SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper)               /* 150 */
 SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
 SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
@@ -341,7 +341,7 @@ SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev)
 SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
 SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
 SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
-SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper)
+SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
 SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
 SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
 SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
index 7805ddca833d7092149958e8ed56522598b2db07..a938b548f07e2d18c5510dbe99e47cdf231265a8 100644 (file)
@@ -492,7 +492,7 @@ static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
        mp = (struct gmap_pgtable *) page->index;
        rmap->gmap = gmap;
        rmap->entry = segment_ptr;
-       rmap->vmaddr = address;
+       rmap->vmaddr = address & PMD_MASK;
        spin_lock(&mm->page_table_lock);
        if (*segment_ptr == segment) {
                list_add(&rmap->list, &mp->mapper);
@@ -677,8 +677,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
                        break;
                }
                /* Get the page mapped */
-               if (get_user_pages(current, gmap->mm, addr, 1, 1, 0,
-                                  NULL, NULL) != 1) {
+               if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
                        rc = -EFAULT;
                        break;
                }
index d8f988a37d16a59518bac5e612a6d19d2bb0ad95..0940682ab38bdd700227226e22803e6310096aa5 100644 (file)
@@ -41,8 +41,6 @@
 unsigned long empty_zero_page;
 EXPORT_SYMBOL_GPL(empty_zero_page);
 
-static struct kcore_list kcore_mem, kcore_vmalloc;
-
 static void setup_zero_page(void)
 {
        struct page *page;
index 2e680b5245c9b4cc0b2b8941414e11ce540b424d..f7c72b6efc27556cd2e2de7a74539b1ba21831ce 100644 (file)
@@ -239,15 +239,6 @@ do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
        nop
        nop
 
-       .globl          sys32_fanotify_mark
-sys32_fanotify_mark:
-       sethi           %hi(sys_fanotify_mark), %g1
-       sllx            %o2, 32, %o2
-       or              %o2, %o3, %o2
-       mov             %o4, %o3
-       jmpl            %g1 + %lo(sys_fanotify_mark), %g0
-        mov            %o5, %o4
-
        .section        __ex_table,"a"
        .align          4
        .word           1b, __retl_efault, 2b, __retl_efault
index 8fd9320802153c5d4ece190b8dd535b8a3747679..6d81597064b6b5c7efa0861fc6c5c230e3c4984f 100644 (file)
@@ -84,7 +84,7 @@ sys_call_table32:
        .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/        .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
        .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
-/*330*/        .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
+/*330*/        .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module
 
index cfe79c9529b353f2d132cd004e336b578f2faee4..f9e86253931407bcf851c6e84f0c007480ce6cf1 100644 (file)
 #include <asm/syscalls.h>
 #include <asm/cacheflush.h>
 
-/* Note: used by the compat code even in 64-bit Linux. */
-SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
-               unsigned long, prot, unsigned long, flags,
-               unsigned long, fd, unsigned long, off_4k)
-{
-       return sys_mmap_pgoff(addr, len, prot, flags, fd,
-                             off_4k);
-}
-
 /* Provide the actual syscall number to call mapping. */
 #undef __SYSCALL
 #define __SYSCALL(nr, call)    [nr] = (call),
 
+#define sys_mmap2 sys_mmap_pgoff
 /* Note that we don't include <linux/unistd.h> but <asm/unistd.h> */
 void *sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls-1] = sys_ni_syscall,
index 6a154a91c7e746342f35cf6aa13bdae24f544a88..685692c94f051a8a7ad582442efcc3fa11173332 100644 (file)
@@ -108,7 +108,6 @@ config X86
        select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
        select GENERIC_TIME_VSYSCALL if X86_64
        select KTIME_SCALAR if X86_32
-       select ALWAYS_USE_PERSISTENT_CLOCK
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select HAVE_CONTEXT_TRACKING if X86_64
index 94c27df8a549c2ce21234c4a621a5a963a917fc4..f247304299a28b1f3245563b2437ab54a8a82889 100644 (file)
@@ -240,7 +240,7 @@ fold_64:
        pand    %xmm3, %xmm1
        PCLMULQDQ 0x00, CONSTANT, %xmm1
        pxor    %xmm2, %xmm1
-       pextrd  $0x01, %xmm1, %eax
+       PEXTRD  0x01, %xmm1, %eax
 
        ret
 ENDPROC(crc32_pclmul_le_16)
index 56610c4bf31b22a47ef58a0362fcabe3cd8437f8..642f15687a0ac4205f59850cacab030768eaf6d4 100644 (file)
@@ -118,7 +118,7 @@ y2 = %r15d
 
 _INP_END_SIZE = 8
 _INP_SIZE = 8
-_XFER_SIZE = 8
+_XFER_SIZE = 16
 _XMM_SAVE_SIZE = 0
 
 _INP_END = 0
index 98d3c391da81b5de50469c7c0d4c33afb7e90b8a..f833b74d902ba87919184ba7828b226ee067c096 100644 (file)
@@ -111,7 +111,7 @@ y2 = %r15d
 
 _INP_END_SIZE = 8
 _INP_SIZE = 8
-_XFER_SIZE = 8
+_XFER_SIZE = 16
 _XMM_SAVE_SIZE = 0
 
 _INP_END = 0
index 4e4907c67d92d0940270de5c7c55c731681afaf5..8e0ceecdc95790d7a53eb3fe5bc1c3867bcb9e7f 100644 (file)
@@ -243,12 +243,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
        return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
                             ((u64)len_hi << 32) | len_lo);
 }
-
-asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags,
-                                   u32 mask_lo, u32 mask_hi,
-                                   int fd, const char  __user *pathname)
-{
-       return sys_fanotify_mark(fanotify_fd, flags,
-                                ((u64)mask_hi << 32) | mask_lo,
-                                fd, pathname);
-}
index 280bf7fb6aba5c0e1976c852a71692d1d3f888b2..3e115273ed885110de583ce5b228ad6caf993eec 100644 (file)
@@ -9,12 +9,68 @@
 
 #define REG_NUM_INVALID                100
 
-#define REG_TYPE_R64           0
-#define REG_TYPE_XMM           1
+#define REG_TYPE_R32           0
+#define REG_TYPE_R64           1
+#define REG_TYPE_XMM           2
 #define REG_TYPE_INVALID       100
 
+       .macro R32_NUM opd r32
+       \opd = REG_NUM_INVALID
+       .ifc \r32,%eax
+       \opd = 0
+       .endif
+       .ifc \r32,%ecx
+       \opd = 1
+       .endif
+       .ifc \r32,%edx
+       \opd = 2
+       .endif
+       .ifc \r32,%ebx
+       \opd = 3
+       .endif
+       .ifc \r32,%esp
+       \opd = 4
+       .endif
+       .ifc \r32,%ebp
+       \opd = 5
+       .endif
+       .ifc \r32,%esi
+       \opd = 6
+       .endif
+       .ifc \r32,%edi
+       \opd = 7
+       .endif
+#ifdef CONFIG_X86_64
+       .ifc \r32,%r8d
+       \opd = 8
+       .endif
+       .ifc \r32,%r9d
+       \opd = 9
+       .endif
+       .ifc \r32,%r10d
+       \opd = 10
+       .endif
+       .ifc \r32,%r11d
+       \opd = 11
+       .endif
+       .ifc \r32,%r12d
+       \opd = 12
+       .endif
+       .ifc \r32,%r13d
+       \opd = 13
+       .endif
+       .ifc \r32,%r14d
+       \opd = 14
+       .endif
+       .ifc \r32,%r15d
+       \opd = 15
+       .endif
+#endif
+       .endm
+
        .macro R64_NUM opd r64
        \opd = REG_NUM_INVALID
+#ifdef CONFIG_X86_64
        .ifc \r64,%rax
        \opd = 0
        .endif
        .ifc \r64,%r15
        \opd = 15
        .endif
+#endif
        .endm
 
        .macro XMM_NUM opd xmm
        .endm
 
        .macro REG_TYPE type reg
+       R32_NUM reg_type_r32 \reg
        R64_NUM reg_type_r64 \reg
        XMM_NUM reg_type_xmm \reg
        .if reg_type_r64 <> REG_NUM_INVALID
        \type = REG_TYPE_R64
+       .elseif reg_type_r32 <> REG_NUM_INVALID
+       \type = REG_TYPE_R32
        .elseif reg_type_xmm <> REG_NUM_INVALID
        \type = REG_TYPE_XMM
        .else
        .byte \imm8
        .endm
 
+       .macro PEXTRD imm8 xmm gpr
+       R32_NUM extrd_opd1 \gpr
+       XMM_NUM extrd_opd2 \xmm
+       PFX_OPD_SIZE
+       PFX_REX extrd_opd1 extrd_opd2
+       .byte 0x0f, 0x3a, 0x16
+       MODRM 0xc0 extrd_opd1 extrd_opd2
+       .byte \imm8
+       .endm
+
        .macro AESKEYGENASSIST rcon xmm1 xmm2
        XMM_NUM aeskeygen_opd1 \xmm1
        XMM_NUM aeskeygen_opd2 \xmm2
index 0ef202e232d642e97f9bb2c5a389a35ef10cc99d..82c34ee25a651760c9950ce6c54625896fd9ea2f 100644 (file)
@@ -50,9 +50,6 @@ asmlinkage long sys32_fallocate(int, int, unsigned,
 asmlinkage long sys32_sigreturn(void);
 asmlinkage long sys32_rt_sigreturn(void);
 
-asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
-                                   const char __user *);
-
 #endif /* CONFIG_COMPAT */
 
 #endif /* _ASM_X86_SYS_IA32_H */
index 5f87b35fd2ef5d6a9cc4eb6e7cdf1c822d4e679d..2917a6452c496e20d254589f789e8ad0f499f6dc 100644 (file)
@@ -37,8 +37,8 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *);
 unsigned long sys_sigreturn(void);
 
 /* kernel/vm86_32.c */
-int sys_vm86old(struct vm86_struct __user *);
-int sys_vm86(unsigned long, unsigned long);
+asmlinkage long sys_vm86old(struct vm86_struct __user *);
+asmlinkage long sys_vm86(unsigned long, unsigned long);
 
 #else /* CONFIG_X86_32 */
 
index b3a4866661c5c426efb0504294a3c7228ccd107a..2af848dfa75424b7798924dd9a91524af7f088e3 100644 (file)
 #define MSR_CORE_C6_RESIDENCY          0x000003fd
 #define MSR_CORE_C7_RESIDENCY          0x000003fe
 #define MSR_PKG_C2_RESIDENCY           0x0000060d
+#define MSR_PKG_C8_RESIDENCY           0x00000630
+#define MSR_PKG_C9_RESIDENCY           0x00000631
+#define MSR_PKG_C10_RESIDENCY          0x00000632
 
 /* Run Time Average Power Limiting (RAPL) Interface */
 
index dab95a85f7f8590d240790e981830ea91f729cdd..55b67614ed942fad37e13fe5a48e4fab19f4feea 100644 (file)
@@ -34,7 +34,7 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
 static unsigned int __initdata next_early_pgt = 2;
-pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
+pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
index 08f7e8039099dc6b8787bbad4797553d7da6ac3f..321d65ebaffe255bbb1dc1bc6aaf2ac217d55aaf 100644 (file)
@@ -115,8 +115,10 @@ startup_64:
        movq    %rdi, %rax
        shrq    $PUD_SHIFT, %rax
        andl    $(PTRS_PER_PUD-1), %eax
-       movq    %rdx, (4096+0)(%rbx,%rax,8)
-       movq    %rdx, (4096+8)(%rbx,%rax,8)
+       movq    %rdx, 4096(%rbx,%rax,8)
+       incl    %eax
+       andl    $(PTRS_PER_PUD-1), %eax
+       movq    %rdx, 4096(%rbx,%rax,8)
 
        addq    $8192, %rbx
        movq    %rdi, %rax
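The incl/andl pair added above keeps the second of the two stores inside the same PUD page by wrapping the index instead of writing one slot past it. A C sketch of the same arithmetic, assuming the usual x86-64 constants (PTRS_PER_PUD is 512, a power of two, so the mask works):

    /* C sketch of the index math done in assembly above. */
    static void map_two_pud_slots(unsigned long *pud, unsigned long vaddr,
                                  unsigned long pmd_entry)
    {
            unsigned int idx  = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
            unsigned int next = (idx + 1) & (PTRS_PER_PUD - 1); /* 511 wraps to 0 */

            pud[idx]  = pmd_entry;  /* models the old (4096+0) store */
            pud[next] = pmd_entry;  /* models the old (4096+8) store, now wrap-safe */
    }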
index 245a71db401af0e50be0deeba56d680c278ca2fc..cb339097b9ea0cf4f57b2b406fa308a489cee881 100644 (file)
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *
- * For now, with eagerfpu we will return interrupted kernel FPU
- * state as not-idle. TBD: Ideally we can change the return value
- * to something like __thread_has_fpu(current). But we need to
- * be careful of doing __thread_clear_has_fpu() before saving
- * the FPU etc for supporting nested uses etc. For now, take
- * the simple route!
- *
  * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
  * pair does nothing at all: the thread must not have fpu (so
  * that we don't try to save the FPU state), and TS must
  * be set (so that the clts/stts pair does nothing that is
  * visible in the interrupted kernel thread).
+ *
+ * Except for the eagerfpu case, where we return 1 unless we've already
+ * been eager and saved the state in kernel_fpu_begin().
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
        if (use_eager_fpu())
-               return 0;
+               return __thread_has_fpu(current);
 
        return !__thread_has_fpu(current) &&
                (read_cr0() & X86_CR0_TS);
@@ -78,8 +74,8 @@ void __kernel_fpu_begin(void)
        struct task_struct *me = current;
 
        if (__thread_has_fpu(me)) {
-               __save_init_fpu(me);
                __thread_clear_has_fpu(me);
+               __save_init_fpu(me);
                /* We do 'stts()' in __kernel_fpu_end() */
        } else if (!use_eager_fpu()) {
                this_cpu_write(fpu_owner_task, NULL);
index d893e8ed8ac96559b2175b00c3d68d71658a4105..2e9e12871c2b51a9fe48068369f8dd4346a4db51 100644 (file)
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
 #endif
 
 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 /*
  * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
  * hot added or resumes.
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
         * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
         * hotplug.
         */
-       cpu_hotplug_driver_lock();
+       mutex_lock(&x86_cpu_microcode_mutex);
 
        mc_saved_count_init = mc_saved_data.mc_saved_count;
        mc_saved_count = mc_saved_data.mc_saved_count;
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
        }
 
 out:
-       cpu_hotplug_driver_unlock();
+       mutex_unlock(&x86_cpu_microcode_mutex);
 
        return ret;
 }
index 607af0d4d5ef5f34afc0d5e7caad375dafff6663..4e7a37ff03ab9f6aba634be08f7650e1ef5f2f7c 100644 (file)
@@ -312,6 +312,8 @@ void arch_cpu_idle(void)
 {
        if (cpuidle_idle_call())
                x86_idle();
+       else
+               local_irq_enable();
 }
 
 /*
@@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu)
  */
 static void amd_e400_idle(void)
 {
-       if (need_resched())
-               return;
-
        if (!amd_e400_c1e_detected) {
                u32 lo, hi;
 
index 1cf5766dde169448e109101e981632bb6a6407a5..e8edcf52e06911fe5446e543f40368ea69114225 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
+#include <linux/syscalls.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
@@ -48,7 +49,6 @@
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/irq.h>
-#include <asm/syscalls.h>
 
 /*
  * Known problems:
@@ -202,36 +202,32 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 static int do_vm86_irq_handling(int subfunction, int irqnumber);
 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
 
-int sys_vm86old(struct vm86_struct __user *v86)
+SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
 {
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
                                         * This remains on the stack until we
                                         * return to 32 bit user space.
                                         */
-       struct task_struct *tsk;
-       int tmp, ret = -EPERM;
+       struct task_struct *tsk = current;
+       int tmp;
 
-       tsk = current;
        if (tsk->thread.saved_sp0)
-               goto out;
+               return -EPERM;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, vm86plus) -
                                       sizeof(info.regs));
-       ret = -EFAULT;
        if (tmp)
-               goto out;
+               return -EFAULT;
        memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
        info.regs32 = current_pt_regs();
        tsk->thread.vm86_info = v86;
        do_sys_vm86(&info, tsk);
-       ret = 0;        /* we never return here */
-out:
-       return ret;
+       return 0;       /* we never return here */
 }
 
 
-int sys_vm86(unsigned long cmd, unsigned long arg)
+SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
 {
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
@@ -239,7 +235,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
                                         * return to 32 bit user space.
                                         */
        struct task_struct *tsk;
-       int tmp, ret;
+       int tmp;
        struct vm86plus_struct __user *v86;
 
        tsk = current;
@@ -248,8 +244,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
        case VM86_FREE_IRQ:
        case VM86_GET_IRQ_BITS:
        case VM86_GET_AND_RESET_IRQ:
-               ret = do_vm86_irq_handling(cmd, (int)arg);
-               goto out;
+               return do_vm86_irq_handling(cmd, (int)arg);
        case VM86_PLUS_INSTALL_CHECK:
                /*
                 * NOTE: on old vm86 stuff this will return the error
@@ -257,28 +252,23 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
                 *  interpreted as (invalid) address to vm86_struct.
                 *  So the installation check works.
                 */
-               ret = 0;
-               goto out;
+               return 0;
        }
 
        /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
-       ret = -EPERM;
        if (tsk->thread.saved_sp0)
-               goto out;
+               return -EPERM;
        v86 = (struct vm86plus_struct __user *)arg;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, regs32) -
                                       sizeof(info.regs));
-       ret = -EFAULT;
        if (tmp)
-               goto out;
+               return -EFAULT;
        info.regs32 = current_pt_regs();
        info.vm86plus.is_vm86pus = 1;
        tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
        do_sys_vm86(&info, tsk);
-       ret = 0;        /* we never return here */
-out:
-       return ret;
+       return 0;       /* we never return here */
 }
 
 
index 8e517bba6a7c9434099139ecbb82a5017ca49cbf..5953dcea752d08e950d62293abbdec94ae95f62b 100644 (file)
@@ -60,6 +60,7 @@
 #define OpGS              25ull  /* GS */
 #define OpMem8            26ull  /* 8-bit zero extended memory operand */
 #define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
+#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
 
 #define OpBits             5  /* Width of operand field */
 #define OpMask             ((1ull << OpBits) - 1)
 #define SrcImmUByte (OpImmUByte << SrcShift)
 #define SrcImmU     (OpImmU << SrcShift)
 #define SrcSI       (OpSI << SrcShift)
+#define SrcXLat     (OpXLat << SrcShift)
 #define SrcImmFAddr (OpImmFAddr << SrcShift)
 #define SrcMemFAddr (OpMemFAddr << SrcShift)
 #define SrcAcc      (OpAcc << SrcShift)
@@ -533,6 +535,9 @@ FOP_SETCC(setle)
 FOP_SETCC(setnle)
 FOP_END;
 
+FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+FOP_END;
+
 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)                 \
        do {                                                            \
                unsigned long _tmp;                                     \
@@ -1235,9 +1240,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
        ctxt->modrm_seg = VCPU_SREG_DS;
 
        if (ctxt->modrm_mod == 3) {
+               int highbyte_regs = ctxt->rex_prefix == 0;
+
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
-               op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp);
+               op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
+                                              highbyte_regs && (ctxt->d & ByteOp));
                if (ctxt->d & Sse) {
                        op->type = OP_XMM;
                        op->bytes = 16;
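The highbyte_regs check above encodes an x86 decoding rule: for byte operands, register encodings 4-7 mean AH/CH/DH/BH only when no REX prefix is present; with any REX prefix they select SPL/BPL/SIL/DIL instead. As a standalone sketch (helper name illustrative):

    /* True when a ModRM register field should be decoded as AH/CH/DH/BH. */
    static int is_legacy_high_byte_reg(unsigned int reg, int has_rex_prefix,
                                       int byte_operand)
    {
            return byte_operand && !has_rex_prefix && reg >= 4 && reg <= 7;
    }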
@@ -2996,6 +3004,28 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_aam(struct x86_emulate_ctxt *ctxt)
+{
+       u8 al, ah;
+
+       if (ctxt->src.val == 0)
+               return emulate_de(ctxt);
+
+       al = ctxt->dst.val & 0xff;
+       ah = al / ctxt->src.val;
+       al %= ctxt->src.val;
+
+       ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
+
+       /* Set PF, ZF, SF */
+       ctxt->src.type = OP_IMM;
+       ctxt->src.val = 0;
+       ctxt->src.bytes = 1;
+       fastop(ctxt, em_or);
+
+       return X86EMUL_CONTINUE;
+}
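For reference, the architectural behaviour em_aam() above reproduces, written as a standalone model (returns -1 where real hardware would raise a divide error):

    /* AAM imm8: AH = AL / imm8, AL = AL % imm8; SF/ZF/PF then follow the new AL. */
    static int aam_model(unsigned char *al, unsigned char *ah, unsigned char imm8)
    {
            if (imm8 == 0)
                    return -1;      /* hardware raises #DE */

            *ah = *al / imm8;
            *al = *al % imm8;
            return 0;
    }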
+
 static int em_aad(struct x86_emulate_ctxt *ctxt)
 {
        u8 al = ctxt->dst.val & 0xff;
@@ -3936,7 +3966,10 @@ static const struct opcode opcode_table[256] = {
        /* 0xD0 - 0xD7 */
        G(Src2One | ByteOp, group2), G(Src2One, group2),
        G(Src2CL | ByteOp, group2), G(Src2CL, group2),
-       N, I(DstAcc | SrcImmByte | No64, em_aad), N, N,
+       I(DstAcc | SrcImmUByte | No64, em_aam),
+       I(DstAcc | SrcImmUByte | No64, em_aad),
+       F(DstAcc | ByteOp | No64, em_salc),
+       I(DstAcc | SrcXLat | ByteOp, em_mov),
        /* 0xD8 - 0xDF */
        N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
        /* 0xE0 - 0xE7 */
@@ -3967,7 +4000,8 @@ static const struct opcode twobyte_table[256] = {
        DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
        N, D(ImplicitOps | ModRM), N, N,
        /* 0x10 - 0x1F */
-       N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
+       N, N, N, N, N, N, N, N,
+       D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
        /* 0x20 - 0x2F */
        DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
        DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
@@ -4198,6 +4232,16 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                op->val = 0;
                op->count = 1;
                break;
+       case OpXLat:
+               op->type = OP_MEM;
+               op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+               op->addr.mem.ea =
+                       register_address(ctxt,
+                               reg_read(ctxt, VCPU_REGS_RBX) +
+                               (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
+               op->addr.mem.seg = seg_override(ctxt);
+               op->val = 0;
+               break;
        case OpImmFAddr:
                op->type = OP_IMM;
                op->addr.mem.ea = ctxt->_eip;
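The OpXLat operand added above encodes the XLAT addressing rule: the byte at seg:(rBX + zero-extended AL), with DS as the default segment unless overridden. In isolation:

    /* Effective address used by XLAT, before the usual address-size masking
     * that register_address() applies. */
    static unsigned long xlat_effective_address(unsigned long rbx, unsigned long rax)
    {
            return rbx + (rax & 0xff);      /* AL is zero-extended, not sign-extended */
    }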
@@ -4796,6 +4840,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
        case 0x08:              /* invd */
        case 0x0d:              /* GrpP (prefetch) */
        case 0x18:              /* Grp16 (prefetch/nop) */
+       case 0x1f:              /* nop */
                break;
        case 0x20: /* mov cr, reg */
                ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
index e1adbb4aca753657bfa246ecf56e3b52b130a9ab..0eee2c8b64d1cafecdf7f587dbeef5566b4449df 100644 (file)
@@ -1861,11 +1861,14 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
        unsigned int sipi_vector;
+       unsigned long pe;
 
-       if (!kvm_vcpu_has_lapic(vcpu))
+       if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
                return;
 
-       if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
+       pe = xchg(&apic->pending_events, 0);
+
+       if (test_bit(KVM_APIC_INIT, &pe)) {
                kvm_lapic_reset(vcpu);
                kvm_vcpu_reset(vcpu);
                if (kvm_vcpu_is_bsp(apic->vcpu))
@@ -1873,7 +1876,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
                else
                        vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
        }
-       if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) &&
+       if (test_bit(KVM_APIC_SIPI, &pe) &&
            vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
                /* evaluate pending_events before reading the vector */
                smp_rmb();
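The xchg() above turns the two separate test_and_clear_bit() calls into a single atomic fetch-and-clear of the whole word, so INIT and SIPI are inspected from one consistent snapshot. The pattern in isolation (handle_init()/handle_sipi() are illustrative stand-ins, not kernel functions):

    /* Grab every pending event atomically, then test bits on the private copy. */
    unsigned long pe = xchg(&apic->pending_events, 0);

    if (test_bit(KVM_APIC_INIT, &pe))
            handle_init();
    if (test_bit(KVM_APIC_SIPI, &pe))
            handle_sipi();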
index 25a791ed21c88057697eb06339f7066d8cfa63e2..260a9193955538b4fea743045b2f964b2736b24e 100644 (file)
@@ -5434,6 +5434,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                        return 0;
                }
 
+               if (vcpu->arch.halt_request) {
+                       vcpu->arch.halt_request = 0;
+                       ret = kvm_emulate_halt(vcpu);
+                       goto out;
+               }
+
                if (signal_pending(current))
                        goto out;
                if (need_resched())
index 05a8b1a2300df0997d116e753506bf6e6ffa1fe2..094b5d96ab1468c1875a2a2a5e682245f2909db9 100644 (file)
@@ -555,6 +555,25 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
+static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+                       !vcpu->guest_xcr0_loaded) {
+               /* kvm_set_xcr() also depends on this */
+               xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+               vcpu->guest_xcr0_loaded = 1;
+       }
+}
+
+static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->guest_xcr0_loaded) {
+               if (vcpu->arch.xcr0 != host_xcr0)
+                       xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+               vcpu->guest_xcr0_loaded = 0;
+       }
+}
+
 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
        u64 xcr0;
@@ -571,8 +590,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
                return 1;
        if (xcr0 & ~host_xcr0)
                return 1;
+       kvm_put_guest_xcr0(vcpu);
        vcpu->arch.xcr0 = xcr0;
-       vcpu->guest_xcr0_loaded = 0;
        return 0;
 }
 
@@ -5614,25 +5633,6 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
        }
 }
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
-                       !vcpu->guest_xcr0_loaded) {
-               /* kvm_set_xcr() also depends on this */
-               xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
-               vcpu->guest_xcr0_loaded = 1;
-       }
-}
-
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-       if (vcpu->guest_xcr0_loaded) {
-               if (vcpu->arch.xcr0 != host_xcr0)
-                       xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
-               vcpu->guest_xcr0_loaded = 0;
-       }
-}
-
 static void process_nmi(struct kvm_vcpu *vcpu)
 {
        unsigned limit = 2;
index fdc5dca14fb35de5537765acca869ccfb447262d..eaac1743def7ea70cccda0926e53fc2e343f7f06 100644 (file)
@@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 }
 
 /*
- * would have hole in the middle or ends, and only ram parts will be mapped.
+ * We need to iterate through the E820 memory map and create direct mappings
+ * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
+ * create direct mappings for all pfns from [0 to max_low_pfn) and
+ * [4GB to max_pfn) because of possible memory holes in high addresses
+ * that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may result in using
+ * smaller (i.e. 4K instead of 2M or 1G) page table mappings.
+ *
+ * init_mem_mapping() calls init_range_memory_mapping() with a big range.
+ * That range may have holes in the middle or at the ends, and only the
+ * RAM parts will be mapped in init_range_memory_mapping().
  */
 static unsigned long __init init_range_memory_mapping(
                                           unsigned long r_start,
@@ -419,6 +429,13 @@ void __init init_mem_mapping(void)
        max_pfn_mapped = 0; /* will get exact value next */
        min_pfn_mapped = real_end >> PAGE_SHIFT;
        last_start = start = real_end;
+
+       /*
+        * We start from the top (end of memory) and go to the bottom.
+        * The memblock_find_in_range() gets us a block of RAM from the
+        * end of RAM in [min_pfn_mapped, max_pfn_mapped) to use as new
+        * pages for the page tables.
+        */
        while (last_start > ISA_END_ADDRESS) {
                if (last_start > step_size) {
                        start = round_down(last_start - 1, step_size);
index 305c68b8d53825fbda7f635de0ae4c1af43fe03b..981c2dbd72cc45e6d77a98dab694d87b0523696a 100644 (file)
@@ -628,7 +628,9 @@ int pcibios_add_device(struct pci_dev *dev)
 
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
-               data = phys_to_virt(pa_data);
+               data = ioremap(pa_data, sizeof(*rom));
+               if (!data)
+                       return -ENOMEM;
 
                if (data->type == SETUP_PCI) {
                        rom = (struct pci_setup_rom *)data;
@@ -645,6 +647,7 @@ int pcibios_add_device(struct pci_dev *dev)
                        }
                }
                pa_data = data->next;
+               iounmap(data);
        }
        return 0;
 }
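The change above walks the setup_data list through ioremap() because the list is linked by physical addresses that need not be covered by the direct map, so phys_to_virt() is not safe. A sketch of the general pattern (using the real struct setup_data layout; the loop body is left as a comment):

    /* Walk the physically linked setup_data list, unmapping each node once
     * its successor is known. */
    u64 pa = boot_params.hdr.setup_data;

    while (pa) {
            struct setup_data *data = ioremap(pa, sizeof(*data));

            if (!data)
                    break;
            /* ... inspect data->type / data->len here ... */
            pa = data->next;
            iounmap(data);
    }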
index 0e0fabf173429006612eb014ec591df98e4b2deb..6eb18c42a28a3584546e87a57f9cccf5af3473e3 100644 (file)
@@ -141,11 +141,6 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
  */
 static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 {
-       if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
-                               || devfn == PCI_DEVFN(0, 0)
-                               || devfn == PCI_DEVFN(3, 0)))
-               return 1;
-
        /* This is a workaround for A0 LNC bug where PCI status register does
         * not have new CAP bit set. can not be written by SW either.
         *
@@ -155,7 +150,10 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
         */
        if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
                return 0;
-
+       if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
+                               || devfn == PCI_DEVFN(0, 0)
+                               || devfn == PCI_DEVFN(3, 0)))
+               return 1;
        return 0; /* langwell on others */
 }
 
index 4a9be6ddf05437e66ab8c94862a9b3e8a30e4373..48e8461057ba9346442aa5c72d6772b190dcf4c9 100644 (file)
@@ -295,11 +295,10 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                        int pos;
                        u32 table_offset, bir;
 
-                       pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-
+                       pos = dev->msix_cap;
                        pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
                                              &table_offset);
-                       bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+                       bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
 
                        map_irq.table_base = pci_resource_start(dev, bir);
                        map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
index 55856b2310d37d76240a2af47140f2deb97e8482..82089d8b1954b562087c65b0a187b5cdf441748f 100644 (file)
@@ -206,7 +206,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
        }
 
        if (boot_used_size && !finished) {
-               unsigned long size;
+               unsigned long size = 0;
                u32 attr;
                efi_status_t s;
                void *tmp;
index d0d59bfbccce01e2e019ffd64b7d57e3ddad9622..aabfb8380a1c6cf91e5af8aaed9dd30bd088d9de 100644 (file)
 336    i386    perf_event_open         sys_perf_event_open
 337    i386    recvmmsg                sys_recvmmsg                    compat_sys_recvmmsg
 338    i386    fanotify_init           sys_fanotify_init
-339    i386    fanotify_mark           sys_fanotify_mark               sys32_fanotify_mark
+339    i386    fanotify_mark           sys_fanotify_mark               compat_sys_fanotify_mark
 340    i386    prlimit64               sys_prlimit64
 341    i386    name_to_handle_at       sys_name_to_handle_at
 342    i386    open_by_handle_at       sys_open_by_handle_at           compat_sys_open_by_handle_at
index 53d4f680c9b59f498d41148325cd5a96ef1723e3..a492be2635ac048a527d856b796431ccac72d9fd 100644 (file)
 
 EXPORT_SYMBOL_GPL(hypercall_page);
 
+/*
+ * Pointer to the xen_vcpu_info structure or
+ * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
+ * and xen_vcpu_setup for details. By default it points to shared_info->vcpu_info
+ * but if the hypervisor supports VCPUOP_register_vcpu_info then it can point
+ * to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to
+ * acknowledge pending events.
+ * More subtly, it is also used by the patched versions of the IRQ
+ * enable/disable paths, e.g. xen_irq_enable_direct and xen_iret in PV mode.
+ *
+ * The desire to be able to do those mask/unmask operations as a single
+ * instruction by using the per-cpu offset held in %gs is the real reason
+ * vcpu info is in a per-cpu pointer and the original reason for this
+ * hypercall.
+ *
+ */
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+
+/*
+ * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info
+ * hypercall. This can be used both in PV and PVHVM mode. The structure
+ * overrides the default per_cpu(xen_vcpu, cpu) value.
+ */
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
@@ -157,6 +179,21 @@ static void xen_vcpu_setup(int cpu)
 
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
+       /*
+        * This path is called twice on PVHVM - first during bootup via
+        * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
+        * hotplugged: cpu_up -> xen_hvm_cpu_notify.
+        * As we can only do the VCPUOP_register_vcpu_info hypercall once,
+        * let's not overwrite its result.
+        *
+        * For PV it is called during restore (xen_vcpu_restore) and bootup
+        * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
+        * use this function.
+        */
+       if (xen_hvm_domain()) {
+               if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
+                       return;
+       }
        if (cpu < MAX_VIRT_CPUS)
                per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 
@@ -172,7 +209,12 @@ static void xen_vcpu_setup(int cpu)
 
        /* Check to see if the hypervisor will put the vcpu_info
           structure where we want it, which allows direct access via
-          a percpu-variable. */
+          a percpu-variable.
+          N.B. This hypercall can _only_ be called once per CPU. Subsequent
+          calls will error out with -EINVAL. This is because the hypervisor
+          has no unregister variant and this hypercall does not allow
+          overwriting info.mfn and info.offset.
+        */
        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
 
        if (err) {
@@ -387,6 +429,9 @@ static void __init xen_init_cpuid_mask(void)
                cpuid_leaf1_edx_mask &=
                        ~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
                          (1 << X86_FEATURE_ACPI));  /* disable ACPI */
+
+       cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));
+
        ax = 1;
        cx = 0;
        xen_cpuid(&ax, &bx, &cx, &dx);
@@ -1603,6 +1648,9 @@ void __ref xen_hvm_init_shared_info(void)
         * online but xen_hvm_init_shared_info is run at resume time too and
         * in that case multiple vcpus might be online. */
        for_each_online_cpu(cpu) {
+               /* Leave it to be NULL. */
+               if (cpu >= MAX_VIRT_CPUS)
+                       continue;
                per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
        }
 }
index 8ff37995d54e92518e3da0d7400e1dc661ddf0a7..fb44426fe931dcaee09a1b6918a2fe41785a1bc7 100644 (file)
@@ -576,24 +576,22 @@ void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
 {
        unsigned cpu;
        unsigned int this_cpu = smp_processor_id();
+       int xen_vector = xen_map_vector(vector);
 
-       if (!(num_online_cpus() > 1))
+       if (!(num_online_cpus() > 1) || (xen_vector < 0))
                return;
 
        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;
 
-               xen_smp_send_call_function_single_ipi(cpu);
+               xen_send_IPI_one(cpu, xen_vector);
        }
 }
 
 void xen_send_IPI_allbutself(int vector)
 {
-       int xen_vector = xen_map_vector(vector);
-
-       if (xen_vector >= 0)
-               xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
+       xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
index 8981a76d081a0c0cff16f89e99b1dc2b5e53bc98..c7c2d89efd76ac3c7627168b0a3d2f3964627f66 100644 (file)
@@ -5,7 +5,6 @@ extern void xen_send_IPI_mask(const struct cpumask *mask,
 extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                int vector);
 extern void xen_send_IPI_allbutself(int vector);
-extern void physflat_send_IPI_allbutself(int vector);
 extern void xen_send_IPI_all(int vector);
 extern void xen_send_IPI_self(int vector);
 
index 8b54603ce81613fee6783a6d3869b0021878feb4..3002ec1bb71a27d193ce4b1e041f8b3a3c5b49fa 100644 (file)
@@ -364,7 +364,7 @@ void __cpuinit xen_init_lock_cpu(int cpu)
        int irq;
        const char *name;
 
-       WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
+       WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
             cpu, per_cpu(lock_kicker_irq, cpu));
 
        /*
index ecb743bf05a54c4541f028f60170f45dcd20bb8b..536562c626a2fab4bb20c128abcf27f4404ca23f 100644 (file)
@@ -24,7 +24,7 @@ acpi-y                                += nvs.o
 # Power management related files
 acpi-y                         += wakeup.o
 acpi-y                         += sleep.o
-acpi-$(CONFIG_PM)              += device_pm.o
+acpi-y                         += device_pm.o
 acpi-$(CONFIG_ACPI_SLEEP)      += proc.o
 
 
@@ -38,7 +38,6 @@ acpi-y                                += processor_core.o
 acpi-y                         += ec.o
 acpi-$(CONFIG_ACPI_DOCK)       += dock.o
 acpi-y                         += pci_root.o pci_link.o pci_irq.o
-acpi-y                         += csrt.o
 acpi-$(CONFIG_X86_INTEL_LPSS)  += acpi_lpss.o
 acpi-y                         += acpi_platform.o
 acpi-y                         += power.o
index 00d2efd674df5b7a2ecca39ac2e161cfcf14b719..4f4e741d34b2c9616808177c31e7b488ee00ed8e 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/delay.h>
 #ifdef CONFIG_ACPI_PROCFS_POWER
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -74,6 +76,8 @@ static int acpi_ac_resume(struct device *dev);
 #endif
 static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
 
+static int ac_sleep_before_get_state_ms;
+
 static struct acpi_driver acpi_ac_driver = {
        .name = "ac",
        .class = ACPI_AC_CLASS,
@@ -252,6 +256,16 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
        case ACPI_AC_NOTIFY_STATUS:
        case ACPI_NOTIFY_BUS_CHECK:
        case ACPI_NOTIFY_DEVICE_CHECK:
+               /*
+                * A buggy BIOS may notify AC first and then sleep for
+                * a specific time before doing actual operations in the
+                * EC event handler (_Qxx). This will cause the AC state
+                * reported by the ACPI event to be incorrect, so wait the
+                * quirk-specified time for the EC event handler to make progress.
+                */
+               if (ac_sleep_before_get_state_ms > 0)
+                       msleep(ac_sleep_before_get_state_ms);
+
                acpi_ac_get_state(ac);
                acpi_bus_generate_proc_event(device, event, (u32) ac->state);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
@@ -264,6 +278,24 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
        return;
 }
 
+static int thinkpad_e530_quirk(const struct dmi_system_id *d)
+{
+       ac_sleep_before_get_state_ms = 1000;
+       return 0;
+}
+
+static struct dmi_system_id ac_dmi_table[] = {
+       {
+       .callback = thinkpad_e530_quirk,
+       .ident = "thinkpad e530",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
+               },
+       },
+       {},
+};
+
 static int acpi_ac_add(struct acpi_device *device)
 {
        int result = 0;
@@ -312,6 +344,7 @@ static int acpi_ac_add(struct acpi_device *device)
                kfree(ac);
        }
 
+       dmi_check_system(ac_dmi_table);
        return result;
 }
 
index b1c95422ce74ab79dd13937fd6a7ad260e1055e5..652fd5ce303c4a9efdbfa3f6d4eb330fba42a5bd 100644 (file)
@@ -35,11 +35,16 @@ ACPI_MODULE_NAME("acpi_lpss");
 
 struct lpss_device_desc {
        bool clk_required;
-       const char *clk_parent;
+       const char *clkdev_name;
        bool ltr_required;
        unsigned int prv_offset;
 };
 
+static struct lpss_device_desc lpss_dma_desc = {
+       .clk_required = true,
+       .clkdev_name = "hclk",
+};
+
 struct lpss_private_data {
        void __iomem *mmio_base;
        resource_size_t mmio_size;
@@ -49,7 +54,6 @@ struct lpss_private_data {
 
 static struct lpss_device_desc lpt_dev_desc = {
        .clk_required = true,
-       .clk_parent = "lpss_clk",
        .prv_offset = 0x800,
        .ltr_required = true,
 };
@@ -60,6 +64,9 @@ static struct lpss_device_desc lpt_sdio_dev_desc = {
 };
 
 static const struct acpi_device_id acpi_lpss_device_ids[] = {
+       /* Generic LPSS devices */
+       { "INTL9C60", (unsigned long)&lpss_dma_desc },
+
        /* Lynxpoint LPSS devices */
        { "INT33C0", (unsigned long)&lpt_dev_desc },
        { "INT33C1", (unsigned long)&lpt_dev_desc },
@@ -91,16 +98,27 @@ static int register_device_clock(struct acpi_device *adev,
                                 struct lpss_private_data *pdata)
 {
        const struct lpss_device_desc *dev_desc = pdata->dev_desc;
+       struct lpss_clk_data *clk_data;
 
        if (!lpss_clk_dev)
                lpt_register_clock_device();
 
-       if (!dev_desc->clk_parent || !pdata->mmio_base
+       clk_data = platform_get_drvdata(lpss_clk_dev);
+       if (!clk_data)
+               return -ENODEV;
+
+       if (dev_desc->clkdev_name) {
+               clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name,
+                                   dev_name(&adev->dev));
+               return 0;
+       }
+
+       if (!pdata->mmio_base
            || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
                return -ENODATA;
 
        pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev),
-                                      dev_desc->clk_parent, 0,
+                                      clk_data->name, 0,
                                       pdata->mmio_base + dev_desc->prv_offset,
                                       0, 0, NULL);
        if (IS_ERR(pdata->clk))
index fefc2ca7cc3e0199b5f98b707107d424dd40b8eb..33dc6a004802120fff9eb1e0e1e757dd542cc4f8 100644 (file)
@@ -250,10 +250,6 @@ static const char *cper_pcie_port_type_strs[] = {
 static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
                            const struct acpi_hest_generic_data *gdata)
 {
-#ifdef CONFIG_ACPI_APEI_PCIEAER
-       struct pci_dev *dev;
-#endif
-
        if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
                printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
                       pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
@@ -285,20 +281,6 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
                printk(
        "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
        pfx, pcie->bridge.secondary_status, pcie->bridge.control);
-#ifdef CONFIG_ACPI_APEI_PCIEAER
-       dev = pci_get_domain_bus_and_slot(pcie->device_id.segment,
-                       pcie->device_id.bus, pcie->device_id.function);
-       if (!dev) {
-               pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n",
-                       pcie->device_id.segment, pcie->device_id.bus,
-                       pcie->device_id.slot, pcie->device_id.function);
-               return;
-       }
-       if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO)
-               cper_print_aer(pfx, dev, gdata->error_severity,
-                               (struct aer_capability_regs *) pcie->aer_info);
-       pci_dev_put(dev);
-#endif
 }
 
 static const char *apei_estatus_section_flag_strs[] = {
index d668a8ae602bb4533b793911408cf138c047a4ce..403baf4dffc18dd45d62228c7cd1139efd25ae30 100644 (file)
@@ -454,7 +454,9 @@ static void ghes_do_proc(struct ghes *ghes,
                                aer_severity = cper_severity_to_aer(sev);
                                aer_recover_queue(pcie_err->device_id.segment,
                                                  pcie_err->device_id.bus,
-                                                 devfn, aer_severity);
+                                                 devfn, aer_severity,
+                                                 (struct aer_capability_regs *)
+                                                 pcie_err->aer_info);
                        }
 
                }
diff --git a/drivers/acpi/csrt.c b/drivers/acpi/csrt.c
deleted file mode 100644 (file)
index 5c15a91..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Support for Core System Resources Table (CSRT)
- *
- * Copyright (C) 2013, Intel Corporation
- * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
- *         Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) "ACPI: CSRT: " fmt
-
-#include <linux/acpi.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sizes.h>
-
-ACPI_MODULE_NAME("CSRT");
-
-static int __init acpi_csrt_parse_shared_info(struct platform_device *pdev,
-                                             const struct acpi_csrt_group *grp)
-{
-       const struct acpi_csrt_shared_info *si;
-       struct resource res[3];
-       size_t nres;
-       int ret;
-
-       memset(res, 0, sizeof(res));
-       nres = 0;
-
-       si = (const struct acpi_csrt_shared_info *)&grp[1];
-       /*
-        * The peripherals that are listed on CSRT typically support only
-        * 32-bit addresses so we only use the low part of MMIO base for
-        * now.
-        */
-       if (!si->mmio_base_high && si->mmio_base_low) {
-               /*
-                * There is no size of the memory resource in shared_info
-                * so we assume that it is 4k here.
-                */
-               res[nres].start = si->mmio_base_low;
-               res[nres].end = res[0].start + SZ_4K - 1;
-               res[nres++].flags = IORESOURCE_MEM;
-       }
-
-       if (si->gsi_interrupt) {
-               int irq = acpi_register_gsi(NULL, si->gsi_interrupt,
-                                           si->interrupt_mode,
-                                           si->interrupt_polarity);
-               res[nres].start = irq;
-               res[nres].end = irq;
-               res[nres++].flags = IORESOURCE_IRQ;
-       }
-
-       if (si->base_request_line || si->num_handshake_signals) {
-               /*
-                * We pass the driver a DMA resource describing the range
-                * of request lines the device supports.
-                */
-               res[nres].start = si->base_request_line;
-               res[nres].end = res[nres].start + si->num_handshake_signals - 1;
-               res[nres++].flags = IORESOURCE_DMA;
-       }
-
-       ret = platform_device_add_resources(pdev, res, nres);
-       if (ret) {
-               if (si->gsi_interrupt)
-                       acpi_unregister_gsi(si->gsi_interrupt);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int __init
-acpi_csrt_parse_resource_group(const struct acpi_csrt_group *grp)
-{
-       struct platform_device *pdev;
-       char vendor[5], name[16];
-       int ret, i;
-
-       vendor[0] = grp->vendor_id;
-       vendor[1] = grp->vendor_id >> 8;
-       vendor[2] = grp->vendor_id >> 16;
-       vendor[3] = grp->vendor_id >> 24;
-       vendor[4] = '\0';
-
-       if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
-               return -ENODEV;
-
-       snprintf(name, sizeof(name), "%s%04X", vendor, grp->device_id);
-       pdev = platform_device_alloc(name, PLATFORM_DEVID_AUTO);
-       if (!pdev)
-               return -ENOMEM;
-
-       /* Add resources based on the shared info */
-       ret = acpi_csrt_parse_shared_info(pdev, grp);
-       if (ret)
-               goto fail;
-
-       ret = platform_device_add(pdev);
-       if (ret)
-               goto fail;
-
-       for (i = 0; i < pdev->num_resources; i++)
-               dev_dbg(&pdev->dev, "%pR\n", &pdev->resource[i]);
-
-       return 0;
-
-fail:
-       platform_device_put(pdev);
-       return ret;
-}
-
-/*
- * CSRT or Core System Resources Table is a proprietary ACPI table
- * introduced by Microsoft. This table can contain devices that are not in
- * the system DSDT table. In particular DMA controllers might be described
- * here.
- *
- * We present these devices as normal platform devices that don't have ACPI
- * IDs or handle. The platform device name will be something like
- * <VENDOR><DEVID>.<n>.auto for example: INTL9C06.0.auto.
- */
-void __init acpi_csrt_init(void)
-{
-       struct acpi_csrt_group *grp, *end;
-       struct acpi_table_csrt *csrt;
-       acpi_status status;
-       int ret;
-
-       status = acpi_get_table(ACPI_SIG_CSRT, 0,
-                               (struct acpi_table_header **)&csrt);
-       if (ACPI_FAILURE(status)) {
-               if (status != AE_NOT_FOUND)
-                       pr_warn("failed to get the CSRT table\n");
-               return;
-       }
-
-       pr_debug("parsing CSRT table for devices\n");
-
-       grp = (struct acpi_csrt_group *)(csrt + 1);
-       end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
-
-       while (grp < end) {
-               ret = acpi_csrt_parse_resource_group(grp);
-               if (ret) {
-                       pr_warn("error in parsing resource group: %d\n", ret);
-                       return;
-               }
-
-               grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
-       }
-}
index 96de787e6104d5509b024d27d4ad9b32527a1d7b..bc493aa3af19d87e42a18da312815de3c6fbb125 100644 (file)
 #define _COMPONENT     ACPI_POWER_COMPONENT
 ACPI_MODULE_NAME("device_pm");
 
-static DEFINE_MUTEX(acpi_pm_notifier_lock);
-
-/**
- * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
- * @adev: ACPI device to add the notifier for.
- * @context: Context information to pass to the notifier routine.
- *
- * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
- * PM wakeup events.  For example, wakeup events may be generated for bridges
- * if one of the devices below the bridge is signaling wakeup, even if the
- * bridge itself doesn't have a wakeup GPE associated with it.
- */
-acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
-                                acpi_notify_handler handler, void *context)
-{
-       acpi_status status = AE_ALREADY_EXISTS;
-
-       mutex_lock(&acpi_pm_notifier_lock);
-
-       if (adev->wakeup.flags.notifier_present)
-               goto out;
-
-       status = acpi_install_notify_handler(adev->handle,
-                                            ACPI_SYSTEM_NOTIFY,
-                                            handler, context);
-       if (ACPI_FAILURE(status))
-               goto out;
-
-       adev->wakeup.flags.notifier_present = true;
-
- out:
-       mutex_unlock(&acpi_pm_notifier_lock);
-       return status;
-}
-
-/**
- * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
- * @adev: ACPI device to remove the notifier from.
- */
-acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
-                                   acpi_notify_handler handler)
-{
-       acpi_status status = AE_BAD_PARAMETER;
-
-       mutex_lock(&acpi_pm_notifier_lock);
-
-       if (!adev->wakeup.flags.notifier_present)
-               goto out;
-
-       status = acpi_remove_notify_handler(adev->handle,
-                                           ACPI_SYSTEM_NOTIFY,
-                                           handler);
-       if (ACPI_FAILURE(status))
-               goto out;
-
-       adev->wakeup.flags.notifier_present = false;
-
- out:
-       mutex_unlock(&acpi_pm_notifier_lock);
-       return status;
-}
-
 /**
  * acpi_power_state_string - String representation of ACPI device power state.
  * @state: ACPI device power state to return the string representation of.
@@ -385,6 +323,69 @@ bool acpi_bus_power_manageable(acpi_handle handle)
 }
 EXPORT_SYMBOL(acpi_bus_power_manageable);
 
+#ifdef CONFIG_PM
+static DEFINE_MUTEX(acpi_pm_notifier_lock);
+
+/**
+ * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
+ * @adev: ACPI device to add the notifier for.
+ * @context: Context information to pass to the notifier routine.
+ *
+ * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
+ * PM wakeup events.  For example, wakeup events may be generated for bridges
+ * if one of the devices below the bridge is signaling wakeup, even if the
+ * bridge itself doesn't have a wakeup GPE associated with it.
+ */
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
+                                acpi_notify_handler handler, void *context)
+{
+       acpi_status status = AE_ALREADY_EXISTS;
+
+       mutex_lock(&acpi_pm_notifier_lock);
+
+       if (adev->wakeup.flags.notifier_present)
+               goto out;
+
+       status = acpi_install_notify_handler(adev->handle,
+                                            ACPI_SYSTEM_NOTIFY,
+                                            handler, context);
+       if (ACPI_FAILURE(status))
+               goto out;
+
+       adev->wakeup.flags.notifier_present = true;
+
+ out:
+       mutex_unlock(&acpi_pm_notifier_lock);
+       return status;
+}
+
+/**
+ * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
+ * @adev: ACPI device to remove the notifier from.
+ */
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
+                                   acpi_notify_handler handler)
+{
+       acpi_status status = AE_BAD_PARAMETER;
+
+       mutex_lock(&acpi_pm_notifier_lock);
+
+       if (!adev->wakeup.flags.notifier_present)
+               goto out;
+
+       status = acpi_remove_notify_handler(adev->handle,
+                                           ACPI_SYSTEM_NOTIFY,
+                                           handler);
+       if (ACPI_FAILURE(status))
+               goto out;
+
+       adev->wakeup.flags.notifier_present = false;
+
+ out:
+       mutex_unlock(&acpi_pm_notifier_lock);
+       return status;
+}
+
 bool acpi_bus_can_wakeup(acpi_handle handle)
 {
        struct acpi_device *device;
@@ -1023,3 +1024,4 @@ void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
        mutex_unlock(&adev->physical_node_lock);
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
+#endif /* CONFIG_PM */
index d45b2871d33b12fa2453f4f91da1a1d55bf19edc..edc00818c80321086ff3938f5df3cf2afae04d61 100644 (file)
@@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 static int ec_poll(struct acpi_ec *ec)
 {
        unsigned long flags;
-       int repeat = 2; /* number of command restarts */
+       int repeat = 5; /* number of command restarts */
        while (repeat--) {
                unsigned long delay = jiffies +
                        msecs_to_jiffies(ec_delay);
@@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec)
                        }
                        advance_transaction(ec, acpi_ec_read_status(ec));
                } while (time_before(jiffies, delay));
-               if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
-                       break;
                pr_debug(PREFIX "controller reset, restart transaction\n");
                spin_lock_irqsave(&ec->lock, flags);
                start_transaction(ec);
index 6f1afd9118c806f728ce161033e932f3dcfc485f..297cbf456f86bee35300ef6c72058615663f563f 100644 (file)
@@ -35,7 +35,6 @@ void acpi_pci_link_init(void);
 void acpi_pci_root_hp_init(void);
 void acpi_platform_init(void);
 int acpi_sysfs_init(void);
-void acpi_csrt_init(void);
 #ifdef CONFIG_ACPI_CONTAINER
 void acpi_container_init(void);
 #else
index 1dd6f6c8587442ab1fcbdf7d7bd78d98ef551a9b..e427dc516c76d1d8d73b5a91c7d4f1f73b83cfbf 100644 (file)
@@ -641,7 +641,9 @@ static void _handle_hotplug_event_root(struct work_struct *work)
                /* bus enumerate */
                printk(KERN_DEBUG "%s: Bus check notify on %s\n", __func__,
                                 (char *)buffer.pointer);
-               if (!root)
+               if (root)
+                       acpiphp_check_host_bridge(handle);
+               else
                        handle_root_bridge_insertion(handle);
 
                break;
index bec717ffd25f5ff7c016f57365a380dc8e91b7fb..c266cdc117840bcdfc7909c2605840501b2c87c2 100644 (file)
@@ -95,9 +95,6 @@ static const struct acpi_device_id processor_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
 
-static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
-                        acpi_processor_suspend, acpi_processor_resume);
-
 static struct acpi_driver acpi_processor_driver = {
        .name = "processor",
        .class = ACPI_PROCESSOR_CLASS,
@@ -107,7 +104,6 @@ static struct acpi_driver acpi_processor_driver = {
                .remove = acpi_processor_remove,
                .notify = acpi_processor_notify,
                },
-       .drv.pm = &acpi_processor_pm,
 };
 
 #define INSTALL_NOTIFY_HANDLER         1
@@ -934,6 +930,8 @@ static int __init acpi_processor_init(void)
        if (result < 0)
                return result;
 
+       acpi_processor_syscore_init();
+
        acpi_processor_install_hotplug_notify();
 
        acpi_thermal_cpufreq_init();
@@ -956,6 +954,8 @@ static void __exit acpi_processor_exit(void)
 
        acpi_processor_uninstall_hotplug_notify();
 
+       acpi_processor_syscore_exit();
+
        acpi_bus_unregister_driver(&acpi_processor_driver);
 
        return;
index f0df2c9434d2ee14809db94e9150b48de0878a7d..eb133c77aadb5f38a7d60824958fa02457c4c67b 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/sched.h>       /* need_resched() */
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
+#include <linux/syscore_ops.h>
 
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
@@ -210,33 +211,41 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 
 #endif
 
+#ifdef CONFIG_PM_SLEEP
 static u32 saved_bm_rld;
 
-static void acpi_idle_bm_rld_save(void)
+int acpi_processor_suspend(void)
 {
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+       return 0;
 }
-static void acpi_idle_bm_rld_restore(void)
+
+void acpi_processor_resume(void)
 {
        u32 resumed_bm_rld;
 
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+       if (resumed_bm_rld == saved_bm_rld)
+               return;
 
-       if (resumed_bm_rld != saved_bm_rld)
-               acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+       acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
 }
 
-int acpi_processor_suspend(struct device *dev)
+static struct syscore_ops acpi_processor_syscore_ops = {
+       .suspend = acpi_processor_suspend,
+       .resume = acpi_processor_resume,
+};
+
+void acpi_processor_syscore_init(void)
 {
-       acpi_idle_bm_rld_save();
-       return 0;
+       register_syscore_ops(&acpi_processor_syscore_ops);
 }
 
-int acpi_processor_resume(struct device *dev)
+void acpi_processor_syscore_exit(void)
 {
-       acpi_idle_bm_rld_restore();
-       return 0;
+       unregister_syscore_ops(&acpi_processor_syscore_ops);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 #if defined(CONFIG_X86)
 static void tsc_check_state(int state)
index fe158fd4f1df401d74c04c612cca591f4a7d8eaf..44225cb15f3a248fb842c6e3b3d3f188f47aa99d 100644 (file)
@@ -1785,7 +1785,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
        acpi_set_pnp_ids(handle, &pnp, type);
 
        if (!pnp.type.hardware_id)
-               return;
+               goto out;
 
        /*
         * This relies on the fact that acpi_install_notify_handler() will not
@@ -1800,6 +1800,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
                }
        }
 
+out:
        acpi_free_pnp_ids(&pnp);
 }
 
@@ -2042,7 +2043,6 @@ int __init acpi_scan_init(void)
        acpi_pci_link_init();
        acpi_platform_init();
        acpi_lpss_init();
-       acpi_csrt_init();
        acpi_container_init();
        acpi_memory_hotplug_init();
 
index c3932d0876e0059c4a7c7aee7342b7123388e27c..5b32e15a65ceee63861992d51f7731a140946e3c 100644 (file)
@@ -456,6 +456,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
                },
        },
+       {
+        .callback = video_ignore_initial_backlight,
+        .ident = "HP 1000 Notebook PC",
+        .matches = {
+               DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
+               },
+       },
        {}
 };
 
index 66f67626f02ec42c5324085fc0a026f2ffca783d..e6bd910bc6edee4b55e3b6de7888bdd5c5ec06f6 100644 (file)
@@ -161,6 +161,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
                },
        },
+       {
+       .callback = video_detect_force_vendor,
+       .ident = "Asus UL30A",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
+               },
+       },
        { },
 };
 
index 4e94ba29cb8d321eae212fa27ee1e72658801c34..9d0cf019ce59a5034984ced1eb113f77b94206ff 100644 (file)
@@ -2,7 +2,7 @@
 /*
  *  acard-ahci.c - ACard AHCI SATA support
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 251e57d38942cdec01790d5fa02cb2c7aed87535..2b50dfdf1cfc3ea3a6e468e3776e64958fd38e5e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  ahci.c - AHCI SATA support
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
@@ -423,6 +423,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
          .driver_data = board_ahci_yes_fbs },                  /* 88se9125 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 on some Gigabyte */
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
index b830e6c9fe49f385b77405c188b100a7afcc0570..10b14d45cfd2f8d2d9bb712a75e7787cdf67bdc1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  ahci.h - Common AHCI SATA definitions and declarations
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 2f48123d74c4c85d84180e3e7a39c712a1b7a175..9a8a674e8fac9df8ac2c6d08515ce88e6b213fc8 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *    ata_piix.c - Intel PATA/SATA controllers
  *
- *    Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *    Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
@@ -151,6 +151,7 @@ enum piix_controller_ids {
        piix_pata_vmw,                  /* PIIX4 for VMware, spurious DMA_ERR */
        ich8_sata_snb,
        ich8_2port_sata_snb,
+       ich8_2port_sata_byt,
 };
 
 struct piix_map_db {
@@ -334,6 +335,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
        /* SATA Controller IDE (Wellsburg) */
        { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (BayTrail) */
+       { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+       { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
 
        { }     /* terminate list */
 };
@@ -441,6 +445,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
        [tolapai_sata]          = &tolapai_map_db,
        [ich8_sata_snb]         = &ich8_map_db,
        [ich8_2port_sata_snb]   = &ich8_2port_map_db,
+       [ich8_2port_sata_byt]   = &ich8_2port_map_db,
 };
 
 static struct pci_bits piix_enable_bits[] = {
@@ -1254,6 +1259,16 @@ static struct ata_port_info piix_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &piix_sata_ops,
        },
+
+       [ich8_2port_sata_byt] =
+       {
+               .flags          = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+               .pio_mask       = ATA_PIO4,
+               .mwdma_mask     = ATA_MWDMA2,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &piix_sata_ops,
+       },
+
 };
 
 #define AHCI_PCI_BAR 5
index 34c82167b9625f41666125685148b68774036fd9..a70ff154f586753d6c096b858ae5dbee0ad869c3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  libahci.c - Common AHCI SATA low-level routines
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 63c743baf920217181bdb6fcd7cde0f3dc61d0c6..f2184276539d885d167c2048a305847b51e58417 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  libata-core.c - helper library for ATA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
@@ -1602,6 +1602,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
        qc->tf = *tf;
        if (cdb)
                memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
+
+       /* some SATA bridges need us to indicate data xfer direction */
+       if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
+           dma_dir == DMA_FROM_DEVICE)
+               qc->tf.feature |= ATAPI_DMADIR;
+
        qc->flags |= ATA_QCFLAG_RESULT_TF;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
index f9476fb3ac43b44596c1134fe8fea55bffdd5f71..c69fcce505c03d06c7b20ae333ff9708a228f869 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  libata-eh.c - libata error handling
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index dd310b27b24c3dddcf7c4c7e6d4b3b042ee4dc53..0101af541436f8076b056c237fd2dc9cc8e4dce6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  libata-scsi.c - helper library for ATA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index d8af325a6bda8cc8f76dff8a69b9d746d5c9c3a6..b603720b877dd0344478ddecf234bb14f77975fa 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  libata-sff.c - helper library for PCI IDE BMDMA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index c1bfaf43d109119e5f47d709365f91c5c170c656..980b88e109fcf5109e164d155dfca86de735c2c2 100644 (file)
@@ -933,11 +933,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
        }
 
        mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem_res) {
-               err = -ENXIO;
-               goto err_rel_gpio;
-       }
-
        ide_base = devm_ioremap_resource(&pdev->dev, mem_res);
        if (IS_ERR(ide_base)) {
                err = PTR_ERR(ide_base);
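
The NULL check removed above is redundant: devm_ioremap_resource() itself rejects a missing resource and reports the failure through an error pointer, which the IS_ERR()/PTR_ERR() handling that follows already covers. Here is a tiny stand-alone sketch of that error-pointer convention, a simplified re-implementation for illustration rather than the kernel's err.h.

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_ERRNO       4095
    #define ERR_PTR(err)    ((void *)(intptr_t)(err))
    #define PTR_ERR(ptr)    ((long)(intptr_t)(ptr))
    #define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

    /* stand-in for devm_ioremap_resource(): a NULL resource is rejected
     * here, so the caller does not need its own NULL check first */
    static void *fake_ioremap_resource(void *res)
    {
            if (!res)
                    return ERR_PTR(-22);    /* -EINVAL */
            return res;                     /* pretend this is the mapping */
    }

    int main(void)
    {
            void *ide_base = fake_ioremap_resource(NULL);

            if (IS_ERR(ide_base)) {
                    printf("mapping failed: %ld\n", PTR_ERR(ide_base));
                    return 1;
            }
            return 0;
    }
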
index 505333340ad517ecc54e924463ab13e364534977..8ea6e6afd041db7736e6a69c88641253fa4b4078 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  pdc_adma.c - Pacific Digital Corporation ADMA
  *
- *  Maintained by:  Mark Lord <mlord@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *
  *  Copyright 2005 Mark Lord
  *
index fb0dd87f889378a952a339e048faa3629177779d..958ba2a420c34b0e5411b116d639214e700f5c47 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  sata_promise.c - Promise SATA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Mikael Pettersson <mikpe@it.uu.se>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
index 4799868bd7339c40ad863a317e92e2d0ddfffe9a..249c8a289bfd5dbb97f25fbcbd99ff241ac40c21 100644 (file)
@@ -549,6 +549,7 @@ static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc)
 
        /* start host DMA transaction */
        dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+       dmactl &= ~ATAPI_CONTROL1_STOP;
        dmactl |= ATAPI_CONTROL1_START;
        iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
 }
@@ -618,17 +619,16 @@ static struct ata_port_operations sata_rcar_port_ops = {
        .bmdma_status           = sata_rcar_bmdma_status,
 };
 
-static int sata_rcar_serr_interrupt(struct ata_port *ap)
+static void sata_rcar_serr_interrupt(struct ata_port *ap)
 {
        struct sata_rcar_priv *priv = ap->host->private_data;
        struct ata_eh_info *ehi = &ap->link.eh_info;
        int freeze = 0;
-       int handled = 0;
        u32 serror;
 
        serror = ioread32(priv->base + SCRSERR_REG);
        if (!serror)
-               return 0;
+               return;
 
        DPRINTK("SError @host_intr: 0x%x\n", serror);
 
@@ -641,7 +641,6 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap)
                ata_ehi_push_desc(ehi, "%s", "hotplug");
 
                freeze = serror & SERR_COMM_WAKE ? 0 : 1;
-               handled = 1;
        }
 
        /* freeze or abort */
@@ -649,11 +648,9 @@ static int sata_rcar_serr_interrupt(struct ata_port *ap)
                ata_port_freeze(ap);
        else
                ata_port_abort(ap);
-
-       return handled;
 }
 
-static int sata_rcar_ata_interrupt(struct ata_port *ap)
+static void sata_rcar_ata_interrupt(struct ata_port *ap)
 {
        struct ata_queued_cmd *qc;
        int handled = 0;
@@ -662,7 +659,9 @@ static int sata_rcar_ata_interrupt(struct ata_port *ap)
        if (qc)
                handled |= ata_bmdma_port_intr(ap, qc);
 
-       return handled;
+       /* be sure to clear ATA interrupt */
+       if (!handled)
+               sata_rcar_check_status(ap);
 }
 
 static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
@@ -677,20 +676,21 @@ static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
        spin_lock_irqsave(&host->lock, flags);
 
        sataintstat = ioread32(priv->base + SATAINTSTAT_REG);
+       sataintstat &= SATA_RCAR_INT_MASK;
        if (!sataintstat)
                goto done;
        /* ack */
-       iowrite32(sataintstat & ~SATA_RCAR_INT_MASK,
-                priv->base + SATAINTSTAT_REG);
+       iowrite32(~sataintstat & 0x7ff, priv->base + SATAINTSTAT_REG);
 
        ap = host->ports[0];
 
        if (sataintstat & SATAINTSTAT_ATA)
-               handled |= sata_rcar_ata_interrupt(ap);
+               sata_rcar_ata_interrupt(ap);
 
        if (sataintstat & SATAINTSTAT_SERR)
-               handled |= sata_rcar_serr_interrupt(ap);
+               sata_rcar_serr_interrupt(ap);
 
+       handled = 1;
 done:
        spin_unlock_irqrestore(&host->lock, flags);
 
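
The reworked handler first masks the raw status down to the interrupt sources it actually services, and the acknowledge write then clears exactly those bits by writing them back as 0 within the low 11 bits, assuming the clear-on-write-0 behaviour this register appears to have (bits written as 1 are left alone). A stand-alone sketch of that arithmetic follows; the bit positions are assumptions, not the driver's real definitions.

    #include <stdio.h>
    #include <stdint.h>

    #define SATAINTSTAT_ATA         (1u << 0)       /* assumed bit position */
    #define SATAINTSTAT_SERR        (1u << 4)       /* assumed bit position */
    #define SATA_RCAR_INT_MASK      (SATAINTSTAT_ATA | SATAINTSTAT_SERR)

    int main(void)
    {
            uint32_t sataintstat = 0x005;           /* pretend raw status read */
            uint32_t ack;

            sataintstat &= SATA_RCAR_INT_MASK;      /* only serviced sources */
            ack = ~sataintstat & 0x7ff;             /* 0 = clear, 1 = leave alone */

            printf("serviced=0x%03x ack=0x%03x\n", sataintstat, ack);
            return 0;
    }
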
index a7b31672c4b7fd88fb1e999308d531e17b1e8b49..0ae3ca4bf5c0a063077885f18e7375826a696c24 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  sata_sil.c - Silicon Image SATA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 7b7127a58f51de8b848086425a50ce1b78560474..9947010afc0f7973e2af77664f6ef39e53840358 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  sata_sx4.c - Promise SATA
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index 5913ea9d57b211959ed30b0850c1b7f8e1ab7f64..87f056e54a9d7566504925ca812347baf6b3e62f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  sata_via.c - VIA Serial ATA controllers
  *
- *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *  Maintained by:  Tejun Heo <tj@kernel.org>
  *                Please ALWAYS copy linux-ide@vger.kernel.org
  *                on emails.
  *
index 1a68f947ded86c45e546c837005704a1ed45f293..d414331b480e72afc9ac8ac505dee449f1c795d9 100644 (file)
@@ -1295,6 +1295,7 @@ int subsys_virtual_register(struct bus_type *subsys,
 
        return subsys_register(subsys, groups, virtual_dir);
 }
+EXPORT_SYMBOL_GPL(subsys_virtual_register);
 
 int __init buses_init(void)
 {
index 016312437577ca3378706560a77068dcda67de81..2499cefdcdf2429d9503506d5514a4943f9ec4e1 100644 (file)
@@ -572,9 +572,11 @@ int device_create_file(struct device *dev,
 
        if (dev) {
                WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
-                               "Write permission without 'store'\n");
+                       "Attribute %s: write permission without 'store'\n",
+                       attr->attr.name);
                WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
-                               "Read permission without 'show'\n");
+                       "Attribute %s: read permission without 'show'\n",
+                       attr->attr.name);
                error = sysfs_create_file(&dev->kobj, &attr->attr);
        }
 
index 39c32529b83374c36eda18188c393930732ad99f..5da914041305907d30f26289f12643b5c581be07 100644 (file)
@@ -61,24 +61,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
 int dev_pm_put_subsys_data(struct device *dev)
 {
        struct pm_subsys_data *psd;
-       int ret = 0;
+       int ret = 1;
 
        spin_lock_irq(&dev->power.lock);
 
        psd = dev_to_psd(dev);
-       if (!psd) {
-               ret = -EINVAL;
+       if (!psd)
                goto out;
-       }
 
        if (--psd->refcount == 0) {
                dev->power.subsys_data = NULL;
-               kfree(psd);
-               ret = 1;
+       } else {
+               psd = NULL;
+               ret = 0;
        }
 
  out:
        spin_unlock_irq(&dev->power.lock);
+       kfree(psd);
 
        return ret;
 }
index f1a29f8e9d33dbe45474172a24f952f32ce4ba47..9bf4371755f22fa93d4ccbe7d2bb605ec56a4048 100644 (file)
@@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 
        spin_lock(&brd->brd_lock);
        idx = sector >> PAGE_SECTORS_SHIFT;
+       page->index = idx;
        if (radix_tree_insert(&brd->brd_pages, idx, page)) {
                __free_page(page);
                page = radix_tree_lookup(&brd->brd_pages, idx);
                BUG_ON(!page);
                BUG_ON(page->index != idx);
-       } else
-               page->index = idx;
+       }
        spin_unlock(&brd->brd_lock);
 
        radix_tree_preload_end();
index ca63104136e0db46d0248aa290c355483989ec2e..d6d314027b5d45a95ee762ef3c55dcb0769e15a8 100644 (file)
 #define        SECTOR_SHIFT    9
 #define        SECTOR_SIZE     (1ULL << SECTOR_SHIFT)
 
+/*
+ * Increment the given counter and return its updated value.
+ * If the counter is already 0 it will not be incremented.
+ * If the counter is already at its maximum value, it returns
+ * -EINVAL without updating it.
+ */
+static int atomic_inc_return_safe(atomic_t *v)
+{
+       unsigned int counter;
+
+       counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+       if (counter <= (unsigned int)INT_MAX)
+               return (int)counter;
+
+       atomic_dec(v);
+
+       return -EINVAL;
+}
+
+/* Decrement the counter.  Return the resulting value, or -EINVAL */
+static int atomic_dec_return_safe(atomic_t *v)
+{
+       int counter;
+
+       counter = atomic_dec_return(v);
+       if (counter >= 0)
+               return counter;
+
+       atomic_inc(v);
+
+       return -EINVAL;
+}
+
 #define RBD_DRV_NAME "rbd"
 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
 
  * block device image metadata (in-memory version)
  */
 struct rbd_image_header {
-       /* These four fields never change for a given rbd image */
+       /* These six fields never change for a given rbd image */
        char *object_prefix;
-       u64 features;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
+       u64 stripe_unit;
+       u64 stripe_count;
+       u64 features;           /* Might be changeable someday? */
 
        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
-       char *snap_names;
-       u64 *snap_sizes;
-
-       u64 stripe_unit;
-       u64 stripe_count;
+       char *snap_names;       /* format 1 only */
+       u64 *snap_sizes;        /* format 1 only */
 };
 
 /*
@@ -225,6 +257,7 @@ struct rbd_obj_request {
                };
        };
        struct page             **copyup_pages;
+       u32                     copyup_page_count;
 
        struct ceph_osd_request *osd_req;
 
@@ -257,6 +290,7 @@ struct rbd_img_request {
                struct rbd_obj_request  *obj_request;   /* obj req initiator */
        };
        struct page             **copyup_pages;
+       u32                     copyup_page_count;
        spinlock_t              completion_lock;/* protects next_completion */
        u32                     next_completion;
        rbd_img_callback_t      callback;
@@ -311,6 +345,7 @@ struct rbd_device {
 
        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
+       atomic_t                parent_ref;
        struct rbd_device       *parent;
 
        /* protects updating the header */
@@ -359,7 +394,8 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev);
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
+static void rbd_spec_put(struct rbd_spec *spec);
 
 static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
@@ -426,7 +462,8 @@ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
 
 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -726,88 +763,123 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
 }
 
 /*
- * Create a new header structure, translate header format from the on-disk
- * header.
+ * Fill an rbd image header with information from the given format 1
+ * on-disk header.
  */
-static int rbd_header_from_disk(struct rbd_image_header *header,
+static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
 {
+       struct rbd_image_header *header = &rbd_dev->header;
+       bool first_time = header->object_prefix == NULL;
+       struct ceph_snap_context *snapc;
+       char *object_prefix = NULL;
+       char *snap_names = NULL;
+       u64 *snap_sizes = NULL;
        u32 snap_count;
-       size_t len;
        size_t size;
+       int ret = -ENOMEM;
        u32 i;
 
-       memset(header, 0, sizeof (*header));
+       /* Allocate this now to avoid having to handle failure below */
 
-       snap_count = le32_to_cpu(ondisk->snap_count);
+       if (first_time) {
+               size_t len;
 
-       len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
-       header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
-       if (!header->object_prefix)
-               return -ENOMEM;
-       memcpy(header->object_prefix, ondisk->object_prefix, len);
-       header->object_prefix[len] = '\0';
+               len = strnlen(ondisk->object_prefix,
+                               sizeof (ondisk->object_prefix));
+               object_prefix = kmalloc(len + 1, GFP_KERNEL);
+               if (!object_prefix)
+                       return -ENOMEM;
+               memcpy(object_prefix, ondisk->object_prefix, len);
+               object_prefix[len] = '\0';
+       }
 
+       /* Allocate the snapshot context and fill it in */
+
+       snap_count = le32_to_cpu(ondisk->snap_count);
+       snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
+       if (!snapc)
+               goto out_err;
+       snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
+               struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 
-               /* Save a copy of the snapshot names */
+               /* We'll keep a copy of the snapshot names... */
 
-               if (snap_names_len > (u64) SIZE_MAX)
-                       return -EIO;
-               header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
-               if (!header->snap_names)
+               if (snap_names_len > (u64)SIZE_MAX)
+                       goto out_2big;
+               snap_names = kmalloc(snap_names_len, GFP_KERNEL);
+               if (!snap_names)
                        goto out_err;
+
+               /* ...as well as the array of their sizes. */
+
+               size = snap_count * sizeof (*header->snap_sizes);
+               snap_sizes = kmalloc(size, GFP_KERNEL);
+               if (!snap_sizes)
+                       goto out_err;
+
                /*
-                * Note that rbd_dev_v1_header_read() guarantees
-                * the ondisk buffer we're working with has
+                * Copy the names, and fill in each snapshot's id
+                * and size.
+                *
+                * Note that rbd_dev_v1_header_info() guarantees the
+                * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, this memcpy() is safe.
                 */
-               memcpy(header->snap_names, &ondisk->snaps[snap_count],
-                       snap_names_len);
+               memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
+               snaps = ondisk->snaps;
+               for (i = 0; i < snap_count; i++) {
+                       snapc->snaps[i] = le64_to_cpu(snaps[i].id);
+                       snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
+               }
+       }
 
-               /* Record each snapshot's size */
+       /* We won't fail any more, fill in the header */
 
-               size = snap_count * sizeof (*header->snap_sizes);
-               header->snap_sizes = kmalloc(size, GFP_KERNEL);
-               if (!header->snap_sizes)
-                       goto out_err;
-               for (i = 0; i < snap_count; i++)
-                       header->snap_sizes[i] =
-                               le64_to_cpu(ondisk->snaps[i].image_size);
+       down_write(&rbd_dev->header_rwsem);
+       if (first_time) {
+               header->object_prefix = object_prefix;
+               header->obj_order = ondisk->options.order;
+               header->crypt_type = ondisk->options.crypt_type;
+               header->comp_type = ondisk->options.comp_type;
+               /* The rest aren't used for format 1 images */
+               header->stripe_unit = 0;
+               header->stripe_count = 0;
+               header->features = 0;
        } else {
-               header->snap_names = NULL;
-               header->snap_sizes = NULL;
+               ceph_put_snap_context(header->snapc);
+               kfree(header->snap_names);
+               kfree(header->snap_sizes);
        }
 
-       header->features = 0;   /* No features support in v1 images */
-       header->obj_order = ondisk->options.order;
-       header->crypt_type = ondisk->options.crypt_type;
-       header->comp_type = ondisk->options.comp_type;
-
-       /* Allocate and fill in the snapshot context */
+       /* The remaining fields always get updated (when we refresh) */
 
        header->image_size = le64_to_cpu(ondisk->image_size);
+       header->snapc = snapc;
+       header->snap_names = snap_names;
+       header->snap_sizes = snap_sizes;
 
-       header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
-       if (!header->snapc)
-               goto out_err;
-       header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
-       for (i = 0; i < snap_count; i++)
-               header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);
+       /* Make sure mapping size is consistent with header info */
 
-       return 0;
+       if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
+               if (rbd_dev->mapping.size != header->image_size)
+                       rbd_dev->mapping.size = header->image_size;
+
+       up_write(&rbd_dev->header_rwsem);
 
+       return 0;
+out_2big:
+       ret = -EIO;
 out_err:
-       kfree(header->snap_sizes);
-       header->snap_sizes = NULL;
-       kfree(header->snap_names);
-       header->snap_names = NULL;
-       kfree(header->object_prefix);
-       header->object_prefix = NULL;
+       kfree(snap_sizes);
+       kfree(snap_names);
+       ceph_put_snap_context(snapc);
+       kfree(object_prefix);
 
-       return -ENOMEM;
+       return ret;
 }
 
 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
@@ -934,20 +1006,11 @@ static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
 
 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
 {
-       const char *snap_name = rbd_dev->spec->snap_name;
-       u64 snap_id;
+       u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;
 
-       if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
-               snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
-               if (snap_id == CEPH_NOSNAP)
-                       return -ENOENT;
-       } else {
-               snap_id = CEPH_NOSNAP;
-       }
-
        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
@@ -958,11 +1021,6 @@ static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;
 
-       /* If we are mapping a snapshot it must be marked read-only */
-
-       if (snap_id != CEPH_NOSNAP)
-               rbd_dev->mapping.read_only = true;
-
        return 0;
 }
 
@@ -970,14 +1028,6 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
 {
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
-       rbd_dev->mapping.read_only = true;
-}
-
-static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
-{
-       rbd_dev->mapping.size = 0;
-       rbd_dev->mapping.features = 0;
-       rbd_dev->mapping.read_only = true;
 }
 
 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
@@ -1342,20 +1392,18 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
 }
 
-static void rbd_img_request_get(struct rbd_img_request *img_request)
-{
-       dout("%s: img %p (was %d)\n", __func__, img_request,
-               atomic_read(&img_request->kref.refcount));
-       kref_get(&img_request->kref);
-}
-
+static bool img_request_child_test(struct rbd_img_request *img_request);
+static void rbd_parent_request_destroy(struct kref *kref);
 static void rbd_img_request_destroy(struct kref *kref);
 static void rbd_img_request_put(struct rbd_img_request *img_request)
 {
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
-       kref_put(&img_request->kref, rbd_img_request_destroy);
+       if (img_request_child_test(img_request))
+               kref_put(&img_request->kref, rbd_parent_request_destroy);
+       else
+               kref_put(&img_request->kref, rbd_img_request_destroy);
 }
 
 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
@@ -1472,6 +1520,12 @@ static void img_request_child_set(struct rbd_img_request *img_request)
        smp_mb();
 }
 
+static void img_request_child_clear(struct rbd_img_request *img_request)
+{
+       clear_bit(IMG_REQ_CHILD, &img_request->flags);
+       smp_mb();
+}
+
 static bool img_request_child_test(struct rbd_img_request *img_request)
 {
        smp_mb();
@@ -1484,6 +1538,12 @@ static void img_request_layered_set(struct rbd_img_request *img_request)
        smp_mb();
 }
 
+static void img_request_layered_clear(struct rbd_img_request *img_request)
+{
+       clear_bit(IMG_REQ_LAYERED, &img_request->flags);
+       smp_mb();
+}
+
 static bool img_request_layered_test(struct rbd_img_request *img_request)
 {
        smp_mb();
@@ -1827,6 +1887,74 @@ static void rbd_obj_request_destroy(struct kref *kref)
        kmem_cache_free(rbd_obj_request_cache, obj_request);
 }
 
+/* It's OK to call this for a device with no parent */
+
+static void rbd_spec_put(struct rbd_spec *spec);
+static void rbd_dev_unparent(struct rbd_device *rbd_dev)
+{
+       rbd_dev_remove_parent(rbd_dev);
+       rbd_spec_put(rbd_dev->parent_spec);
+       rbd_dev->parent_spec = NULL;
+       rbd_dev->parent_overlap = 0;
+}
+
+/*
+ * Parent image reference counting is used to determine when an
+ * image's parent fields can be safely torn down--after there are no
+ * more in-flight requests to the parent image.  When the last
+ * reference is dropped, cleaning them up is safe.
+ */
+static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
+{
+       int counter;
+
+       if (!rbd_dev->parent_spec)
+               return;
+
+       counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
+       if (counter > 0)
+               return;
+
+       /* Last reference; clean up parent data structures */
+
+       if (!counter)
+               rbd_dev_unparent(rbd_dev);
+       else
+               rbd_warn(rbd_dev, "parent reference underflow\n");
+}
+
+/*
+ * If an image has a non-zero parent overlap, get a reference to its
+ * parent.
+ *
+ * We must get the reference before checking for the overlap to
+ * coordinate properly with zeroing the parent overlap in
+ * rbd_dev_v2_parent_info() when an image gets flattened.  We
+ * drop it again if there is no overlap.
+ *
+ * Returns true if the rbd device has a parent with a non-zero
+ * overlap and a reference for it was successfully taken, or
+ * false otherwise.
+ */
+static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
+{
+       int counter;
+
+       if (!rbd_dev->parent_spec)
+               return false;
+
+       counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+       if (counter > 0 && rbd_dev->parent_overlap)
+               return true;
+
+       /* Image was flattened, but parent is not yet torn down */
+
+       if (counter < 0)
+               rbd_warn(rbd_dev, "parent reference overflow\n");
+
+       return false;
+}
+
 /*
  * Caller is responsible for filling in the list of object requests
  * that comprises the image request, and the Linux request pointer
@@ -1835,8 +1963,7 @@ static void rbd_obj_request_destroy(struct kref *kref)
 static struct rbd_img_request *rbd_img_request_create(
                                        struct rbd_device *rbd_dev,
                                        u64 offset, u64 length,
-                                       bool write_request,
-                                       bool child_request)
+                                       bool write_request)
 {
        struct rbd_img_request *img_request;
 
@@ -1861,9 +1988,7 @@ static struct rbd_img_request *rbd_img_request_create(
        } else {
                img_request->snap_id = rbd_dev->spec->snap_id;
        }
-       if (child_request)
-               img_request_child_set(img_request);
-       if (rbd_dev->parent_spec)
+       if (rbd_dev_parent_get(rbd_dev))
                img_request_layered_set(img_request);
        spin_lock_init(&img_request->completion_lock);
        img_request->next_completion = 0;
@@ -1873,9 +1998,6 @@ static struct rbd_img_request *rbd_img_request_create(
        INIT_LIST_HEAD(&img_request->obj_requests);
        kref_init(&img_request->kref);
 
-       rbd_img_request_get(img_request);       /* Avoid a warning */
-       rbd_img_request_put(img_request);       /* TEMPORARY */
-
        dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
                write_request ? "write" : "read", offset, length,
                img_request);
@@ -1897,15 +2019,54 @@ static void rbd_img_request_destroy(struct kref *kref)
                rbd_img_obj_request_del(img_request, obj_request);
        rbd_assert(img_request->obj_request_count == 0);
 
+       if (img_request_layered_test(img_request)) {
+               img_request_layered_clear(img_request);
+               rbd_dev_parent_put(img_request->rbd_dev);
+       }
+
        if (img_request_write_test(img_request))
                ceph_put_snap_context(img_request->snapc);
 
-       if (img_request_child_test(img_request))
-               rbd_obj_request_put(img_request->obj_request);
-
        kmem_cache_free(rbd_img_request_cache, img_request);
 }
 
+static struct rbd_img_request *rbd_parent_request_create(
+                                       struct rbd_obj_request *obj_request,
+                                       u64 img_offset, u64 length)
+{
+       struct rbd_img_request *parent_request;
+       struct rbd_device *rbd_dev;
+
+       rbd_assert(obj_request->img_request);
+       rbd_dev = obj_request->img_request->rbd_dev;
+
+       parent_request = rbd_img_request_create(rbd_dev->parent,
+                                               img_offset, length, false);
+       if (!parent_request)
+               return NULL;
+
+       img_request_child_set(parent_request);
+       rbd_obj_request_get(obj_request);
+       parent_request->obj_request = obj_request;
+
+       return parent_request;
+}
+
+static void rbd_parent_request_destroy(struct kref *kref)
+{
+       struct rbd_img_request *parent_request;
+       struct rbd_obj_request *orig_request;
+
+       parent_request = container_of(kref, struct rbd_img_request, kref);
+       orig_request = parent_request->obj_request;
+
+       parent_request->obj_request = NULL;
+       rbd_obj_request_put(orig_request);
+       img_request_child_clear(parent_request);
+
+       rbd_img_request_destroy(kref);
+}
+
 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
 {
        struct rbd_img_request *img_request;
@@ -2114,7 +2275,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
 {
        struct rbd_img_request *img_request;
        struct rbd_device *rbd_dev;
-       u64 length;
+       struct page **pages;
        u32 page_count;
 
        rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
@@ -2124,12 +2285,14 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
 
        rbd_dev = img_request->rbd_dev;
        rbd_assert(rbd_dev);
-       length = (u64)1 << rbd_dev->header.obj_order;
-       page_count = (u32)calc_pages_for(0, length);
 
-       rbd_assert(obj_request->copyup_pages);
-       ceph_release_page_vector(obj_request->copyup_pages, page_count);
+       pages = obj_request->copyup_pages;
+       rbd_assert(pages != NULL);
        obj_request->copyup_pages = NULL;
+       page_count = obj_request->copyup_page_count;
+       rbd_assert(page_count);
+       obj_request->copyup_page_count = 0;
+       ceph_release_page_vector(pages, page_count);
 
        /*
         * We want the transfer count to reflect the size of the
@@ -2153,9 +2316,11 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
        struct ceph_osd_client *osdc;
        struct rbd_device *rbd_dev;
        struct page **pages;
-       int result;
-       u64 obj_size;
-       u64 xferred;
+       u32 page_count;
+       int img_result;
+       u64 parent_length;
+       u64 offset;
+       u64 length;
 
        rbd_assert(img_request_child_test(img_request));
 
@@ -2164,46 +2329,74 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
        pages = img_request->copyup_pages;
        rbd_assert(pages != NULL);
        img_request->copyup_pages = NULL;
+       page_count = img_request->copyup_page_count;
+       rbd_assert(page_count);
+       img_request->copyup_page_count = 0;
 
        orig_request = img_request->obj_request;
        rbd_assert(orig_request != NULL);
-       rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
-       result = img_request->result;
-       obj_size = img_request->length;
-       xferred = img_request->xferred;
+       rbd_assert(obj_request_type_valid(orig_request->type));
+       img_result = img_request->result;
+       parent_length = img_request->length;
+       rbd_assert(parent_length == img_request->xferred);
+       rbd_img_request_put(img_request);
 
-       rbd_dev = img_request->rbd_dev;
+       rbd_assert(orig_request->img_request);
+       rbd_dev = orig_request->img_request->rbd_dev;
        rbd_assert(rbd_dev);
-       rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
 
-       rbd_img_request_put(img_request);
+       /*
+        * If the overlap has become 0 (most likely because the
+        * image has been flattened) we need to free the pages
+        * and re-submit the original write request.
+        */
+       if (!rbd_dev->parent_overlap) {
+               struct ceph_osd_client *osdc;
 
-       if (result)
-               goto out_err;
+               ceph_release_page_vector(pages, page_count);
+               osdc = &rbd_dev->rbd_client->client->osdc;
+               img_result = rbd_obj_request_submit(osdc, orig_request);
+               if (!img_result)
+                       return;
+       }
 
-       /* Allocate the new copyup osd request for the original request */
+       if (img_result)
+               goto out_err;
 
-       result = -ENOMEM;
-       rbd_assert(!orig_request->osd_req);
+       /*
+        * The original osd request is of no use to us any more.
+        * We need a new one that can hold the two ops in a copyup
+        * request.  Allocate the new copyup osd request for the
+        * original request, and release the old one.
+        */
+       img_result = -ENOMEM;
        osd_req = rbd_osd_req_create_copyup(orig_request);
        if (!osd_req)
                goto out_err;
+       rbd_osd_req_destroy(orig_request->osd_req);
        orig_request->osd_req = osd_req;
        orig_request->copyup_pages = pages;
+       orig_request->copyup_page_count = page_count;
 
        /* Initialize the copyup op */
 
        osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
-       osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
+       osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
                                                false, false);
 
        /* Then the original write request op */
 
+       offset = orig_request->offset;
+       length = orig_request->length;
        osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
-                                       orig_request->offset,
-                                       orig_request->length, 0, 0);
-       osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
-                                       orig_request->length);
+                                       offset, length, 0, 0);
+       if (orig_request->type == OBJ_REQUEST_BIO)
+               osd_req_op_extent_osd_data_bio(osd_req, 1,
+                                       orig_request->bio_list, length);
+       else
+               osd_req_op_extent_osd_data_pages(osd_req, 1,
+                                       orig_request->pages, length,
+                                       offset & ~PAGE_MASK, false, false);
 
        rbd_osd_req_format_write(orig_request);
 
@@ -2211,13 +2404,13 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
 
        orig_request->callback = rbd_img_obj_copyup_callback;
        osdc = &rbd_dev->rbd_client->client->osdc;
-       result = rbd_obj_request_submit(osdc, orig_request);
-       if (!result)
+       img_result = rbd_obj_request_submit(osdc, orig_request);
+       if (!img_result)
                return;
 out_err:
        /* Record the error code and complete the request */
 
-       orig_request->result = result;
+       orig_request->result = img_result;
        orig_request->xferred = 0;
        obj_request_done_set(orig_request);
        rbd_obj_request_complete(orig_request);
@@ -2249,22 +2442,13 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
        int result;
 
        rbd_assert(obj_request_img_data_test(obj_request));
-       rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+       rbd_assert(obj_request_type_valid(obj_request->type));
 
        img_request = obj_request->img_request;
        rbd_assert(img_request != NULL);
        rbd_dev = img_request->rbd_dev;
        rbd_assert(rbd_dev->parent != NULL);
 
-       /*
-        * First things first.  The original osd request is of no
-        * use to use any more, we'll need a new one that can hold
-        * the two ops in a copyup request.  We'll get that later,
-        * but for now we can release the old one.
-        */
-       rbd_osd_req_destroy(obj_request->osd_req);
-       obj_request->osd_req = NULL;
-
        /*
         * Determine the byte range covered by the object in the
         * child image to which the original request was to be sent.
@@ -2295,18 +2479,16 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
        }
 
        result = -ENOMEM;
-       parent_request = rbd_img_request_create(rbd_dev->parent,
-                                               img_offset, length,
-                                               false, true);
+       parent_request = rbd_parent_request_create(obj_request,
+                                               img_offset, length);
        if (!parent_request)
                goto out_err;
-       rbd_obj_request_get(obj_request);
-       parent_request->obj_request = obj_request;
 
        result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
        if (result)
                goto out_err;
        parent_request->copyup_pages = pages;
+       parent_request->copyup_page_count = page_count;
 
        parent_request->callback = rbd_img_obj_parent_read_full_callback;
        result = rbd_img_request_submit(parent_request);
@@ -2314,6 +2496,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
                return 0;
 
        parent_request->copyup_pages = NULL;
+       parent_request->copyup_page_count = 0;
        parent_request->obj_request = NULL;
        rbd_obj_request_put(obj_request);
 out_err:
@@ -2331,6 +2514,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
 {
        struct rbd_obj_request *orig_request;
+       struct rbd_device *rbd_dev;
        int result;
 
        rbd_assert(!obj_request_img_data_test(obj_request));
@@ -2353,8 +2537,21 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
                obj_request->xferred, obj_request->length);
        rbd_obj_request_put(obj_request);
 
-       rbd_assert(orig_request);
-       rbd_assert(orig_request->img_request);
+       /*
+        * If the overlap has become 0 (most likely because the
+        * image has been flattened) we need to free the pages
+        * and re-submit the original write request.
+        */
+       rbd_dev = orig_request->img_request->rbd_dev;
+       if (!rbd_dev->parent_overlap) {
+               struct ceph_osd_client *osdc;
+
+               rbd_obj_request_put(orig_request);
+               osdc = &rbd_dev->rbd_client->client->osdc;
+               result = rbd_obj_request_submit(osdc, orig_request);
+               if (!result)
+                       return;
+       }
 
        /*
         * Our only purpose here is to determine whether the object
@@ -2512,14 +2709,36 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
        struct rbd_obj_request *obj_request;
        struct rbd_device *rbd_dev;
        u64 obj_end;
+       u64 img_xferred;
+       int img_result;
 
        rbd_assert(img_request_child_test(img_request));
 
+       /* First get what we need from the image request and release it */
+
        obj_request = img_request->obj_request;
+       img_xferred = img_request->xferred;
+       img_result = img_request->result;
+       rbd_img_request_put(img_request);
+
+       /*
+        * If the overlap has become 0 (most likely because the
+        * image has been flattened) we need to re-submit the
+        * original request.
+        */
        rbd_assert(obj_request);
        rbd_assert(obj_request->img_request);
+       rbd_dev = obj_request->img_request->rbd_dev;
+       if (!rbd_dev->parent_overlap) {
+               struct ceph_osd_client *osdc;
+
+               osdc = &rbd_dev->rbd_client->client->osdc;
+               img_result = rbd_obj_request_submit(osdc, obj_request);
+               if (!img_result)
+                       return;
+       }
 
-       obj_request->result = img_request->result;
+       obj_request->result = img_result;
        if (obj_request->result)
                goto out;
 
@@ -2532,7 +2751,6 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
         */
        rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
        obj_end = obj_request->img_offset + obj_request->length;
-       rbd_dev = obj_request->img_request->rbd_dev;
        if (obj_end > rbd_dev->parent_overlap) {
                u64 xferred = 0;
 
@@ -2540,43 +2758,39 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
                        xferred = rbd_dev->parent_overlap -
                                        obj_request->img_offset;
 
-               obj_request->xferred = min(img_request->xferred, xferred);
+               obj_request->xferred = min(img_xferred, xferred);
        } else {
-               obj_request->xferred = img_request->xferred;
+               obj_request->xferred = img_xferred;
        }
 out:
-       rbd_img_request_put(img_request);
        rbd_img_obj_request_read_callback(obj_request);
        rbd_obj_request_complete(obj_request);
 }
 
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
 {
-       struct rbd_device *rbd_dev;
        struct rbd_img_request *img_request;
        int result;
 
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request != NULL);
        rbd_assert(obj_request->result == (s32) -ENOENT);
-       rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+       rbd_assert(obj_request_type_valid(obj_request->type));
 
-       rbd_dev = obj_request->img_request->rbd_dev;
-       rbd_assert(rbd_dev->parent != NULL);
        /* rbd_read_finish(obj_request, obj_request->length); */
-       img_request = rbd_img_request_create(rbd_dev->parent,
+       img_request = rbd_parent_request_create(obj_request,
                                                obj_request->img_offset,
-                                               obj_request->length,
-                                               false, true);
+                                               obj_request->length);
        result = -ENOMEM;
        if (!img_request)
                goto out_err;
 
-       rbd_obj_request_get(obj_request);
-       img_request->obj_request = obj_request;
-
-       result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
-                                       obj_request->bio_list);
+       if (obj_request->type == OBJ_REQUEST_BIO)
+               result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
+                                               obj_request->bio_list);
+       else
+               result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
+                                               obj_request->pages);
        if (result)
                goto out_err;
 
@@ -2626,6 +2840,7 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
 {
        struct rbd_device *rbd_dev = (struct rbd_device *)data;
+       int ret;
 
        if (!rbd_dev)
                return;
@@ -2633,7 +2848,9 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
        dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
                rbd_dev->header_name, (unsigned long long)notify_id,
                (unsigned int)opcode);
-       (void)rbd_dev_refresh(rbd_dev);
+       ret = rbd_dev_refresh(rbd_dev);
+       if (ret)
+               rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
 
        rbd_obj_notify_ack(rbd_dev, notify_id);
 }
@@ -2642,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
  * Request sync osd watch/unwatch.  The value of "start" determines
  * whether a watch request is being initiated or torn down.
  */
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
 {
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct rbd_obj_request *obj_request;
@@ -2676,7 +2893,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
                                        rbd_dev->watch_request->osd_req);
 
        osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
-                               rbd_dev->watch_event->cookie, 0, start);
+                               rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
        rbd_osd_req_format_write(obj_request);
 
        ret = rbd_obj_request_submit(osdc, obj_request);
@@ -2869,9 +3086,16 @@ static void rbd_request_fn(struct request_queue *q)
                        goto end_request;       /* Shouldn't happen */
                }
 
+               result = -EIO;
+               if (offset + length > rbd_dev->mapping.size) {
+                       rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
+                               offset, length, rbd_dev->mapping.size);
+                       goto end_request;
+               }
+
                result = -ENOMEM;
                img_request = rbd_img_request_create(rbd_dev, offset, length,
-                                                       write_request, false);
+                                                       write_request);
                if (!img_request)
                        goto end_request;
 
@@ -3022,17 +3246,11 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
 }
 
 /*
- * Read the complete header for the given rbd device.
- *
- * Returns a pointer to a dynamically-allocated buffer containing
- * the complete and validated header.  Caller can pass the address
- * of a variable that will be filled in with the version of the
- * header object at the time it was read.
- *
- * Returns a pointer-coded errno if a failure occurs.
+ * Read the complete header for the given rbd device.  On successful
+ * return, the rbd_dev->header field will contain up-to-date
+ * information about the image.
  */
-static struct rbd_image_header_ondisk *
-rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
+static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
 {
        struct rbd_image_header_ondisk *ondisk = NULL;
        u32 snap_count = 0;
@@ -3057,22 +3275,22 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
                size += names_size;
                ondisk = kmalloc(size, GFP_KERNEL);
                if (!ondisk)
-                       return ERR_PTR(-ENOMEM);
+                       return -ENOMEM;
 
                ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
                                       0, size, ondisk);
                if (ret < 0)
-                       goto out_err;
+                       goto out;
                if ((size_t)ret < size) {
                        ret = -ENXIO;
                        rbd_warn(rbd_dev, "short header read (want %zd got %d)",
                                size, ret);
-                       goto out_err;
+                       goto out;
                }
                if (!rbd_dev_ondisk_valid(ondisk)) {
                        ret = -ENXIO;
                        rbd_warn(rbd_dev, "invalid header");
-                       goto out_err;
+                       goto out;
                }
 
                names_size = le64_to_cpu(ondisk->snap_names_len);
@@ -3080,85 +3298,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
                snap_count = le32_to_cpu(ondisk->snap_count);
        } while (snap_count != want_count);
 
-       return ondisk;
-
-out_err:
-       kfree(ondisk);
-
-       return ERR_PTR(ret);
-}
-
-/*
- * reload the ondisk the header
- */
-static int rbd_read_header(struct rbd_device *rbd_dev,
-                          struct rbd_image_header *header)
-{
-       struct rbd_image_header_ondisk *ondisk;
-       int ret;
-
-       ondisk = rbd_dev_v1_header_read(rbd_dev);
-       if (IS_ERR(ondisk))
-               return PTR_ERR(ondisk);
-       ret = rbd_header_from_disk(header, ondisk);
+       ret = rbd_header_from_disk(rbd_dev, ondisk);
+out:
        kfree(ondisk);
 
        return ret;
 }
 
-static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
-{
-       if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
-               return;
-
-       if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
-               sector_t size;
-
-               rbd_dev->mapping.size = rbd_dev->header.image_size;
-               size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
-               dout("setting size to %llu sectors", (unsigned long long)size);
-               set_capacity(rbd_dev->disk, size);
-       }
-}
-
-/*
- * only read the first part of the ondisk header, without the snaps info
- */
-static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
-{
-       int ret;
-       struct rbd_image_header h;
-
-       ret = rbd_read_header(rbd_dev, &h);
-       if (ret < 0)
-               return ret;
-
-       down_write(&rbd_dev->header_rwsem);
-
-       /* Update image size, and check for resize of mapped image */
-       rbd_dev->header.image_size = h.image_size;
-       rbd_update_mapping_size(rbd_dev);
-
-       /* rbd_dev->header.object_prefix shouldn't change */
-       kfree(rbd_dev->header.snap_sizes);
-       kfree(rbd_dev->header.snap_names);
-       /* osd requests may still refer to snapc */
-       ceph_put_snap_context(rbd_dev->header.snapc);
-
-       rbd_dev->header.image_size = h.image_size;
-       rbd_dev->header.snapc = h.snapc;
-       rbd_dev->header.snap_names = h.snap_names;
-       rbd_dev->header.snap_sizes = h.snap_sizes;
-       /* Free the extra copy of the object prefix */
-       if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
-               rbd_warn(rbd_dev, "object prefix changed (ignoring)");
-       kfree(h.object_prefix);
-
-       up_write(&rbd_dev->header_rwsem);
-
-       return ret;
-}
-
 /*
  * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
  * has disappeared from the (just updated) snapshot context.
@@ -3180,26 +3326,29 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
 
 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 {
-       u64 image_size;
+       u64 mapping_size;
        int ret;
 
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
-       image_size = rbd_dev->header.image_size;
+       mapping_size = rbd_dev->mapping.size;
        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        if (rbd_dev->image_format == 1)
-               ret = rbd_dev_v1_refresh(rbd_dev);
+               ret = rbd_dev_v1_header_info(rbd_dev);
        else
-               ret = rbd_dev_v2_refresh(rbd_dev);
+               ret = rbd_dev_v2_header_info(rbd_dev);
 
        /* If it's a mapped snapshot, validate its EXISTS flag */
 
        rbd_exists_validate(rbd_dev);
        mutex_unlock(&ctl_mutex);
-       if (ret)
-               rbd_warn(rbd_dev, "got notification but failed to "
-                          " update snaps: %d\n", ret);
-       if (image_size != rbd_dev->header.image_size)
+       if (mapping_size != rbd_dev->mapping.size) {
+               sector_t size;
+
+               size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
+               dout("setting size to %llu sectors", (unsigned long long)size);
+               set_capacity(rbd_dev->disk, size);
                revalidate_disk(rbd_dev->disk);
+       }
 
        return ret;
 }
@@ -3403,6 +3552,8 @@ static ssize_t rbd_image_refresh(struct device *dev,
        int ret;
 
        ret = rbd_dev_refresh(rbd_dev);
+       if (ret)
+               rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
 
        return ret < 0 ? ret : size;
 }
@@ -3501,6 +3652,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
 
        spin_lock_init(&rbd_dev->lock);
        rbd_dev->flags = 0;
+       atomic_set(&rbd_dev->parent_ref, 0);
        INIT_LIST_HEAD(&rbd_dev->node);
        init_rwsem(&rbd_dev->header_rwsem);
 
@@ -3650,6 +3802,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        __le64 snapid;
        void *p;
        void *end;
+       u64 pool_id;
        char *image_id;
        u64 overlap;
        int ret;
@@ -3680,18 +3833,37 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        p = reply_buf;
        end = reply_buf + ret;
        ret = -ERANGE;
-       ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
-       if (parent_spec->pool_id == CEPH_NOPOOL)
+       ceph_decode_64_safe(&p, end, pool_id, out_err);
+       if (pool_id == CEPH_NOPOOL) {
+               /*
+                * Either the parent never existed, or we have
+                * record of it but the image got flattened so it no
+                * longer has a parent.  When the parent of a
+                * layered image disappears we immediately set the
+                * overlap to 0.  The effect of this is that all new
+                * requests will be treated as if the image had no
+                * parent.
+                */
+               if (rbd_dev->parent_overlap) {
+                       rbd_dev->parent_overlap = 0;
+                       smp_mb();
+                       rbd_dev_parent_put(rbd_dev);
+                       pr_info("%s: clone image has been flattened\n",
+                               rbd_dev->disk->disk_name);
+               }
+
                goto out;       /* No parent?  No problem. */
+       }
 
        /* The ceph file layout needs to fit pool id in 32 bits */
 
        ret = -EIO;
-       if (parent_spec->pool_id > (u64)U32_MAX) {
+       if (pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
-                       (unsigned long long)parent_spec->pool_id, U32_MAX);
+                       (unsigned long long)pool_id, U32_MAX);
                goto out_err;
        }
+       parent_spec->pool_id = pool_id;
 
        image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
        if (IS_ERR(image_id)) {
@@ -3702,9 +3874,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
        ceph_decode_64_safe(&p, end, overlap, out_err);
 
-       rbd_dev->parent_overlap = overlap;
-       rbd_dev->parent_spec = parent_spec;
-       parent_spec = NULL;     /* rbd_dev now owns this */
+       if (overlap) {
+               rbd_spec_put(rbd_dev->parent_spec);
+               rbd_dev->parent_spec = parent_spec;
+               parent_spec = NULL;     /* rbd_dev now owns this */
+               rbd_dev->parent_overlap = overlap;
+       } else {
+               rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
+       }
 out:
        ret = 0;
 out_err:
@@ -4002,6 +4179,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
        for (i = 0; i < snap_count; i++)
                snapc->snaps[i] = ceph_decode_64(&p);
 
+       ceph_put_snap_context(rbd_dev->header.snapc);
        rbd_dev->header.snapc = snapc;
 
        dout("  snap context seq = %llu, snap_count = %u\n",
@@ -4053,21 +4231,56 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
        return snap_name;
 }
 
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
 {
+       bool first_time = rbd_dev->header.object_prefix == NULL;
        int ret;
 
        down_write(&rbd_dev->header_rwsem);
 
+       if (first_time) {
+               ret = rbd_dev_v2_header_onetime(rbd_dev);
+               if (ret)
+                       goto out;
+       }
+
+       /*
+        * If the image supports layering, get the parent info.  We
+        * need to probe the first time regardless.  Thereafter we
+        * only need to if there's a parent, to see if it has
+        * disappeared due to the mapped image getting flattened.
+        */
+       if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
+                       (first_time || rbd_dev->parent_spec)) {
+               bool warn;
+
+               ret = rbd_dev_v2_parent_info(rbd_dev);
+               if (ret)
+                       goto out;
+
+               /*
+                * Print a warning if this is the initial probe and
+                * the image has a parent.  Don't print it if the
+                * image now being probed is itself a parent.  We
+                * can tell at this point because we won't know its
+                * pool name yet (just its pool id).
+                */
+               warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
+               if (first_time && warn)
+                       rbd_warn(rbd_dev, "WARNING: kernel layering "
+                                       "is EXPERIMENTAL!");
+       }
+
        ret = rbd_dev_v2_image_size(rbd_dev);
        if (ret)
                goto out;
-       rbd_update_mapping_size(rbd_dev);
+
+       if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
+               if (rbd_dev->mapping.size != rbd_dev->header.image_size)
+                       rbd_dev->mapping.size = rbd_dev->header.image_size;
 
        ret = rbd_dev_v2_snap_context(rbd_dev);
        dout("rbd_dev_v2_snap_context returned %d\n", ret);
-       if (ret)
-               goto out;
 out:
        up_write(&rbd_dev->header_rwsem);
 
@@ -4490,10 +4703,10 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
        struct rbd_image_header *header;
 
-       rbd_dev_remove_parent(rbd_dev);
-       rbd_spec_put(rbd_dev->parent_spec);
-       rbd_dev->parent_spec = NULL;
-       rbd_dev->parent_overlap = 0;
+       /* Drop the parent reference unless it's already been dropped (or there is none) */
+
+       if (rbd_dev->parent_overlap)
+               rbd_dev_parent_put(rbd_dev);
 
        /* Free dynamic fields from the header, then zero it out */
 
@@ -4505,72 +4718,22 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
        memset(header, 0, sizeof (*header));
 }
 
-static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
 {
        int ret;
 
-       /* Populate rbd image metadata */
-
-       ret = rbd_read_header(rbd_dev, &rbd_dev->header);
-       if (ret < 0)
-               goto out_err;
-
-       /* Version 1 images have no parent (no layering) */
-
-       rbd_dev->parent_spec = NULL;
-       rbd_dev->parent_overlap = 0;
-
-       dout("discovered version 1 image, header name is %s\n",
-               rbd_dev->header_name);
-
-       return 0;
-
-out_err:
-       kfree(rbd_dev->header_name);
-       rbd_dev->header_name = NULL;
-       kfree(rbd_dev->spec->image_id);
-       rbd_dev->spec->image_id = NULL;
-
-       return ret;
-}
-
-static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
-{
-       int ret;
-
-       ret = rbd_dev_v2_image_size(rbd_dev);
-       if (ret)
-               goto out_err;
-
-       /* Get the object prefix (a.k.a. block_name) for the image */
-
        ret = rbd_dev_v2_object_prefix(rbd_dev);
        if (ret)
                goto out_err;
 
-       /* Get the and check features for the image */
-
+       /*
+        * Get and check the features for the image.  Currently the
+        * features are assumed to never change.
+        */
        ret = rbd_dev_v2_features(rbd_dev);
        if (ret)
                goto out_err;
 
-       /* If the image supports layering, get the parent info */
-
-       if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
-               ret = rbd_dev_v2_parent_info(rbd_dev);
-               if (ret)
-                       goto out_err;
-
-               /*
-                * Don't print a warning for parent images.  We can
-                * tell this point because we won't know its pool
-                * name yet (just its pool id).
-                */
-               if (rbd_dev->spec->pool_name)
-                       rbd_warn(rbd_dev, "WARNING: kernel layering "
-                                       "is EXPERIMENTAL!");
-       }
-
        /* If the image supports fancy striping, get its parameters */
 
        if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
@@ -4578,28 +4741,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
                if (ret < 0)
                        goto out_err;
        }
-
-       /* crypto and compression type aren't (yet) supported for v2 images */
-
-       rbd_dev->header.crypt_type = 0;
-       rbd_dev->header.comp_type = 0;
-
-       /* Get the snapshot context, plus the header version */
-
-       ret = rbd_dev_v2_snap_context(rbd_dev);
-       if (ret)
-               goto out_err;
-
-       dout("discovered version 2 image, header name is %s\n",
-               rbd_dev->header_name);
+       /* No support for crypto and compression types for format 2 images */
 
        return 0;
 out_err:
-       rbd_dev->parent_overlap = 0;
-       rbd_spec_put(rbd_dev->parent_spec);
-       rbd_dev->parent_spec = NULL;
-       kfree(rbd_dev->header_name);
-       rbd_dev->header_name = NULL;
+       rbd_dev->header.features = 0;
        kfree(rbd_dev->header.object_prefix);
        rbd_dev->header.object_prefix = NULL;
 
@@ -4628,15 +4774,16 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
        if (!parent)
                goto out_err;
 
-       ret = rbd_dev_image_probe(parent);
+       ret = rbd_dev_image_probe(parent, false);
        if (ret < 0)
                goto out_err;
        rbd_dev->parent = parent;
+       atomic_set(&rbd_dev->parent_ref, 1);
 
        return 0;
 out_err:
        if (parent) {
-               rbd_spec_put(rbd_dev->parent_spec);
+               rbd_dev_unparent(rbd_dev);
                kfree(rbd_dev->header_name);
                rbd_dev_destroy(parent);
        } else {
@@ -4651,10 +4798,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 {
        int ret;
 
-       ret = rbd_dev_mapping_set(rbd_dev);
-       if (ret)
-               return ret;
-
        /* generate unique id: find highest unique id, add one */
        rbd_dev_id_get(rbd_dev);
 
@@ -4676,13 +4819,17 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
        if (ret)
                goto err_out_blkdev;
 
-       ret = rbd_bus_add_dev(rbd_dev);
+       ret = rbd_dev_mapping_set(rbd_dev);
        if (ret)
                goto err_out_disk;
+       set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
+
+       ret = rbd_bus_add_dev(rbd_dev);
+       if (ret)
+               goto err_out_mapping;
 
        /* Everything's ready.  Announce the disk to the world. */
 
-       set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
        set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        add_disk(rbd_dev->disk);
 
@@ -4691,6 +4838,8 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 
        return ret;
 
+err_out_mapping:
+       rbd_dev_mapping_clear(rbd_dev);
 err_out_disk:
        rbd_free_disk(rbd_dev);
 err_out_blkdev:
@@ -4731,12 +4880,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
 
 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
 {
-       int ret;
-
        rbd_dev_unprobe(rbd_dev);
-       ret = rbd_dev_header_watch_sync(rbd_dev, 0);
-       if (ret)
-               rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
        rbd_dev->image_format = 0;
@@ -4748,10 +4892,11 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
 
 /*
  * Probe for the existence of the header object for the given rbd
- * device.  For format 2 images this includes determining the image
- * id.
+ * device.  If this image is the one being mapped (i.e., not a
+ * parent), initiate a watch on its header object before using that
+ * object to get detailed information about the rbd image.
  */
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
 {
        int ret;
        int tmp;
@@ -4771,14 +4916,16 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
        if (ret)
                goto err_out_format;
 
-       ret = rbd_dev_header_watch_sync(rbd_dev, 1);
-       if (ret)
-               goto out_header_name;
+       if (mapping) {
+               ret = rbd_dev_header_watch_sync(rbd_dev, true);
+               if (ret)
+                       goto out_header_name;
+       }
 
        if (rbd_dev->image_format == 1)
-               ret = rbd_dev_v1_probe(rbd_dev);
+               ret = rbd_dev_v1_header_info(rbd_dev);
        else
-               ret = rbd_dev_v2_probe(rbd_dev);
+               ret = rbd_dev_v2_header_info(rbd_dev);
        if (ret)
                goto err_out_watch;
 
@@ -4787,15 +4934,22 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
                goto err_out_probe;
 
        ret = rbd_dev_probe_parent(rbd_dev);
-       if (!ret)
-               return 0;
+       if (ret)
+               goto err_out_probe;
+
+       dout("discovered format %u image, header name is %s\n",
+               rbd_dev->image_format, rbd_dev->header_name);
 
+       return 0;
 err_out_probe:
        rbd_dev_unprobe(rbd_dev);
 err_out_watch:
-       tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
-       if (tmp)
-               rbd_warn(rbd_dev, "unable to tear down watch request\n");
+       if (mapping) {
+               tmp = rbd_dev_header_watch_sync(rbd_dev, false);
+               if (tmp)
+                       rbd_warn(rbd_dev, "unable to tear down "
+                                       "watch request (%d)\n", tmp);
+       }
 out_header_name:
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
@@ -4819,6 +4973,7 @@ static ssize_t rbd_add(struct bus_type *bus,
        struct rbd_spec *spec = NULL;
        struct rbd_client *rbdc;
        struct ceph_osd_client *osdc;
+       bool read_only;
        int rc = -ENOMEM;
 
        if (!try_module_get(THIS_MODULE))
@@ -4828,6 +4983,9 @@ static ssize_t rbd_add(struct bus_type *bus,
        rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
        if (rc < 0)
                goto err_out_module;
+       read_only = rbd_opts->read_only;
+       kfree(rbd_opts);
+       rbd_opts = NULL;        /* done with this */
 
        rbdc = rbd_get_client(ceph_opts);
        if (IS_ERR(rbdc)) {
@@ -4858,14 +5016,16 @@ static ssize_t rbd_add(struct bus_type *bus,
        rbdc = NULL;            /* rbd_dev now owns this */
        spec = NULL;            /* rbd_dev now owns this */
 
-       rbd_dev->mapping.read_only = rbd_opts->read_only;
-       kfree(rbd_opts);
-       rbd_opts = NULL;        /* done with this */
-
-       rc = rbd_dev_image_probe(rbd_dev);
+       rc = rbd_dev_image_probe(rbd_dev, true);
        if (rc < 0)
                goto err_out_rbd_dev;
 
+       /* If we are mapping a snapshot it must be marked read-only */
+
+       if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
+               read_only = true;
+       rbd_dev->mapping.read_only = read_only;
+
        rc = rbd_dev_device_setup(rbd_dev);
        if (!rc)
                return count;
@@ -4911,7 +5071,7 @@ static void rbd_dev_device_release(struct device *dev)
 
        rbd_free_disk(rbd_dev);
        clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
-       rbd_dev_clear_mapping(rbd_dev);
+       rbd_dev_mapping_clear(rbd_dev);
        unregister_blkdev(rbd_dev->major, rbd_dev->name);
        rbd_dev->major = 0;
        rbd_dev_id_put(rbd_dev);
@@ -4978,10 +5138,13 @@ static ssize_t rbd_remove(struct bus_type *bus,
        spin_unlock_irq(&rbd_dev->lock);
        if (ret < 0)
                goto done;
-       ret = count;
        rbd_bus_del_dev(rbd_dev);
+       ret = rbd_dev_header_watch_sync(rbd_dev, false);
+       if (ret)
+               rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
        rbd_dev_image_release(rbd_dev);
        module_put(THIS_MODULE);
+       ret = count;
 done:
        mutex_unlock(&ctl_mutex);
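
Several of the rbd hunks above share one discipline: before a reference-counted pointer (the snap context, the parent spec) is overwritten during a refresh, the reference held on the old value is dropped so nothing leaks. rbd uses its own put helpers for this; the sketch below only generalizes the replace-and-put idea with kref, and all names in it are illustrative, not rbd's.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct blob {
        struct kref kref;
        /* ... payload ... */
};

static void blob_release(struct kref *kref)
{
        kfree(container_of(kref, struct blob, kref));
}

/* Install new_blob in *slot, dropping the reference held on the old value. */
static void blob_replace(struct blob **slot, struct blob *new_blob)
{
        struct blob *old = *slot;

        *slot = new_blob;               /* caller transfers its reference */
        if (old)
                kref_put(&old->kref, blob_release);
}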
 
index f8ef15f37c5ec67eb6a35151b756c7496a1a666d..3fd130fdfbc1f742aa706ea96aa5d20361d05d37 100644 (file)
@@ -1160,8 +1160,7 @@ static int ace_probe(struct platform_device *dev)
        dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
 
        /* device id and bus width */
-       of_property_read_u32(dev->dev.of_node, "port-number", &id);
-       if (id < 0)
+       if (of_property_read_u32(dev->dev.of_node, "port-number", &id))
                id = 0;
        if (of_find_property(dev->dev.of_node, "8-bit", NULL))
                bus_width = ACE_BUS_WIDTH_8;
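
The xsysace hunk above stops sign-testing the output variable, which of_property_read_u32() does not write on failure, and checks the function's return value instead, the only reliable failure indication. A minimal sketch of that pattern; the "port-number" property comes from the hunk, the helper around it is illustrative.

#include <linux/of.h>

/* Read an optional "port-number" property, defaulting to 0 if absent. */
static u32 example_port_number(struct device_node *np)
{
        u32 id;

        /* Returns 0 on success, a negative errno if the property is
         * missing or malformed; 'id' is left untouched on failure. */
        if (of_property_read_u32(np, "port-number", &id))
                id = 0;

        return id;
}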
index 4ca35e8a5d8c60a73a375dd132d0bbe196810a23..19a12ac64a9ec966149286a84a2f0db5e3e0ee60 100644 (file)
@@ -167,11 +167,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
        clk_prepare_enable(mxc_rng->clk);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               err = -ENOENT;
-               goto err_region;
-       }
-
        mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mxc_rng->mem)) {
                err = PTR_ERR(mxc_rng->mem);
@@ -189,7 +184,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
        return 0;
 
 err_ioremap:
-err_region:
        clk_disable_unprepare(mxc_rng->clk);
 
 out:
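
This hunk (and the omap-rng one that follows) relies on devm_ioremap_resource(), which prints an error and returns ERR_PTR(-EINVAL) when handed a NULL resource, so the explicit platform_get_resource() failure check can go. A hedged sketch of the resulting probe idiom with illustrative names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        /* No NULL check needed on 'res': devm_ioremap_resource()
         * handles a missing resource and reports the error itself. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        return 0;
}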
index 749dc16ca2cc084b81c78c47864ce430489e7e08..d2903e7722704843ac254390046b65b5b5540d6c 100644 (file)
@@ -119,11 +119,6 @@ static int omap_rng_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, priv);
 
        priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!priv->mem_res) {
-               ret = -ENOENT;
-               goto err_ioremap;
-       }
-
        priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
index cdd4c09fda963b2de1e1e3371090a93625d0b771..a22a7a502740ff42a4ce4413b620026d228909ee 100644 (file)
@@ -95,9 +95,9 @@ struct si_sm_data {
        enum bt_states  state;
        unsigned char   seq;            /* BT sequence number */
        struct si_sm_io *io;
-       unsigned char   write_data[IPMI_MAX_MSG_LENGTH];
+       unsigned char   write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
        int             write_count;
-       unsigned char   read_data[IPMI_MAX_MSG_LENGTH];
+       unsigned char   read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
        int             read_count;
        int             truncated;
        long            timeout;        /* microseconds countdown */
index 9eb360ff8cab0cc19178b6412ade5f4cf3d40e18..d5a5f020810afcdf8f6d5d19d1b45960c83589d5 100644 (file)
@@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                return ipmi_ioctl(filep, cmd, arg);
        }
 }
+
+static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+                                      unsigned long arg)
+{
+       int ret;
+
+       mutex_lock(&ipmi_mutex);
+       ret = compat_ipmi_ioctl(filep, cmd, arg);
+       mutex_unlock(&ipmi_mutex);
+
+       return ret;
+}
 #endif
 
 static const struct file_operations ipmi_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = ipmi_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
-       .compat_ioctl   = compat_ipmi_ioctl,
+       .compat_ioctl   = unlocked_compat_ipmi_ioctl,
 #endif
        .open           = ipmi_open,
        .release        = ipmi_release,
index 4d439d2fcfd685a7e3e969aee8d7cef5e1cf14ee..4445fa164a2de5de14330af41847ad2bf1ef1c3d 100644 (file)
@@ -2037,12 +2037,11 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
-       entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
+       entry->name = kstrdup(name, GFP_KERNEL);
        if (!entry->name) {
                kfree(entry);
                return -ENOMEM;
        }
-       strcpy(entry->name, name);
 
        file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
        if (!file) {
index 313538abe63ceaef860b0a9b6a86ceb64a134907..af4b23ffc5a659c4d515e706c66fe80b202c056e 100644 (file)
@@ -663,8 +663,10 @@ static void handle_transaction_done(struct smi_info *smi_info)
                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
-                       dev_warn(smi_info->dev, "Could not enable interrupts"
-                                ", failed get, using polled mode.\n");
+                       dev_warn(smi_info->dev,
+                                "Couldn't get irq info: %x.\n", msg[2]);
+                       dev_warn(smi_info->dev,
+                                "Maybe ok, but ipmi might run very slowly.\n");
                        smi_info->si_state = SI_NORMAL;
                } else {
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -685,10 +687,12 @@ static void handle_transaction_done(struct smi_info *smi_info)
 
                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-               if (msg[2] != 0)
-                       dev_warn(smi_info->dev, "Could not enable interrupts"
-                                ", failed set, using polled mode.\n");
-               else
+               if (msg[2] != 0) {
+                       dev_warn(smi_info->dev,
+                                "Couldn't set irq info: %x.\n", msg[2]);
+                       dev_warn(smi_info->dev,
+                                "Maybe ok, but ipmi might run very slowly.\n");
+               } else
                        smi_info->interrupt_disabled = 0;
                smi_info->si_state = SI_NORMAL;
                break;
index dafd9ac6428f2de6e4c2120c675337307d8bd9d6..0913d79424d3a3e81930a4c138979a621de18605 100644 (file)
@@ -622,9 +622,12 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
                                return -EFAULT;
                        break;
                case LPGETSTATUS:
+                       if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+                               return -EINTR;
                        lp_claim_parport_or_block (&lp_table[minor]);
                        status = r_str(minor);
                        lp_release_parport (&lp_table[minor]);
+                       mutex_unlock(&lp_table[minor].port_mutex);
 
                        if (copy_to_user(argp, &status, sizeof(int)))
                                return -EFAULT;
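
The LPGETSTATUS hunk above takes the per-port mutex with mutex_lock_interruptible(), so a signal interrupts the wait instead of the ioctl blocking indefinitely while another opener holds the port. A minimal sketch of that shape; the lock name and the work done under it are placeholders.

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_port_mutex);

static int example_status_ioctl(void)
{
        /* Bail out with -EINTR if a signal arrives while waiting. */
        if (mutex_lock_interruptible(&example_port_mutex))
                return -EINTR;

        /* ... claim the port, read status, release the port ... */

        mutex_unlock(&example_port_mutex);
        return 0;
}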
index cd9a6211dcadd61243905335a87d1e75bd91940e..35487e8ded59f106e3a7b157aa3d5bf46f9579e8 100644 (file)
@@ -865,16 +865,24 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
        if (r->entropy_count / 8 < min + reserved) {
                nbytes = 0;
        } else {
+               int entropy_count, orig;
+retry:
+               entropy_count = orig = ACCESS_ONCE(r->entropy_count);
                /* If limited, never pull more than available */
-               if (r->limit && nbytes + reserved >= r->entropy_count / 8)
-                       nbytes = r->entropy_count/8 - reserved;
-
-               if (r->entropy_count / 8 >= nbytes + reserved)
-                       r->entropy_count -= nbytes*8;
-               else
-                       r->entropy_count = reserved;
+               if (r->limit && nbytes + reserved >= entropy_count / 8)
+                       nbytes = entropy_count/8 - reserved;
+
+               if (entropy_count / 8 >= nbytes + reserved) {
+                       entropy_count -= nbytes*8;
+                       if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+                               goto retry;
+               } else {
+                       entropy_count = reserved;
+                       if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+                               goto retry;
+               }
 
-               if (r->entropy_count < random_write_wakeup_thresh)
+               if (entropy_count < random_write_wakeup_thresh)
                        wakeup_write = 1;
        }
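
The account() rewrite above turns entropy accounting into a lock-free read-modify-write: sample the counter once, compute the new value, and let cmpxchg() retry the whole step if another CPU raced in between. A hedged sketch of that retry idiom on a plain int counter; the names and the floor rule are illustrative.

#include <linux/atomic.h>
#include <linux/compiler.h>

static int pool_count;

/* Subtract 'n' from pool_count without a lock, never going below 'floor'. */
static void pool_account(int n, int floor)
{
        int val, orig;

retry:
        val = orig = ACCESS_ONCE(pool_count);
        val = (val - n > floor) ? val - n : floor;
        /* cmpxchg() stores 'val' only if pool_count is still 'orig';
         * otherwise another CPU changed it, so re-read and recompute. */
        if (cmpxchg(&pool_count, orig, val) != orig)
                goto retry;
}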
 
@@ -957,10 +965,23 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 {
        ssize_t ret = 0, i;
        __u8 tmp[EXTRACT_SIZE];
+       unsigned long flags;
 
        /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
-       if (fips_enabled && !r->last_data_init)
-               nbytes += EXTRACT_SIZE;
+       if (fips_enabled) {
+               spin_lock_irqsave(&r->lock, flags);
+               if (!r->last_data_init) {
+                       r->last_data_init = true;
+                       spin_unlock_irqrestore(&r->lock, flags);
+                       trace_extract_entropy(r->name, EXTRACT_SIZE,
+                                             r->entropy_count, _RET_IP_);
+                       xfer_secondary_pool(r, EXTRACT_SIZE);
+                       extract_buf(r, tmp);
+                       spin_lock_irqsave(&r->lock, flags);
+                       memcpy(r->last_data, tmp, EXTRACT_SIZE);
+               }
+               spin_unlock_irqrestore(&r->lock, flags);
+       }
 
        trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
        xfer_secondary_pool(r, nbytes);
@@ -970,19 +991,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                extract_buf(r, tmp);
 
                if (fips_enabled) {
-                       unsigned long flags;
-
-
-                       /* prime last_data value if need be, per fips 140-2 */
-                       if (!r->last_data_init) {
-                               spin_lock_irqsave(&r->lock, flags);
-                               memcpy(r->last_data, tmp, EXTRACT_SIZE);
-                               r->last_data_init = true;
-                               nbytes -= EXTRACT_SIZE;
-                               spin_unlock_irqrestore(&r->lock, flags);
-                               extract_buf(r, tmp);
-                       }
-
                        spin_lock_irqsave(&r->lock, flags);
                        if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
                                panic("Hardware RNG duplicated output!\n");
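
The FIPS hunk above primes last_data exactly once: the init flag is claimed under the pool lock, the lock is dropped for the expensive extraction, then retaken to store the result. A generic sketch of that initialize-once shape; all names and the stand-in for the expensive step are illustrative.

#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#define SEED_SIZE 16

static DEFINE_SPINLOCK(state_lock);
static bool seed_primed;
static u8 seed[SEED_SIZE];

static void expensive_extract(u8 *out)          /* stand-in for the costly step */
{
        memset(out, 0, SEED_SIZE);
}

static void prime_seed_once(void)
{
        unsigned long flags;
        u8 tmp[SEED_SIZE];

        spin_lock_irqsave(&state_lock, flags);
        if (!seed_primed) {
                seed_primed = true;             /* claim the one-time init */
                spin_unlock_irqrestore(&state_lock, flags);
                expensive_extract(tmp);         /* done without the lock held */
                spin_lock_irqsave(&state_lock, flags);
                memcpy(seed, tmp, SEED_SIZE);
        }
        spin_unlock_irqrestore(&state_lock, flags);
}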
index 4945bd3d18d0e9de294ba2f1fb8c996662820a39..d5d2e4a985aa8602ff75f840f6af7e963f68471d 100644 (file)
@@ -179,7 +179,6 @@ static int __init ttyprintk_init(void)
 {
        int ret = -ENOMEM;
 
-       tpk_port.port.ops = &null_ops;
        mutex_init(&tpk_port.port_write_mutex);
 
        ttyprintk_driver = tty_alloc_driver(1,
@@ -190,6 +189,7 @@ static int __init ttyprintk_init(void)
                return PTR_ERR(ttyprintk_driver);
 
        tty_port_init(&tpk_port.port);
+       tpk_port.port.ops = &null_ops;
 
        ttyprintk_driver->driver_name = "ttyprintk";
        ttyprintk_driver->name = "ttyprintk";
index 892728412e9ddb0af46ec0a7f93cdeceadc4ed40..24f553673b72d0502415cae85dcfd8a281de06ec 100644 (file)
@@ -932,7 +932,7 @@ static unsigned long si5351_clkout_recalc_rate(struct clk_hw *hw,
        unsigned char reg;
        unsigned char rdiv;
 
-       if (hwdata->num > 5)
+       if (hwdata->num <= 5)
                reg = si5351_msynth_params_address(hwdata->num) + 2;
        else
                reg = SI5351_CLK6_7_OUTPUT_DIVIDER;
@@ -1477,6 +1477,16 @@ static int si5351_i2c_probe(struct i2c_client *client,
                        return -EINVAL;
                }
                drvdata->onecell.clks[n] = clk;
+
+               /* set initial clkout rate */
+               if (pdata->clkout[n].rate != 0) {
+                       int ret;
+                       ret = clk_set_rate(clk, pdata->clkout[n].rate);
+                       if (ret != 0) {
+                               dev_err(&client->dev, "Cannot set rate : %d\n",
+                                       ret);
+                       }
+               }
        }
 
        ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
index debf688afa8e503c4dba690b5cfffe983c7abae0..553ac35bcc912ea9f975927f8f91ff1e680dec6d 100644 (file)
@@ -183,7 +183,7 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
        writel(divisor, cdev->div_reg);
        vt8500_pmc_wait_busy();
 
-       spin_lock_irqsave(cdev->lock, flags);
+       spin_unlock_irqrestore(cdev->lock, flags);
 
        return 0;
 }
index d0e5eed146de69baf6baf1295f12cadce8d4a923..4faf0afc44cd5a2ebe0761af3e8d1250ed6c2b77 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clk/mxs.h>
 #include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/init.h>
index d0940e69d034ffbaebc53a36cfe6606035304a16..3c1f88868f295e9df2e2f367ee91ebbec93db981 100644 (file)
@@ -791,7 +791,8 @@ struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
        GATE(smmu_pcie, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0),
        GATE(modemif, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0),
        GATE(chipid, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
-       GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0),
+       GATE(sysreg, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0,
+                       CLK_IGNORE_UNUSED, 0),
        GATE(hdmi_cec, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, 0),
        GATE(smmu_rotator, "smmu_rotator", "aclk200",
                        E4210_GATE_IP_IMAGE, 4, 0, 0),
@@ -819,7 +820,8 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
        GATE(smmu_mdma, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0),
        GATE(mipi_hsi, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
        GATE(chipid, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
-       GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, 0, 0),
+       GATE(sysreg, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
+                       CLK_IGNORE_UNUSED, 0),
        GATE(hdmi_cec, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, 0),
        GATE(sclk_mdnie0, "sclk_mdnie0", "div_mdnie0",
                        SRC_MASK_LCD0, 4, CLK_SET_RATE_PARENT, 0),
index 8292a00c3de9f1dbae8b19e23b6ac45b3d275866..075db0c99edb9b8569556d853fbaee0e88c2cb2c 100644 (file)
@@ -872,6 +872,14 @@ static void __init tegra20_periph_clk_init(void)
        struct clk *clk;
        int i;
 
+       /* ac97 */
+       clk = tegra_clk_register_periph_gate("ac97", "pll_a_out0",
+                                   TEGRA_PERIPH_ON_APB,
+                                   clk_base, 0, 3, &periph_l_regs,
+                                   periph_clk_enb_refcnt);
+       clk_register_clkdev(clk, NULL, "tegra20-ac97");
+       clks[ac97] = clk;
+
        /* apbdma */
        clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
                                    0, 34, &periph_h_regs,
@@ -1234,9 +1242,6 @@ static __initdata struct tegra_clk_init_table init_table[] = {
        {uartc, pll_p, 0, 0},
        {uartd, pll_p, 0, 0},
        {uarte, pll_p, 0, 0},
-       {usbd, clk_max, 12000000, 0},
-       {usb2, clk_max, 12000000, 0},
-       {usb3, clk_max, 12000000, 0},
        {pll_a, clk_max, 56448000, 1},
        {pll_a_out0, clk_max, 11289600, 1},
        {cdev1, clk_max, 0, 1},
index bc7e9bde792b9ea5061752cd368a5a9e87fcbd9c..e364c9d4aa60e38b05085926297d3514da319048 100644 (file)
@@ -145,7 +145,13 @@ static struct clk *clk_reg_sysctrl(struct device *dev,
                return ERR_PTR(-ENOMEM);
        }
 
-       for (i = 0; i < num_parents; i++) {
+       /* set main clock registers */
+       clk->reg_sel[0] = reg_sel[0];
+       clk->reg_bits[0] = reg_bits[0];
+       clk->reg_mask[0] = reg_mask[0];
+
+       /* handle clocks with more than one parent */
+       for (i = 1; i < num_parents; i++) {
                clk->reg_sel[i] = reg_sel[i];
                clk->reg_bits[i] = reg_bits[i];
                clk->reg_mask[i] = reg_mask[i];
index 0b4f35a5ffc273b45cc602f1a4f819d18cd43b86..80069c370a47a1c560f05929ce7bd2aedb651514 100644 (file)
@@ -325,7 +325,7 @@ void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
        clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
                                BIT(0), 0);
        clk_register_clkdev(clk, "fsmc", NULL);
-       clk_register_clkdev(clk, NULL, "smsc911x");
+       clk_register_clkdev(clk, NULL, "smsc911x.0");
 
        clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
                                BIT(1), 0);
index 5cf4f4686406e639e71353984ea7b84d0bc2709b..4f45eee9e33b2746f95c42867c2339f67707fb30 100644 (file)
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/platform_data/clk-lpss.h>
 #include <linux/platform_device.h>
 
 #define PRV_CLOCK_PARAMS 0x800
 
 static int lpt_clk_probe(struct platform_device *pdev)
 {
+       struct lpss_clk_data *drvdata;
        struct clk *clk;
 
+       drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
        /* LPSS free running clock */
-       clk = clk_register_fixed_rate(&pdev->dev, "lpss_clk", NULL, CLK_IS_ROOT,
-                                     100000000);
+       drvdata->name = "lpss_clk";
+       clk = clk_register_fixed_rate(&pdev->dev, drvdata->name, NULL,
+                                     CLK_IS_ROOT, 100000000);
        if (IS_ERR(clk))
                return PTR_ERR(clk);
 
-       /* Shared DMA clock */
-       clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto");
+       drvdata->clk = clk;
+       platform_set_drvdata(pdev, drvdata);
        return 0;
 }
 
index a1488f58f6ca40eb0b660a82e97a54ce0246a430..534fcb8251538a31d2695313b1d565990f8d51d4 100644 (file)
@@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS
 
 choice
        prompt "Default CPUFreq governor"
-       default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
+       default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
        default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
        help
          This option sets which CPUFreq governor shall be loaded at
index f3af18b9acc50f299b7f105b5e0b59effe5f4f33..6e57543fe0b981e55cfdd9623c5eb1ed910ffd8c 100644 (file)
@@ -3,16 +3,17 @@
 #
 
 config ARM_BIG_LITTLE_CPUFREQ
-       tristate
-       depends on ARM_CPU_TOPOLOGY
+       tristate "Generic ARM big LITTLE CPUfreq driver"
+       depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+       help
+         This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
 config ARM_DT_BL_CPUFREQ
-       tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
-       select ARM_BIG_LITTLE_CPUFREQ
-       depends on OF && HAVE_CLK
+       tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+       depends on ARM_BIG_LITTLE_CPUFREQ && OF
        help
-         This enables the Generic CPUfreq driver for ARM big.LITTLE platform.
-         This gets frequency tables from DT.
+         This enables probing via DT for the generic CPUfreq driver for ARM
+         big.LITTLE platforms.  Frequency tables are obtained from DT.
 
 config ARM_EXYNOS_CPUFREQ
        bool "SAMSUNG EXYNOS SoCs"
index 2b8a8c3745486d4cdf62c79490ad263815bf6032..6bd63d63d356ba0f2fc731e65824409cee9bf158 100644 (file)
@@ -272,7 +272,7 @@ config X86_LONGHAUL
 config X86_E_POWERSAVER
        tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
        select CPU_FREQ_TABLE
-       depends on X86_32
+       depends on X86_32 && ACPI_PROCESSOR
        help
          This adds the CPUFreq driver for VIA C7 processors.  However, this driver
          does not have any safeguards to prevent operating the CPU out of spec
index dbdf677d2f3610e4e28340a3785090b245d5c336..5d7f53fcd6f5eac052d4a0bfed4d48f531921d8c 100644 (file)
@@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS];
 static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
 static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
 
-static int cpu_to_cluster(int cpu)
-{
-       return topology_physical_package_id(cpu);
-}
-
 static unsigned int bL_cpufreq_get(unsigned int cpu)
 {
        u32 cur_cluster = cpu_to_cluster(cpu);
@@ -192,7 +187,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
 
        cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
 
-       dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu);
+       dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
        return 0;
 }
 
index 70f18fc12d4ad2a1a2c14f00ae1844e5378cdaee..79b2ce17884dd9cc645cb5a34c33e90290bba10a 100644 (file)
@@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops {
        int (*init_opp_table)(struct device *cpu_dev);
 };
 
+static inline int cpu_to_cluster(int cpu)
+{
+       return topology_physical_package_id(cpu);
+}
+
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
 void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
 
index 44be3115375ca968897b16726a3f9192ab0e1695..fd9e3ea6a480aad30c0f9c593eb683470605e36d 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/opp.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include "arm_big_little.h"
 
-static int dt_init_opp_table(struct device *cpu_dev)
+/* get cpu node with valid operating-points */
+static struct device_node *get_cpu_node_with_valid_op(int cpu)
 {
-       struct device_node *np, *parent;
-       int count = 0, ret;
+       struct device_node *np = NULL, *parent;
+       int count = 0;
 
        parent = of_find_node_by_path("/cpus");
        if (!parent) {
                pr_err("failed to find OF /cpus\n");
-               return -ENOENT;
+               return NULL;
        }
 
        for_each_child_of_node(parent, np) {
-               if (count++ != cpu_dev->id)
+               if (count++ != cpu)
                        continue;
                if (!of_get_property(np, "operating-points", NULL)) {
-                       ret = -ENODATA;
-               } else {
-                       cpu_dev->of_node = np;
-                       ret = of_init_opp_table(cpu_dev);
+                       of_node_put(np);
+                       np = NULL;
                }
-               of_node_put(np);
-               of_node_put(parent);
 
-               return ret;
+               break;
        }
 
-       return -ENODEV;
+       of_node_put(parent);
+       return np;
 }
 
-static int dt_get_transition_latency(struct device *cpu_dev)
+static int dt_init_opp_table(struct device *cpu_dev)
 {
-       struct device_node *np, *parent;
-       u32 transition_latency = CPUFREQ_ETERNAL;
-       int count = 0;
+       struct device_node *np;
+       int ret;
 
-       parent = of_find_node_by_path("/cpus");
-       if (!parent) {
-               pr_err("failed to find OF /cpus\n");
-               return -ENOENT;
-       }
+       np = get_cpu_node_with_valid_op(cpu_dev->id);
+       if (!np)
+               return -ENODATA;
 
-       for_each_child_of_node(parent, np) {
-               if (count++ != cpu_dev->id)
-                       continue;
+       cpu_dev->of_node = np;
+       ret = of_init_opp_table(cpu_dev);
+       of_node_put(np);
 
-               of_property_read_u32(np, "clock-latency", &transition_latency);
-               of_node_put(np);
-               of_node_put(parent);
+       return ret;
+}
 
-               return 0;
-       }
+static int dt_get_transition_latency(struct device *cpu_dev)
+{
+       struct device_node *np;
+       u32 transition_latency = CPUFREQ_ETERNAL;
+
+       np = get_cpu_node_with_valid_op(cpu_dev->id);
+       if (!np)
+               return CPUFREQ_ETERNAL;
 
-       return -ENODEV;
+       of_property_read_u32(np, "clock-latency", &transition_latency);
+       of_node_put(np);
+
+       pr_debug("%s: clock-latency: %d\n", __func__, transition_latency);
+       return transition_latency;
 }
 
 static struct cpufreq_arm_bL_ops dt_bL_ops = {
@@ -90,17 +96,33 @@ static struct cpufreq_arm_bL_ops dt_bL_ops = {
        .init_opp_table = dt_init_opp_table,
 };
 
-static int generic_bL_init(void)
+static int generic_bL_probe(struct platform_device *pdev)
 {
+       struct device_node *np;
+
+       np = get_cpu_node_with_valid_op(0);
+       if (!np)
+               return -ENODEV;
+
+       of_node_put(np);
        return bL_cpufreq_register(&dt_bL_ops);
 }
-module_init(generic_bL_init);
 
-static void generic_bL_exit(void)
+static int generic_bL_remove(struct platform_device *pdev)
 {
-       return bL_cpufreq_unregister(&dt_bL_ops);
+       bL_cpufreq_unregister(&dt_bL_ops);
+       return 0;
 }
-module_exit(generic_bL_exit);
+
+static struct platform_driver generic_bL_platdrv = {
+       .driver = {
+               .name   = "arm-bL-cpufreq-dt",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = generic_bL_probe,
+       .remove         = generic_bL_remove,
+};
+module_platform_driver(generic_bL_platdrv);
 
 MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
 MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
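
The hunk above converts open-coded module_init()/module_exit() registration into a platform driver, so the device core drives probe/remove and module_platform_driver() generates the init/exit boilerplate. A minimal sketch of the same shape; the driver name and callbacks are placeholders.

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        /* look up resources, register with the subsystem, ... */
        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver example_platdrv = {
        .driver = {
                .name   = "example-cpufreq",
                .owner  = THIS_MODULE,
        },
        .probe          = example_probe,
        .remove         = example_remove,
};
/* Expands into module_init()/module_exit() that register and
 * unregister this platform_driver. */
module_platform_driver(example_platdrv);

MODULE_LICENSE("GPL");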
index 3ab8294eab04f280f04ce3c2ea42b0fb2722fc86..a64eb8b70444b4a9cf183e8696117d28445a8efc 100644 (file)
@@ -189,12 +189,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 
        if (!np) {
                pr_err("failed to find cpu0 node\n");
-               return -ENOENT;
+               ret = -ENOENT;
+               goto out_put_parent;
        }
 
        cpu_dev = &pdev->dev;
        cpu_dev->of_node = np;
 
+       cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
+       if (IS_ERR(cpu_reg)) {
+               /*
+                * If cpu0 regulator supply node is present, but regulator is
+                * not yet registered, we should try deferring probe.
+                */
+               if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+                       dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
+                       ret = -EPROBE_DEFER;
+                       goto out_put_node;
+               }
+               pr_warn("failed to get cpu0 regulator: %ld\n",
+                       PTR_ERR(cpu_reg));
+               cpu_reg = NULL;
+       }
+
        cpu_clk = devm_clk_get(cpu_dev, NULL);
        if (IS_ERR(cpu_clk)) {
                ret = PTR_ERR(cpu_clk);
@@ -202,12 +219,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                goto out_put_node;
        }
 
-       cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
-       if (IS_ERR(cpu_reg)) {
-               pr_warn("failed to get cpu0 regulator\n");
-               cpu_reg = NULL;
-       }
-
        ret = of_init_opp_table(cpu_dev);
        if (ret) {
                pr_err("failed to init OPP table: %d\n", ret);
@@ -264,6 +275,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
        opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_put_node:
        of_node_put(np);
+out_put_parent:
+       of_node_put(parent);
        return ret;
 }
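
The reordered regulator lookup above separates "supply described in DT but its driver not ready yet" (defer and let the core retry the probe later) from "no supply at all" (carry on without voltage scaling). A hedged sketch of that deferral pattern; the "cpu0" supply name comes from the hunk, the helper around it is illustrative.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

static int example_get_cpu_supply(struct device *dev, struct regulator **out)
{
        struct regulator *reg;

        reg = devm_regulator_get(dev, "cpu0");
        if (IS_ERR(reg)) {
                /* Supply is wired up but its driver hasn't probed yet:
                 * ask the driver core to retry this probe later. */
                if (PTR_ERR(reg) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                /* Otherwise run without a controllable supply. */
                reg = NULL;
        }

        *out = reg;
        return 0;
}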
 
index 1b8a48eaf90f1c4d2e541e9e03e58f03ca8b6d80..2d53f47d1747360b8968c7c308c0f975777ef9ab 100644 (file)
@@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
                                __func__, cpu_dev->id, cpu);
        }
 
+       if ((cpus == 1) && (cpufreq_driver->target))
+               __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
        pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
        cpufreq_cpu_put(data);
 
        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
-               if (cpufreq_driver->target)
-                       __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
                lock_policy_rwsem_read(cpu);
                kobj = &data->kobj;
                cmp = &data->kobj_unregister;
@@ -1729,18 +1729,23 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
                        /* end old governor */
                        if (data->governor) {
                                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+                               unlock_policy_rwsem_write(policy->cpu);
                                __cpufreq_governor(data,
                                                CPUFREQ_GOV_POLICY_EXIT);
+                               lock_policy_rwsem_write(policy->cpu);
                        }
 
                        /* start new governor */
                        data->governor = policy->governor;
                        if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
-                               if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
+                               if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
                                        failed = 0;
-                               else
+                               } else {
+                                       unlock_policy_rwsem_write(policy->cpu);
                                        __cpufreq_governor(data,
                                                        CPUFREQ_GOV_POLICY_EXIT);
+                                       lock_policy_rwsem_write(policy->cpu);
+                               }
                        }
 
                        if (failed) {
@@ -1832,15 +1837,13 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
        if (dev) {
                switch (action) {
                case CPU_ONLINE:
-               case CPU_ONLINE_FROZEN:
                        cpufreq_add_dev(dev, NULL);
                        break;
                case CPU_DOWN_PREPARE:
-               case CPU_DOWN_PREPARE_FROZEN:
+               case CPU_UP_CANCELED_FROZEN:
                        __cpufreq_remove_dev(dev, NULL);
                        break;
                case CPU_DOWN_FAILED:
-               case CPU_DOWN_FAILED_FROZEN:
                        cpufreq_add_dev(dev, NULL);
                        break;
                }
index 443442df113be73b33173c1a44109b4619508d7a..5af40ad82d231f7fd23bfd862590c30ab7d4b38e 100644 (file)
@@ -255,6 +255,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                if (have_governor_per_policy()) {
                        WARN_ON(dbs_data);
                } else if (dbs_data) {
+                       dbs_data->usage_count++;
                        policy->governor_data = dbs_data;
                        return 0;
                }
@@ -266,6 +267,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                }
 
                dbs_data->cdata = cdata;
+               dbs_data->usage_count = 1;
                rc = cdata->init(dbs_data);
                if (rc) {
                        pr_err("%s: POLICY_INIT: init() failed\n", __func__);
@@ -294,7 +296,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
                                        latency * LATENCY_MULTIPLIER));
 
-               if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+               if ((cdata->governor == GOV_CONSERVATIVE) &&
+                               (!policy->governor->initialized)) {
                        struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
                        cpufreq_register_notifier(cs_ops->notifier_block,
@@ -306,12 +309,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                return 0;
        case CPUFREQ_GOV_POLICY_EXIT:
-               if ((policy->governor->initialized == 1) ||
-                               have_governor_per_policy()) {
+               if (!--dbs_data->usage_count) {
                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr(dbs_data));
 
-                       if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+                       if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
+                               (policy->governor->initialized == 1)) {
                                struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
                                cpufreq_unregister_notifier(cs_ops->notifier_block,
index 8ac33538d0bdb4f66cc7572f53db4c463190ae92..e16a96130cb3491d30f376728144cdca92e441cd 100644 (file)
@@ -211,6 +211,7 @@ struct common_dbs_data {
 struct dbs_data {
        struct common_dbs_data *cdata;
        unsigned int min_sampling_rate;
+       int usage_count;
        void *tuners;
 
        /* dbs_mutex protects dbs_enable in governor start/stop */
index b0ffef96bf77a2c1bcaf0680c6a500a7430a6858..4b9bb5def6f159a126e4f05aa6a76d365c6a23f8 100644 (file)
@@ -547,7 +547,6 @@ static int od_init(struct dbs_data *dbs_data)
        tuners->io_is_busy = should_io_be_busy();
 
        dbs_data->tuners = tuners;
-       pr_info("%s: tuners %p\n", __func__, tuners);
        mutex_init(&dbs_data->mutex);
        return 0;
 }
index bfd6273fd873531d864f3b4e104d37018a5d8dda..fb65decffa28128ded8817441b2e5eff8ecad8e3 100644 (file)
@@ -349,15 +349,16 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
 
        switch (action) {
        case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
                cpufreq_update_policy(cpu);
                break;
        case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
                cpufreq_stats_free_sysfs(cpu);
                break;
        case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
+               cpufreq_stats_free_table(cpu);
+               break;
+       case CPU_UP_CANCELED_FROZEN:
+               cpufreq_stats_free_sysfs(cpu);
                cpufreq_stats_free_table(cpu);
                break;
        }
index cc3a8e6c92beca8669e1e45e48051d9a9d26a81d..07f2840ad80596b529488f40808e10c70782f498 100644 (file)
@@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-       ktime_t start_time;
-       ktime_t end_time;
        int core_pct_busy;
-       int pstate_pct_busy;
-       u64 duration_us;
-       u64 idletime_us;
        u64 aperf;
        u64 mperf;
        int freq;
@@ -86,13 +81,9 @@ struct cpudata {
        struct pstate_adjust_policy *pstate_policy;
        struct pstate_data pstate;
        struct _pid pid;
-       struct _pid idle_pid;
 
        int min_pstate_count;
-       int idle_mode;
 
-       ktime_t prev_sample;
-       u64     prev_idle_time_us;
        u64     prev_aperf;
        u64     prev_mperf;
        int     sample_ptr;
@@ -124,6 +115,8 @@ struct perf_limits {
        int min_perf_pct;
        int32_t max_perf;
        int32_t min_perf;
+       int max_policy_pct;
+       int max_sysfs_pct;
 };
 
 static struct perf_limits limits = {
@@ -132,6 +125,8 @@ static struct perf_limits limits = {
        .max_perf = int_tofp(1),
        .min_perf_pct = 0,
        .min_perf = 0,
+       .max_policy_pct = 100,
+       .max_sysfs_pct = 100,
 };
 
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
                0);
 }
 
-static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
-{
-       pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
-       pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
-       pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
-
-       pid_reset(&cpu->idle_pid,
-               75,
-               50,
-               cpu->pstate_policy->deadband,
-               0);
-}
-
 static inline void intel_pstate_reset_all_pid(void)
 {
        unsigned int cpu;
@@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
        if (ret != 1)
                return -EINVAL;
 
-       limits.max_perf_pct = clamp_t(int, input, 0 , 100);
+       limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+       limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
        return count;
 }
@@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
        if (pstate == cpu->pstate.current_pstate)
                return;
 
-#ifndef MODULE
        trace_cpu_frequency(pstate * 100000, cpu->cpu);
-#endif
+
        cpu->pstate.current_pstate = pstate;
        wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
 
@@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        struct sample *sample)
 {
        u64 core_pct;
-       sample->pstate_pct_busy = 100 - div64_u64(
-                                       sample->idletime_us * 100,
-                                       sample->duration_us);
        core_pct = div64_u64(sample->aperf * 100, sample->mperf);
        sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
 
-       sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
-                                       100);
+       sample->core_pct_busy = core_pct;
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
-       ktime_t now;
-       u64 idle_time_us;
        u64 aperf, mperf;
 
-       now = ktime_get();
-       idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
-
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
-       /* for the first sample, don't actually record a sample, just
-        * set the baseline */
-       if (cpu->prev_idle_time_us > 0) {
-               cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
-               cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
-               cpu->samples[cpu->sample_ptr].end_time = now;
-               cpu->samples[cpu->sample_ptr].duration_us =
-                       ktime_us_delta(now, cpu->prev_sample);
-               cpu->samples[cpu->sample_ptr].idletime_us =
-                       idle_time_us - cpu->prev_idle_time_us;
-
-               cpu->samples[cpu->sample_ptr].aperf = aperf;
-               cpu->samples[cpu->sample_ptr].mperf = mperf;
-               cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
-               cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-
-               intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
-       }
+       cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+       cpu->samples[cpu->sample_ptr].aperf = aperf;
+       cpu->samples[cpu->sample_ptr].mperf = mperf;
+       cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+       cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+       intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
 
-       cpu->prev_sample = now;
-       cpu->prev_idle_time_us = idle_time_us;
        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
 }
@@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
        mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline void intel_pstate_idle_mode(struct cpudata *cpu)
-{
-       cpu->idle_mode = 1;
-}
-
-static inline void intel_pstate_normal_mode(struct cpudata *cpu)
-{
-       cpu->idle_mode = 0;
-}
-
 static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
        int32_t busy_scaled;
@@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
                intel_pstate_pstate_decrease(cpu, steps);
 }
 
-static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
-{
-       int busy_scaled;
-       struct _pid *pid;
-       int ctl = 0;
-       int steps;
-
-       pid = &cpu->idle_pid;
-
-       busy_scaled = intel_pstate_get_scaled_busy(cpu);
-
-       ctl = pid_calc(pid, 100 - busy_scaled);
-
-       steps = abs(ctl);
-       if (ctl < 0)
-               intel_pstate_pstate_decrease(cpu, steps);
-       else
-               intel_pstate_pstate_increase(cpu, steps);
-
-       if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
-               intel_pstate_normal_mode(cpu);
-}
-
 static void intel_pstate_timer_func(unsigned long __data)
 {
        struct cpudata *cpu = (struct cpudata *) __data;
 
        intel_pstate_sample(cpu);
+       intel_pstate_adjust_busy_pstate(cpu);
 
-       if (!cpu->idle_mode)
-               intel_pstate_adjust_busy_pstate(cpu);
-       else
-               intel_pstate_adjust_idle_pstate(cpu);
-
-#if defined(XPERF_FIX)
        if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
                cpu->min_pstate_count++;
                if (!(cpu->min_pstate_count % 5)) {
                        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
-                       intel_pstate_idle_mode(cpu);
                }
        } else
                cpu->min_pstate_count = 0;
-#endif
+
        intel_pstate_set_sample_time(cpu);
 }
 
@@ -600,6 +521,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x2a, default_policy),
        ICPU(0x2d, default_policy),
+       ICPU(0x3a, default_policy),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -631,7 +553,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
                (unsigned long)cpu;
        cpu->timer.expires = jiffies + HZ/100;
        intel_pstate_busy_pid_reset(cpu);
-       intel_pstate_idle_pid_reset(cpu);
        intel_pstate_sample(cpu);
        intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
 
@@ -675,8 +596,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
-       limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
-       limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+       limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+       limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+       limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
        limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
        return 0;
@@ -788,10 +710,9 @@ static int __init intel_pstate_init(void)
 
        pr_info("Intel P-state driver initializing.\n");
 
-       all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
+       all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
                return -ENOMEM;
-       memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
 
        rc = cpufreq_register_driver(&intel_pstate_driver);
        if (rc)
index d36ea8dc96eb9fdc0812e0e93512f8f97cda66f8..b2644af985ec8e741daf009262069694c671513b 100644 (file)
@@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
        priv.dev = &pdev->dev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Cannot get memory resource\n");
-               return -ENODEV;
-       }
        priv.base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv.base))
                return PTR_ERR(priv.base);
index 84889573b5669f22e1632108c5f32b94285d5814..d53912768946b96c1c27b017dbaeb7a10e7c2feb 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/clock.h>
+#include <asm/idle.h>
 
 #include <asm/mach-loongson/loongson.h>
 
@@ -200,6 +201,7 @@ static void loongson2_cpu_wait(void)
        LOONGSON_CHIPCFG0 &= ~0x7;      /* Put CPU into wait mode */
        LOONGSON_CHIPCFG0 = cpu_freq;   /* Restore CPU state */
        spin_unlock_irqrestore(&loongson2_wait_lock, flags);
+       local_irq_enable();
 }
 
 static int __init cpufreq_init(void)
index 765fdf5ce579bafa81eb79e67f8341f424e22fe3..bf416a8391a77ec94dc3686d773e5afb7b437b3d 100644 (file)
@@ -1154,7 +1154,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
 
        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-                                DMA_BIDIRECTIONAL, assoc_chained);
+                                DMA_TO_DEVICE, assoc_chained);
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_BIDIRECTIONAL, src_chained);
@@ -1336,7 +1336,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
                dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
 
        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-                                DMA_BIDIRECTIONAL, assoc_chained);
+                                DMA_TO_DEVICE, assoc_chained);
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_BIDIRECTIONAL, src_chained);
index a76d4c4f29f50798ed3ecafaa871d98a7d02c7e3..35d483f8db66b2a3ecb204181ffd0d43b0aaaffe 100644 (file)
@@ -126,6 +126,7 @@ struct crypto_alg nx_cbc_aes_alg = {
        .cra_blocksize   = AES_BLOCK_SIZE,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_blkcipher_type,
+       .cra_alignmask   = 0xf,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_cbc_init,
        .cra_exit        = nx_crypto_ctx_exit,
index ba5f1611336fe9fa5165cfa680e7acc2b7e38637..7bbc9a81da219e5c27e021c9ef6c880d194c90d3 100644 (file)
@@ -123,6 +123,7 @@ struct crypto_alg nx_ecb_aes_alg = {
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize   = AES_BLOCK_SIZE,
+       .cra_alignmask   = 0xf,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_blkcipher_type,
        .cra_module      = THIS_MODULE,
index c8109edc5cfb02063c1bd6c5914985846bccac2a..6cca6c392b00f34fa65ad4663b1725bd7b3842fe 100644 (file)
@@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
        else
-               nbytes -= AES_BLOCK_SIZE;
+               nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
 
        csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
 
index 9767315f8c0bd069a451f1a2c74bbf2f2bff628d..67024f2f0b78746bdbfcb8c5ffde595754a0b155 100644 (file)
@@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
         *  1: <= SHA256_BLOCK_SIZE: copy into state, return 0
         *  2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
         */
-       if (len + sctx->count <= SHA256_BLOCK_SIZE) {
+       if (len + sctx->count < SHA256_BLOCK_SIZE) {
                memcpy(sctx->buf + sctx->count, data, len);
                sctx->count += len;
                goto out;
@@ -110,7 +110,8 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        atomic_inc(&(nx_ctx->stats->sha256_ops));
 
        /* copy the leftover back into the state struct */
-       memcpy(sctx->buf, data + len - leftover, leftover);
+       if (leftover)
+               memcpy(sctx->buf, data + len - leftover, leftover);
        sctx->count = leftover;
 
        csbcpb->cpb.sha256.message_bit_length += (u64)
@@ -130,6 +131,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
        struct nx_sg *in_sg, *out_sg;
        int rc;
 
+
        if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
@@ -162,7 +164,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 
        atomic_inc(&(nx_ctx->stats->sha256_ops));
 
-       atomic64_add(csbcpb->cpb.sha256.message_bit_length,
+       atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
                     &(nx_ctx->stats->sha256_bytes));
        memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
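
The two nx_sha256_update() hunks above (and the matching nx-sha512 hunks below) fix the same pair of issues: an exactly block-sized input must be handed to the engine instead of being parked in the state buffer, hence '<' rather than '<=', and the trailing memcpy must be skipped when there is no leftover. A host-side sketch of just that bookkeeping, with illustrative names; it only returns how many whole blocks would be fed to the hardware and does not model the buffered prefix:

#include <stddef.h>
#include <string.h>

#define BLOCK_SIZE 64   /* SHA-256 block size in bytes */

struct demo_sha_state {
        unsigned char buf[BLOCK_SIZE];
        size_t count;                   /* bytes currently buffered */
};

static size_t demo_update(struct demo_sha_state *s,
                          const unsigned char *data, size_t len)
{
        size_t total = s->count + len, leftover;

        if (total < BLOCK_SIZE) {       /* strict '<': a full block is processed */
                memcpy(s->buf + s->count, data, len);
                s->count += len;
                return 0;
        }

        leftover = total % BLOCK_SIZE;
        if (leftover)                   /* guard the zero-length copy */
                memcpy(s->buf, data + len - leftover, leftover);
        s->count = leftover;

        return total / BLOCK_SIZE;
}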
index 3177b8c3d5f1e3073ef559d902fe3cd488e8360b..08eee11223490c7a1a249a2f559f960e72e7af2c 100644 (file)
@@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
         *  1: <= SHA512_BLOCK_SIZE: copy into state, return 0
         *  2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
         */
-       if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
+       if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
                memcpy(sctx->buf + sctx->count[0], data, len);
                sctx->count[0] += len;
                goto out;
@@ -110,7 +110,8 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        atomic_inc(&(nx_ctx->stats->sha512_ops));
 
        /* copy the leftover back into the state struct */
-       memcpy(sctx->buf, data + len - leftover, leftover);
+       if (leftover)
+               memcpy(sctx->buf, data + len - leftover, leftover);
        sctx->count[0] = leftover;
 
        spbc_bits = csbcpb->cpb.sha512.spbc * 8;
@@ -168,7 +169,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
                goto out;
 
        atomic_inc(&(nx_ctx->stats->sha512_ops));
-       atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
+       atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
                     &(nx_ctx->stats->sha512_bytes));
 
        memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
index c767f232e6933bc876adedd56a1f7e655f93c815..bbdab6e5ccf08f75fcc23481dbb9d29a1df6cd69 100644 (file)
@@ -211,44 +211,20 @@ int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
 {
        struct nx_sg *nx_insg = nx_ctx->in_sg;
        struct nx_sg *nx_outsg = nx_ctx->out_sg;
-       struct blkcipher_walk walk;
-       int rc;
-
-       blkcipher_walk_init(&walk, dst, src, nbytes);
-       rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-       if (rc)
-               goto out;
 
        if (iv)
-               memcpy(iv, walk.iv, AES_BLOCK_SIZE);
+               memcpy(iv, desc->info, AES_BLOCK_SIZE);
 
-       while (walk.nbytes) {
-               nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-                                          walk.nbytes, nx_ctx->ap->sglen);
-               nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-                                           walk.nbytes, nx_ctx->ap->sglen);
-
-               rc = blkcipher_walk_done(desc, &walk, 0);
-               if (rc)
-                       break;
-       }
-
-       if (walk.nbytes) {
-               nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-                                          walk.nbytes, nx_ctx->ap->sglen);
-               nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-                                           walk.nbytes, nx_ctx->ap->sglen);
-
-               rc = 0;
-       }
+       nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
+       nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
 
        /* these lengths should be negative, which will indicate to phyp that
         * the input and output parameters are scatterlists, not linear
         * buffers */
        nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
-out:
-       return rc;
+
+       return 0;
 }
 
 /**
@@ -454,6 +430,8 @@ static int nx_register_algs(void)
        if (rc)
                goto out;
 
+       nx_driver.of.status = NX_OKAY;
+
        rc = crypto_register_alg(&nx_ecb_aes_alg);
        if (rc)
                goto out;
@@ -498,8 +476,6 @@ static int nx_register_algs(void)
        if (rc)
                goto out_unreg_s512;
 
-       nx_driver.of.status = NX_OKAY;
-
        goto out;
 
 out_unreg_s512:
index ba6fc62e965163bca45cd55d148188812e08374a..5a18f82f732af57a319628190713e6bd054cf8b3 100644 (file)
@@ -4,7 +4,8 @@
  * Based on of-dma.c
  *
  * Copyright (C) 2013, Intel Corporation
- * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *         Mika Westerberg <mika.westerberg@linux.intel.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/ioport.h>
 #include <linux/acpi.h>
 #include <linux/acpi_dma.h>
 
 static LIST_HEAD(acpi_dma_list);
 static DEFINE_MUTEX(acpi_dma_lock);
 
+/**
+ * acpi_dma_parse_resource_group - match device and parse resource group
+ * @grp:       CSRT resource group
+ * @adev:      ACPI device to match with
+ * @adma:      struct acpi_dma of the given DMA controller
+ *
+ * Returns 1 on success, 0 when no information is available, or appropriate
+ * errno value on error.
+ *
+ * In order to match a device from the DSDT table to the corresponding CSRT device
+ * we use MMIO address and IRQ.
+ */
+static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
+               struct acpi_device *adev, struct acpi_dma *adma)
+{
+       const struct acpi_csrt_shared_info *si;
+       struct list_head resource_list;
+       struct resource_list_entry *rentry;
+       resource_size_t mem = 0, irq = 0;
+       u32 vendor_id;
+       int ret;
+
+       if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
+               return -ENODEV;
+
+       INIT_LIST_HEAD(&resource_list);
+       ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+       if (ret <= 0)
+               return 0;
+
+       list_for_each_entry(rentry, &resource_list, node) {
+               if (resource_type(&rentry->res) == IORESOURCE_MEM)
+                       mem = rentry->res.start;
+               else if (resource_type(&rentry->res) == IORESOURCE_IRQ)
+                       irq = rentry->res.start;
+       }
+
+       acpi_dev_free_resource_list(&resource_list);
+
+       /* Consider initial zero values as resource not found */
+       if (mem == 0 && irq == 0)
+               return 0;
+
+       si = (const struct acpi_csrt_shared_info *)&grp[1];
+
+       /* Match device by MMIO and IRQ */
+       if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
+               return 0;
+
+       vendor_id = le32_to_cpu(grp->vendor_id);
+       dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
+               (char *)&vendor_id, grp->device_id, grp->revision);
+
+       /* Check if the request line range is available */
+       if (si->base_request_line == 0 && si->num_handshake_signals == 0)
+               return 0;
+
+       adma->base_request_line = si->base_request_line;
+       adma->end_request_line = si->base_request_line +
+                                si->num_handshake_signals - 1;
+
+       dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n",
+               adma->base_request_line, adma->end_request_line);
+
+       return 1;
+}
+
+/**
+ * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources
+ * @adev:      ACPI device to match with
+ * @adma:      struct acpi_dma of the given DMA controller
+ *
+ * CSRT or Core System Resources Table is a proprietary ACPI table
+ * introduced by Microsoft. This table can contain devices that are not in
+ * the system DSDT table. In particular DMA controllers might be described
+ * here.
+ *
+ * We are using this table to get the request line range of the specific DMA
+ * controller to be used later.
+ *
+ */
+static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
+{
+       struct acpi_csrt_group *grp, *end;
+       struct acpi_table_csrt *csrt;
+       acpi_status status;
+       int ret;
+
+       status = acpi_get_table(ACPI_SIG_CSRT, 0,
+                               (struct acpi_table_header **)&csrt);
+       if (ACPI_FAILURE(status)) {
+               if (status != AE_NOT_FOUND)
+                       dev_warn(&adev->dev, "failed to get the CSRT table\n");
+               return;
+       }
+
+       grp = (struct acpi_csrt_group *)(csrt + 1);
+       end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
+
+       while (grp < end) {
+               ret = acpi_dma_parse_resource_group(grp, adev, adma);
+               if (ret < 0) {
+                       dev_warn(&adev->dev,
+                                "error in parsing resource group\n");
+                       return;
+               }
+
+               grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
+       }
+}
+
 /**
  * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers
  * @dev:               struct device of DMA controller
@@ -61,6 +174,8 @@ int acpi_dma_controller_register(struct device *dev,
        adma->acpi_dma_xlate = acpi_dma_xlate;
        adma->data = data;
 
+       acpi_dma_parse_csrt(adev, adma);
+
        /* Now queue acpi_dma controller structure in list */
        mutex_lock(&acpi_dma_lock);
        list_add_tail(&adma->dma_controllers, &acpi_dma_list);
@@ -149,6 +264,45 @@ void devm_acpi_dma_controller_free(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
 
+/**
+ * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function
+ * @adma:      struct acpi_dma of DMA controller
+ * @dma_spec:  dma specifier to update
+ *
+ * Returns 0 if no information is available, -1 on mismatch, and 1 otherwise.
+ *
+ * According to the ACPI 5.0 Specification, Table 6-170 "Fixed DMA Resource
+ * Descriptor":
+ *     DMA Request Line bits is a platform-relative number uniquely
+ *     identifying the request line assigned. Request line-to-Controller
+ *     mapping is done in a controller-specific OS driver.
+ * That's why we can safely adjust slave_id when the appropriate controller is
+ * found.
+ */
+static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
+               struct acpi_dma_spec *dma_spec)
+{
+       /* Set link to the DMA controller device */
+       dma_spec->dev = adma->dev;
+
+       /* Check if the request line range is available */
+       if (adma->base_request_line == 0 && adma->end_request_line == 0)
+               return 0;
+
+       /* Check if slave_id falls within the range */
+       if (dma_spec->slave_id < adma->base_request_line ||
+           dma_spec->slave_id > adma->end_request_line)
+               return -1;
+
+       /*
+        * Here we adjust slave_id. It should be a relative number to the base
+        * request line.
+        */
+       dma_spec->slave_id -= adma->base_request_line;
+
+       return 1;
+}
+
 struct acpi_dma_parser_data {
        struct acpi_dma_spec dma_spec;
        size_t index;
@@ -193,6 +347,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
        struct acpi_device *adev;
        struct acpi_dma *adma;
        struct dma_chan *chan = NULL;
+       int found;
 
        /* Check if the device was enumerated by ACPI */
        if (!dev || !ACPI_HANDLE(dev))
@@ -219,9 +374,20 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
        mutex_lock(&acpi_dma_lock);
 
        list_for_each_entry(adma, &acpi_dma_list, dma_controllers) {
-               dma_spec->dev = adma->dev;
+               /*
+                * We are not going to call the translation function if slave_id
+                * doesn't fall within the request range.
+                */
+               found = acpi_dma_update_dma_spec(adma, dma_spec);
+               if (found < 0)
+                       continue;
                chan = adma->acpi_dma_xlate(dma_spec, adma);
-               if (chan)
+               /*
+                * Try to get a channel only from the DMA controller that
+                * matches the slave_id. See acpi_dma_update_dma_spec()
+                * description for the details.
+                */
+               if (found > 0 || chan)
                        break;
        }
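
A tiny standalone illustration of the request-line rebasing that acpi_dma_update_dma_spec() performs above, using invented CSRT numbers:

#include <stdio.h>

int main(void)
{
        unsigned int base_request_line = 0x10;  /* from CSRT shared info (example) */
        unsigned int num_handshake_signals = 8;
        unsigned int end_request_line =
                base_request_line + num_handshake_signals - 1;  /* 0x17 */
        unsigned int slave_id = 0x12;   /* from the FixedDMA descriptor (example) */

        if (slave_id < base_request_line || slave_id > end_request_line) {
                puts("slave_id outside this controller's range, try the next one");
                return 0;
        }

        /* rebase to a controller-relative request line, as the driver does */
        printf("adjusted slave_id = %u\n", slave_id - base_request_line);       /* 2 */
        return 0;
}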
 
index ce193409ebd32345e997dad798625c41565b438e..33f59ecd256e1487ce3dca17cd5f0822e2308c34 100644 (file)
@@ -1273,11 +1273,6 @@ static int tegra_dma_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, tdma);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "No mem resource for DMA\n");
-               return -EINVAL;
-       }
-
        tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(tdma->base_addr))
                return PTR_ERR(tdma->base_addr);
index 8c171fa1cb9bc65f5531e178bd45cb9a0ef0c9ad..845f04786c2de4baedf0952d2ec2b5a830e62602 100644 (file)
@@ -202,9 +202,9 @@ static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
                   amd64_inject_word_show, amd64_inject_word_store);
 static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
                   amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store);
-static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(inject_write, S_IWUSR,
                   NULL, amd64_inject_write_store);
-static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(inject_read,  S_IWUSR,
                   NULL, amd64_inject_read_store);
 
 
index b623c599e572975fd30db879da54194ef025dcf7..8bd1bb6dbe4739cf0914a129a12363c1bf157820 100644 (file)
@@ -523,13 +523,11 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
        struct efivar_entry *entry;
        int err;
 
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry)
-               return;
-
        /* Add new sysfs entries */
        while (1) {
-               memset(entry, 0, sizeof(*entry));
+               entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+               if (!entry)
+                       return;
 
                err = efivar_init(efivar_update_sysfs_entry, entry,
                                  true, false, &efivar_sysfs_list);
index 87d567089f13653297ddc001ec0fda8746254465..573c449c49b9138a69f8aaf9a999e8a41898d2e1 100644 (file)
@@ -636,7 +636,7 @@ config GPIO_MAX7301
 
 config GPIO_MCP23S08
        tristate "Microchip MCP23xxx I/O expander"
-       depends on SPI_MASTER || I2C
+       depends on (SPI_MASTER && !I2C) || I2C
        help
          SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
          I/O expanders.
index 634c3d37f7b5e010fe315e0734c4205ab4abce58..62ef10a641c4250b273263c78e51bd2534ed632a 100644 (file)
@@ -324,6 +324,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
        resource_size_t start, len;
        struct lnw_gpio *lnw;
        u32 gpio_base;
+       u32 irq_base;
        int retval;
        int ngpio = id->driver_data;
 
@@ -345,6 +346,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
                retval = -EFAULT;
                goto err_ioremap;
        }
+       irq_base = *(u32 *)base;
        gpio_base = *((u32 *)base + 1);
        /* release the IO mapping, since we already get the info from bar1 */
        iounmap(base);
@@ -365,13 +367,6 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
                goto err_ioremap;
        }
 
-       lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
-                                           &lnw_gpio_irq_ops, lnw);
-       if (!lnw->domain) {
-               retval = -ENOMEM;
-               goto err_ioremap;
-       }
-
        lnw->reg_base = base;
        lnw->chip.label = dev_name(&pdev->dev);
        lnw->chip.request = lnw_gpio_request;
@@ -384,6 +379,14 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
        lnw->chip.ngpio = ngpio;
        lnw->chip.can_sleep = 0;
        lnw->pdev = pdev;
+
+       lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base,
+                                           &lnw_gpio_irq_ops, lnw);
+       if (!lnw->domain) {
+               retval = -ENOMEM;
+               goto err_ioremap;
+       }
+
        pci_set_drvdata(pdev, lnw);
        retval = gpiochip_add(&lnw->chip);
        if (retval) {
index b73366523faebf4db81c3fbb020365f8afd75123..0966f2637ad2dc849f12d7224dbf7d6af1c75c13 100644 (file)
@@ -496,8 +496,7 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
 err_gpiochip_add:
        while (--i >= 0) {
                chip--;
-               ret = gpiochip_remove(&chip->gpio);
-               if (ret)
+               if (gpiochip_remove(&chip->gpio))
                        dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i);
        }
        kfree(chip_save);
index bf69a7eff370cbabc72ba01591727ba20c81338c..3a4816adc137de912b51fca601c50d65f8baea19 100644 (file)
@@ -619,11 +619,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
         * per-CPU registers */
        if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               if (!res) {
-                       dev_err(&pdev->dev, "Cannot get memory resource\n");
-                       return -ENODEV;
-               }
-
                mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
                                                               res);
                if (IS_ERR(mvchip->percpu_membase))
index 25000b0f84532645ad7eab5d85278dfdd9b2557b..f8e6af20dfbf1d0efc5fce0060b6ace3792cb1ad 100644 (file)
@@ -326,7 +326,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
 
        err = bgpio_init(&port->bgc, &pdev->dev, 4,
                         port->base + PINCTRL_DIN(port),
-                        port->base + PINCTRL_DOUT(port), NULL,
+                        port->base + PINCTRL_DOUT(port) + MXS_SET,
+                        port->base + PINCTRL_DOUT(port) + MXS_CLR,
                         port->base + PINCTRL_DOE(port), NULL, 0);
        if (err)
                goto out_irqdesc_free;
index 2050891d9c65a4936533fb14a4b2d8b95b379cb5..d3f7d2db870f985253a603f3f53f2b5475ee97f6 100644 (file)
@@ -69,6 +69,7 @@ struct gpio_bank {
        bool is_mpuio;
        bool dbck_flag;
        bool loses_context;
+       bool context_valid;
        int stride;
        u32 width;
        int context_loss_count;
@@ -1128,6 +1129,10 @@ static int omap_gpio_probe(struct platform_device *pdev)
                        bank->loses_context = true;
        } else {
                bank->loses_context = pdata->loses_context;
+
+               if (bank->loses_context)
+                       bank->get_context_loss_count =
+                               pdata->get_context_loss_count;
        }
 
 
@@ -1178,9 +1183,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
        omap_gpio_chip_init(bank);
        omap_gpio_show_rev(bank);
 
-       if (bank->loses_context)
-               bank->get_context_loss_count = pdata->get_context_loss_count;
-
        pm_runtime_put(bank->dev);
 
        list_add_tail(&bank->node, &omap_gpio_list);
@@ -1259,6 +1261,8 @@ static int omap_gpio_runtime_suspend(struct device *dev)
        return 0;
 }
 
+static void omap_gpio_init_context(struct gpio_bank *p);
+
 static int omap_gpio_runtime_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -1268,6 +1272,20 @@ static int omap_gpio_runtime_resume(struct device *dev)
        int c;
 
        spin_lock_irqsave(&bank->lock, flags);
+
+       /*
+        * On the first resume during the probe, the context has not
+        * been initialised and so initialise it now. Also initialise
+        * the context loss count.
+        */
+       if (bank->loses_context && !bank->context_valid) {
+               omap_gpio_init_context(bank);
+
+               if (bank->get_context_loss_count)
+                       bank->context_loss_count =
+                               bank->get_context_loss_count(bank->dev);
+       }
+
        _gpio_dbck_enable(bank);
 
        /*
@@ -1384,6 +1402,29 @@ void omap2_gpio_resume_after_idle(void)
 }
 
 #if defined(CONFIG_PM_RUNTIME)
+static void omap_gpio_init_context(struct gpio_bank *p)
+{
+       struct omap_gpio_reg_offs *regs = p->regs;
+       void __iomem *base = p->base;
+
+       p->context.ctrl         = __raw_readl(base + regs->ctrl);
+       p->context.oe           = __raw_readl(base + regs->direction);
+       p->context.wake_en      = __raw_readl(base + regs->wkup_en);
+       p->context.leveldetect0 = __raw_readl(base + regs->leveldetect0);
+       p->context.leveldetect1 = __raw_readl(base + regs->leveldetect1);
+       p->context.risingdetect = __raw_readl(base + regs->risingdetect);
+       p->context.fallingdetect = __raw_readl(base + regs->fallingdetect);
+       p->context.irqenable1   = __raw_readl(base + regs->irqenable);
+       p->context.irqenable2   = __raw_readl(base + regs->irqenable2);
+
+       if (regs->set_dataout && p->regs->clr_dataout)
+               p->context.dataout = __raw_readl(base + regs->set_dataout);
+       else
+               p->context.dataout = __raw_readl(base + regs->dataout);
+
+       p->context_valid = true;
+}
+
 static void omap_gpio_restore_context(struct gpio_bank *bank)
 {
        __raw_writel(bank->context.wake_en,
@@ -1421,6 +1462,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
 #else
 #define omap_gpio_runtime_suspend NULL
 #define omap_gpio_runtime_resume NULL
+static void omap_gpio_init_context(struct gpio_bank *p) {}
 #endif
 
 static const struct dev_pm_ops gpio_pm_ops = {
index cdf599687cf7b1e4704fa213cf77961bb8f66cfd..0fec097e838df23c264b62405b2cf10bd0531bae 100644 (file)
@@ -424,8 +424,7 @@ static int pch_gpio_probe(struct pci_dev *pdev,
 err_request_irq:
        irq_free_descs(irq_base, gpio_pins[chip->ioh]);
 
-       ret = gpiochip_remove(&chip->gpio);
-       if (ret)
+       if (gpiochip_remove(&chip->gpio))
                dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
 
 err_gpiochip_add:
index 1e4de16ceb419e148c9c233aef0746143530efca..5af65719b95dafad605fb8f1f733ead623b78bf5 100644 (file)
@@ -272,10 +272,8 @@ static int sch_gpio_probe(struct platform_device *pdev)
        return 0;
 
 err_sch_gpio_resume:
-       err = gpiochip_remove(&sch_gpio_core);
-       if (err)
-               dev_err(&pdev->dev, "%s failed, %d\n",
-                               "gpiochip_remove()", err);
+       if (gpiochip_remove(&sch_gpio_core))
+               dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
 
 err_sch_gpio_core:
        release_region(res->start, resource_size(res));
index da4cb5b0cb87612fed241b9a4ebc6245716377cd..9a62672f1bed97185b26132966dd0ddcb50534f3 100644 (file)
@@ -463,11 +463,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Missing MEM resource\n");
-               return -ENODEV;
-       }
-
        regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(regs))
                return PTR_ERR(regs);
index 095ab14cea4d16d672e009fb40d6929a2e5d16e0..5ac2919197fe70775f8d17fda5a6dafaab9af677 100644 (file)
@@ -446,7 +446,8 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
        return ret;
 
 err_gpiob:
-       ret = gpiochip_remove(&vb_gpio->gpioa);
+       if (gpiochip_remove(&vb_gpio->gpioa))
+               dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
 
 err_gpioa:
        return ret;
index 3a8f7e6db2950fc175759e558c7663ae84d3702f..e7e92429d10f9e36d80d48e5f8a0004e4eb0284e 100644 (file)
@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
 {
        struct drm_crtc *crtc;
 
+       /* Locking is currently fubar in the panic handler. */
+       if (oops_in_progress)
+               return;
+
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                WARN_ON(!mutex_is_locked(&crtc->mutex));
 
@@ -246,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
        else
                return "unknown";
 }
+EXPORT_SYMBOL(drm_get_connector_status_name);
 
 /**
  * drm_mode_object_get - allocate a new modeset identifier
index e974f9309b72697d3c9918ab979c8469d548c25d..ed1334e27c33283442fceb604892e55666bdfe2c 100644 (file)
@@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                connector->helper_private;
        int count = 0;
        int mode_flags = 0;
+       bool verbose_prune = true;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
                        drm_get_connector_name(connector));
@@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
                        connector->base.id, drm_get_connector_name(connector));
                drm_mode_connector_update_edid_property(connector, NULL);
+               verbose_prune = false;
                goto prune;
        }
 
@@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
        }
 
 prune:
-       drm_mode_prune_invalid(dev, &connector->modes, true);
+       drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
 
        if (list_empty(&connector->modes))
                return 0;
@@ -1005,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work)
                        continue;
 
                connector->status = connector->funcs->detect(connector, false);
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
-                             connector->base.id,
-                             drm_get_connector_name(connector),
-                             old_status, connector->status);
-               if (old_status != connector->status)
+               if (old_status != connector->status) {
+                       const char *old, *new;
+
+                       old = drm_get_connector_status_name(old_status);
+                       new = drm_get_connector_status_name(connector->status);
+
+                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+                                     "status updated from %s to %s\n",
+                                     connector->base.id,
+                                     drm_get_connector_name(connector),
+                                     old, new);
+
                        changed = true;
+               }
        }
 
        mutex_unlock(&dev->mode_config.mutex);
@@ -1083,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
                old_status = connector->status;
 
                connector->status = connector->funcs->detect(connector, false);
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                              connector->base.id,
                              drm_get_connector_name(connector),
-                             old_status, connector->status);
+                             drm_get_connector_status_name(old_status),
+                             drm_get_connector_status_name(connector->status));
                if (old_status != connector->status)
                        changed = true;
        }
index 8d4f29075af5bd24f0b03461cac4d35c511530dd..9cc247f555028f41046cbacd57a6b31cdf8f5e20 100644 (file)
@@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
 #define DRM_IOCTL_DEF(ioctl, _func, _flags) \
-       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
 
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
 {
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev;
-       const struct drm_ioctl_desc *ioctl;
+       const struct drm_ioctl_desc *ioctl = NULL;
        drm_ioctl_t *func;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        int retcode = -EINVAL;
@@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp,
        atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
        ++file_priv->ioctl_count;
 
-       DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
-                 task_pid_nr(current), cmd, nr,
-                 (long)old_encode_dev(file_priv->minor->device),
-                 file_priv->authenticated);
-
        if ((nr >= DRM_CORE_IOCTL_COUNT) &&
            ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
                goto err_i1;
@@ -417,6 +412,11 @@ long drm_ioctl(struct file *filp,
        } else
                goto err_i1;
 
+       DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+                 task_pid_nr(current),
+                 (long)old_encode_dev(file_priv->minor->device),
+                 file_priv->authenticated, ioctl->name);
+
        /* Do not trust userspace, use our own definition */
        func = ioctl->func;
        /* is there a local override? */
@@ -471,6 +471,12 @@ long drm_ioctl(struct file *filp,
        }
 
       err_i1:
+       if (!ioctl)
+               DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+                         task_pid_nr(current),
+                         (long)old_encode_dev(file_priv->minor->device),
+                         file_priv->authenticated, cmd, nr);
+
        if (kdata != stack_kdata)
                kfree(kdata);
        atomic_dec(&dev->ioctl_count);
index 48c52f7df4e63affac527e1f86e06188a03e2021..0cfb60f5476655edc097ca71c648d11544f3357e 100644 (file)
@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
                         struct i2c_adapter *adap,
                         const struct i2c_board_info *info)
 {
-       char modalias[sizeof(I2C_MODULE_PREFIX)
-                     + I2C_NAME_SIZE];
        struct module *module = NULL;
        struct i2c_client *client;
        struct drm_i2c_encoder_driver *encoder_drv;
        int err = 0;
 
-       snprintf(modalias, sizeof(modalias),
-                "%s%s", I2C_MODULE_PREFIX, info->type);
-       request_module(modalias);
+       request_module("%s%s", I2C_MODULE_PREFIX, info->type);
 
        client = i2c_new_device(adap, info);
        if (!client) {
index db1e2d6f90d7221d713c04b70560d85a08ad6732..07cf99cc886283aedf9feee690d71ee6fbe2fb77 100644 (file)
@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 EXPORT_SYMBOL(drm_mm_debug_table);
 
 #if defined(CONFIG_DEBUG_FS)
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
 {
-       struct drm_mm_node *entry;
-       unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;
 
-       hole_start = drm_mm_hole_node_start(&mm->head_node);
-       hole_end = drm_mm_hole_node_end(&mm->head_node);
-       hole_size = hole_end - hole_start;
-       if (hole_size)
+       if (entry->hole_follows) {
+               hole_start = drm_mm_hole_node_start(entry);
+               hole_end = drm_mm_hole_node_end(entry);
+               hole_size = hole_end - hole_start;
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                hole_start, hole_end, hole_size);
-       total_free += hole_size;
+               return hole_size;
+       }
+
+       return 0;
+}
+
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+       struct drm_mm_node *entry;
+       unsigned long total_used = 0, total_free = 0, total = 0;
+
+       total_free += drm_mm_dump_hole(m, &mm->head_node);
 
        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                                entry->start, entry->start + entry->size,
                                entry->size);
                total_used += entry->size;
-               if (entry->hole_follows) {
-                       hole_start = drm_mm_hole_node_start(entry);
-                       hole_end = drm_mm_hole_node_end(entry);
-                       hole_size = hole_end - hole_start;
-                       seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-                                       hole_start, hole_end, hole_size);
-                       total_free += hole_size;
-               }
+               total_free += drm_mm_dump_hole(m, entry);
        }
        total = total_free + total_used;
 
index faa79df0264802e985719da65be37ee369a87a69..a371ff865a887755b81de57663a819429a34ed4c 100644 (file)
@@ -1143,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
                                was_digit = false;
                        } else
                                goto done;
+                       break;
                case '0' ... '9':
                        was_digit = true;
                        break;
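
The one-line break above closes an unintended switch fall-through: without it, the preceding arm dropped into the '0' ... '9' case and flipped was_digit back on, corrupting the parser state. A minimal illustration with hypothetical names:

#include <stdbool.h>

static bool classify(char c, bool *was_digit)
{
        switch (c) {
        case '@':
                if (*was_digit)
                        *was_digit = false;
                else
                        return false;
                break;          /* without this, '@' falls into the digit case */
        case '0' ... '9':       /* GCC case-range extension, as in the parser above */
                *was_digit = true;
                break;
        default:
                break;
        }
        return true;
}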
index e8894bc9e6d5edf0ecf07cea5078f3961191030d..c200e4d71e3d96a84bc8913b82cde320b0787ec9 100644 (file)
@@ -48,6 +48,8 @@ struct exynos_drm_crtc {
        unsigned int                    pipe;
        unsigned int                    dpms;
        enum exynos_crtc_mode           mode;
+       wait_queue_head_t               pending_flip_queue;
+       atomic_t                        pending_flip;
 };
 
 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -61,6 +63,13 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
                return;
        }
 
+       if (mode > DRM_MODE_DPMS_ON) {
+               /* wait for the completion of page flip. */
+               wait_event(exynos_crtc->pending_flip_queue,
+                               atomic_read(&exynos_crtc->pending_flip) == 0);
+               drm_vblank_off(crtc->dev, exynos_crtc->pipe);
+       }
+
        exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
        exynos_crtc->dpms = mode;
 }
@@ -217,7 +226,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
                ret = drm_vblank_get(dev, exynos_crtc->pipe);
                if (ret) {
                        DRM_DEBUG("failed to acquire vblank counter\n");
-                       list_del(&event->base.link);
 
                        goto out;
                }
@@ -225,6 +233,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
                spin_lock_irq(&dev->event_lock);
                list_add_tail(&event->base.link,
                                &dev_priv->pageflip_event_list);
+               atomic_set(&exynos_crtc->pending_flip, 1);
                spin_unlock_irq(&dev->event_lock);
 
                crtc->fb = fb;
@@ -344,6 +353,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
 
        exynos_crtc->pipe = nr;
        exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+       init_waitqueue_head(&exynos_crtc->pending_flip_queue);
+       atomic_set(&exynos_crtc->pending_flip, 0);
        exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true);
        if (!exynos_crtc->plane) {
                kfree(exynos_crtc);
@@ -398,7 +409,8 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
 {
        struct exynos_drm_private *dev_priv = dev->dev_private;
        struct drm_pending_vblank_event *e, *t;
-       struct timeval now;
+       struct drm_crtc *drm_crtc = dev_priv->crtc[crtc];
+       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
        unsigned long flags;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -411,14 +423,11 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
                if (crtc != e->pipe)
                        continue;
 
-               do_gettimeofday(&now);
-               e->event.sequence = 0;
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
-
-               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
+               list_del(&e->base.link);
+               drm_send_vblank_event(dev, -1, e);
                drm_vblank_put(dev, crtc);
+               atomic_set(&exynos_crtc->pending_flip, 0);
+               wake_up(&exynos_crtc->pending_flip_queue);
        }
 
        spin_unlock_irqrestore(&dev->event_lock, flags);
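
The Exynos CRTC change above serializes DPMS-off against in-flight page flips with an atomic flag and a wait queue: the flip path sets pending_flip, the vblank completion clears it and wakes the queue, and DPMS-off blocks until the flag is clear. A generic sketch of that handshake with hypothetical names:

#include <linux/wait.h>
#include <linux/atomic.h>

struct demo_crtc {
        wait_queue_head_t flip_queue;
        atomic_t flip_pending;
};

static void demo_crtc_init(struct demo_crtc *c)
{
        init_waitqueue_head(&c->flip_queue);
        atomic_set(&c->flip_pending, 0);
}

static void demo_queue_flip(struct demo_crtc *c)
{
        atomic_set(&c->flip_pending, 1);        /* flip submitted to hardware */
}

static void demo_finish_flip(struct demo_crtc *c)      /* vblank/IRQ path */
{
        atomic_set(&c->flip_pending, 0);
        wake_up(&c->flip_queue);
}

static void demo_dpms_off(struct demo_crtc *c)
{
        /* don't turn the pipe off while an event is still owed to userspace */
        wait_event(c->flip_queue, atomic_read(&c->flip_pending) == 0);
}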
index 68f0045f86b860d7ecd4b7477a98719a950a7f95..8f007aaeffc3ea2b6e97f067e6be49b123df1eae 100644 (file)
@@ -182,7 +182,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 
        helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
                        &exynos_gem_obj->base);
-       if (IS_ERR_OR_NULL(helper->fb)) {
+       if (IS_ERR(helper->fb)) {
                DRM_ERROR("failed to create drm framebuffer.\n");
                ret = PTR_ERR(helper->fb);
                goto err_destroy_gem;
index 773f583fa9648c97ba4e5073a3a82681b50ef0af..4a1616a18ab7e9ff019ed4b6f50212789a78126a 100644 (file)
@@ -12,9 +12,9 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
@@ -1845,7 +1845,7 @@ static int fimc_probe(struct platform_device *pdev)
        }
 
        ctx->irq = res->start;
-       ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+       ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler,
                IRQF_ONESHOT, "drm_fimc", ctx);
        if (ret < 0) {
                dev_err(dev, "failed to request irq.\n");
@@ -1854,7 +1854,7 @@ static int fimc_probe(struct platform_device *pdev)
 
        ret = fimc_setup_clocks(ctx);
        if (ret < 0)
-               goto err_free_irq;
+               return ret;
 
        ippdrv = &ctx->ippdrv;
        ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
@@ -1884,7 +1884,7 @@ static int fimc_probe(struct platform_device *pdev)
                goto err_pm_dis;
        }
 
-       dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+       dev_info(dev, "drm fimc registered successfully.\n");
 
        return 0;
 
@@ -1892,8 +1892,6 @@ static int fimc_probe(struct platform_device *pdev)
        pm_runtime_disable(dev);
 err_put_clk:
        fimc_put_clocks(ctx);
-err_free_irq:
-       free_irq(ctx->irq, ctx);
 
        return ret;
 }
@@ -1911,8 +1909,6 @@ static int fimc_remove(struct platform_device *pdev)
        pm_runtime_set_suspended(dev);
        pm_runtime_disable(dev);
 
-       free_irq(ctx->irq, ctx);
-
        return 0;
 }
 
index 746b282b343abb0328b9c4b6aaa9b2b0e0a89e03..97c61dbffd82ee36cfe2a92dfaf5b0b618ee07cd 100644 (file)
@@ -885,7 +885,7 @@ static int fimd_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (pdev->dev.of_node) {
+       if (dev->of_node) {
                pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
                if (!pdata) {
                        DRM_ERROR("memory allocation for pdata failed\n");
@@ -899,7 +899,7 @@ static int fimd_probe(struct platform_device *pdev)
                        return ret;
                }
        } else {
-               pdata = pdev->dev.platform_data;
+               pdata = dev->platform_data;
                if (!pdata) {
                        DRM_ERROR("no platform data specified\n");
                        return -EINVAL;
@@ -912,7 +912,7 @@ static int fimd_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
@@ -930,7 +930,7 @@ static int fimd_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-       ctx->regs = devm_ioremap_resource(&pdev->dev, res);
+       ctx->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(ctx->regs))
                return PTR_ERR(ctx->regs);
 
@@ -942,7 +942,7 @@ static int fimd_probe(struct platform_device *pdev)
 
        ctx->irq = res->start;
 
-       ret = devm_request_irq(&pdev->dev, ctx->irq, fimd_irq_handler,
+       ret = devm_request_irq(dev, ctx->irq, fimd_irq_handler,
                                                        0, "drm_fimd", ctx);
        if (ret) {
                dev_err(dev, "irq request failed.\n");
index 47a493c8a71f2629b7acf0de109bcd3eefc15bfd..af75434ee4d7993aacb9d6c3884f2177937f00f6 100644 (file)
@@ -1379,7 +1379,7 @@ static int g2d_probe(struct platform_device *pdev)
        struct exynos_drm_subdrv *subdrv;
        int ret;
 
-       g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL);
+       g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
        if (!g2d) {
                dev_err(dev, "failed to allocate driver data\n");
                return -ENOMEM;
@@ -1417,7 +1417,7 @@ static int g2d_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-       g2d->regs = devm_ioremap_resource(&pdev->dev, res);
+       g2d->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(g2d->regs)) {
                ret = PTR_ERR(g2d->regs);
                goto err_put_clk;
@@ -1430,7 +1430,7 @@ static int g2d_probe(struct platform_device *pdev)
                goto err_put_clk;
        }
 
-       ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0,
+       ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
                                                                "drm_g2d", g2d);
        if (ret < 0) {
                dev_err(dev, "irq request failed\n");
index 7841c3b8a20e022e94a0983c2ba09b232ab66fc4..762f40d548b76398904480f839b1741721aca2b9 100644 (file)
@@ -1704,7 +1704,7 @@ static int gsc_probe(struct platform_device *pdev)
        }
 
        ctx->irq = res->start;
-       ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+       ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler,
                IRQF_ONESHOT, "drm_gsc", ctx);
        if (ret < 0) {
                dev_err(dev, "failed to request irq.\n");
@@ -1725,7 +1725,7 @@ static int gsc_probe(struct platform_device *pdev)
        ret = gsc_init_prop_list(ippdrv);
        if (ret < 0) {
                dev_err(dev, "failed to init property list.\n");
-               goto err_get_irq;
+               return ret;
        }
 
        DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
@@ -1743,15 +1743,12 @@ static int gsc_probe(struct platform_device *pdev)
                goto err_ippdrv_register;
        }
 
-       dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+       dev_info(dev, "drm gsc registered successfully.\n");
 
        return 0;
 
 err_ippdrv_register:
-       devm_kfree(dev, ippdrv->prop_list);
        pm_runtime_disable(dev);
-err_get_irq:
-       free_irq(ctx->irq, ctx);
        return ret;
 }
 
@@ -1761,15 +1758,12 @@ static int gsc_remove(struct platform_device *pdev)
        struct gsc_context *ctx = get_gsc_context(dev);
        struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
 
-       devm_kfree(dev, ippdrv->prop_list);
        exynos_drm_ippdrv_unregister(ippdrv);
        mutex_destroy(&ctx->lock);
 
        pm_runtime_set_suspended(dev);
        pm_runtime_disable(dev);
 
-       free_irq(ctx->irq, ctx);
-
        return 0;
 }
 
index ba2f0f1aa05f41f895257c697b4926d726159e9f..437fb947e46dbce4a0b510710649eba8e5a3c6a8 100644 (file)
@@ -442,7 +442,7 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                DRM_LOG_KMS("failed to alloc common hdmi context.\n");
                return -ENOMEM;
index 29d2ad314490126302ed65e5cb9f79b5afd896f1..be1e884634664533ac9971ef753434a935fac1ee 100644 (file)
@@ -222,7 +222,7 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
                /* find ipp driver using idr */
                ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
                        ipp_id);
-               if (IS_ERR_OR_NULL(ippdrv)) {
+               if (IS_ERR(ippdrv)) {
                        DRM_ERROR("not found ipp%d driver.\n", ipp_id);
                        return ippdrv;
                }
@@ -388,7 +388,7 @@ static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
        DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
 
        ippdrv = ipp_find_drv_by_handle(prop_id);
-       if (IS_ERR_OR_NULL(ippdrv)) {
+       if (IS_ERR(ippdrv)) {
                DRM_ERROR("failed to get ipp driver.\n");
                return -EINVAL;
        }
@@ -492,7 +492,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
 
        /* find ipp driver using ipp id */
        ippdrv = ipp_find_driver(ctx, property);
-       if (IS_ERR_OR_NULL(ippdrv)) {
+       if (IS_ERR(ippdrv)) {
                DRM_ERROR("failed to get ipp driver.\n");
                return -EINVAL;
        }
@@ -521,19 +521,19 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
        c_node->state = IPP_STATE_IDLE;
 
        c_node->start_work = ipp_create_cmd_work();
-       if (IS_ERR_OR_NULL(c_node->start_work)) {
+       if (IS_ERR(c_node->start_work)) {
                DRM_ERROR("failed to create start work.\n");
                goto err_clear;
        }
 
        c_node->stop_work = ipp_create_cmd_work();
-       if (IS_ERR_OR_NULL(c_node->stop_work)) {
+       if (IS_ERR(c_node->stop_work)) {
                DRM_ERROR("failed to create stop work.\n");
                goto err_free_start;
        }
 
        c_node->event_work = ipp_create_event_work();
-       if (IS_ERR_OR_NULL(c_node->event_work)) {
+       if (IS_ERR(c_node->event_work)) {
                DRM_ERROR("failed to create event work.\n");
                goto err_free_stop;
        }
@@ -915,7 +915,7 @@ static int ipp_queue_buf_with_run(struct device *dev,
        DRM_DEBUG_KMS("%s\n", __func__);
 
        ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
-       if (IS_ERR_OR_NULL(ippdrv)) {
+       if (IS_ERR(ippdrv)) {
                DRM_ERROR("failed to get ipp driver.\n");
                return -EFAULT;
        }
@@ -1909,7 +1909,7 @@ static int ipp_probe(struct platform_device *pdev)
        struct exynos_drm_subdrv *subdrv;
        int ret;
 
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
@@ -1963,7 +1963,7 @@ static int ipp_probe(struct platform_device *pdev)
                goto err_cmd_workq;
        }
 
-       dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+       dev_info(dev, "drm ipp registered successfully.\n");
 
        return 0;
 
index 947f09f15ad1abe53179b77d330db5602a936dc5..9b6c70964d71c969106ed41f5f849979e2094601 100644 (file)
@@ -666,8 +666,8 @@ static int rotator_probe(struct platform_device *pdev)
                return rot->irq;
        }
 
-       ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
-                       IRQF_ONESHOT, "drm_rotator", rot);
+       ret = devm_request_threaded_irq(dev, rot->irq, NULL,
+                       rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot);
        if (ret < 0) {
                dev_err(dev, "failed to request irq\n");
                return ret;
@@ -676,8 +676,7 @@ static int rotator_probe(struct platform_device *pdev)
        rot->clock = devm_clk_get(dev, "rotator");
        if (IS_ERR(rot->clock)) {
                dev_err(dev, "failed to get clock\n");
-               ret = PTR_ERR(rot->clock);
-               goto err_clk_get;
+               return PTR_ERR(rot->clock);
        }
 
        pm_runtime_enable(dev);
@@ -709,10 +708,7 @@ static int rotator_probe(struct platform_device *pdev)
        return 0;
 
 err_ippdrv_register:
-       devm_kfree(dev, ippdrv->prop_list);
        pm_runtime_disable(dev);
-err_clk_get:
-       free_irq(rot->irq, rot);
        return ret;
 }
 
@@ -722,13 +718,10 @@ static int rotator_remove(struct platform_device *pdev)
        struct rot_context *rot = dev_get_drvdata(dev);
        struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
 
-       devm_kfree(dev, ippdrv->prop_list);
        exynos_drm_ippdrv_unregister(ippdrv);
 
        pm_runtime_disable(dev);
 
-       free_irq(rot->irq, rot);
-
        return 0;
 }
 
index 9504b0cd825a4dc9d4b81c2be674eb0897eb137d..24376c194a5ec025a1b5f300656c23bea45f50d3 100644 (file)
@@ -594,7 +594,7 @@ static int vidi_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
@@ -612,7 +612,7 @@ static int vidi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, ctx);
 
-       ret = device_create_file(&pdev->dev, &dev_attr_connection);
+       ret = device_create_file(dev, &dev_attr_connection);
        if (ret < 0)
                DRM_INFO("failed to create connection sysfs.\n");
 
index bbfc3840080cf03b718e76fb0aa9f3119ec4ba24..fd1426dca8824d48db62aa1bb27d62e7b4070d9f 100644 (file)
@@ -1946,14 +1946,14 @@ static int hdmi_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("[%d]\n", __LINE__);
 
-       if (pdev->dev.of_node) {
+       if (dev->of_node) {
                pdata = drm_hdmi_dt_parse_pdata(dev);
                if (IS_ERR(pdata)) {
                        DRM_ERROR("failed to parse dt\n");
                        return PTR_ERR(pdata);
                }
        } else {
-               pdata = pdev->dev.platform_data;
+               pdata = dev->platform_data;
        }
 
        if (!pdata) {
@@ -1961,14 +1961,14 @@ static int hdmi_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx),
+       drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
                                                                GFP_KERNEL);
        if (!drm_hdmi_ctx) {
                DRM_ERROR("failed to allocate common hdmi context.\n");
                return -ENOMEM;
        }
 
-       hdata = devm_kzalloc(&pdev->dev, sizeof(struct hdmi_context),
+       hdata = devm_kzalloc(dev, sizeof(struct hdmi_context),
                                                                GFP_KERNEL);
        if (!hdata) {
                DRM_ERROR("out of memory\n");
@@ -1985,7 +1985,7 @@ static int hdmi_probe(struct platform_device *pdev)
        if (dev->of_node) {
                const struct of_device_id *match;
                match = of_match_node(of_match_ptr(hdmi_match_types),
-                                       pdev->dev.of_node);
+                                       dev->of_node);
                if (match == NULL)
                        return -ENODEV;
                hdata->type = (enum hdmi_type)match->data;
@@ -2005,16 +2005,11 @@ static int hdmi_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               DRM_ERROR("failed to find registers\n");
-               return -ENOENT;
-       }
-
-       hdata->regs = devm_ioremap_resource(&pdev->dev, res);
+       hdata->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(hdata->regs))
                return PTR_ERR(hdata->regs);
 
-       ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
+       ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD");
        if (ret) {
                DRM_ERROR("failed to request HPD gpio\n");
                return ret;
@@ -2046,7 +2041,7 @@ static int hdmi_probe(struct platform_device *pdev)
 
        hdata->hpd = gpio_get_value(hdata->hpd_gpio);
 
-       ret = request_threaded_irq(hdata->irq, NULL,
+       ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
                        hdmi_irq_thread, IRQF_TRIGGER_RISING |
                        IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                        "hdmi", drm_hdmi_ctx);
@@ -2075,16 +2070,11 @@ static int hdmi_probe(struct platform_device *pdev)
 static int hdmi_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
-       struct hdmi_context *hdata = ctx->ctx;
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
        pm_runtime_disable(dev);
 
-       free_irq(hdata->irq, hdata);
-
-
        /* hdmiphy i2c driver */
        i2c_del_driver(&hdmiphy_driver);
        /* DDC i2c driver */
index ec3e376b7e01e08f82c68d26b6a27e7e5ff0b55b..7c197d3820c5583c2023831f9df5b1c7d5891f73 100644 (file)
@@ -1061,7 +1061,7 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
                return -ENXIO;
        }
 
-       mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
+       mixer_res->mixer_regs = devm_ioremap(dev, res->start,
                                                        resource_size(res));
        if (mixer_res->mixer_regs == NULL) {
                dev_err(dev, "register mapping failed.\n");
@@ -1074,7 +1074,7 @@ static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
                return -ENXIO;
        }
 
-       ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
+       ret = devm_request_irq(dev, res->start, mixer_irq_handler,
                                                        0, "drm_mixer", ctx);
        if (ret) {
                dev_err(dev, "request interrupt failed.\n");
@@ -1118,7 +1118,7 @@ static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
                return -ENXIO;
        }
 
-       mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
+       mixer_res->vp_regs = devm_ioremap(dev, res->start,
                                                        resource_size(res));
        if (mixer_res->vp_regs == NULL) {
                dev_err(dev, "register mapping failed.\n");
@@ -1169,14 +1169,14 @@ static int mixer_probe(struct platform_device *pdev)
 
        dev_info(dev, "probe start\n");
 
-       drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx),
+       drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
                                                                GFP_KERNEL);
        if (!drm_hdmi_ctx) {
                DRM_ERROR("failed to allocate common hdmi context.\n");
                return -ENOMEM;
        }
 
-       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                DRM_ERROR("failed to alloc mixer context.\n");
                return -ENOMEM;
@@ -1187,14 +1187,14 @@ static int mixer_probe(struct platform_device *pdev)
        if (dev->of_node) {
                const struct of_device_id *match;
                match = of_match_node(of_match_ptr(mixer_match_types),
-                                                         pdev->dev.of_node);
+                                                         dev->of_node);
                drv = (struct mixer_drv_data *)match->data;
        } else {
                drv = (struct mixer_drv_data *)
                        platform_get_device_id(pdev)->driver_data;
        }
 
-       ctx->dev = &pdev->dev;
+       ctx->dev = dev;
        ctx->parent_ctx = (void *)drm_hdmi_ctx;
        drm_hdmi_ctx->ctx = (void *)ctx;
        ctx->vp_enabled = drv->is_vp_enabled;
index 9ebe895c17d6b34dae006a6d392d7911b45eae8b..a2e4953b8e8d5771d6edd95bcaf7f3198674f6b4 100644 (file)
@@ -364,40 +364,64 @@ static const struct pci_device_id pciidlist[] = {         /* aka */
        INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
        INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
        INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
-       INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
+       INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
        INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
        INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
-       INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
+       INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
        INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
        INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
        INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
+       INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
+       INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
+       INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
+       INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
+       INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
+       INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
        INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
        INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
-       INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
+       INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
        INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
        INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
-       INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
+       INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
        INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
        INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
-       INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
+       INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
+       INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
+       INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
+       INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
+       INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
+       INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
+       INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
        INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
        INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
-       INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
+       INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
        INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
        INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
-       INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
+       INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
        INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
        INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
-       INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
+       INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
+       INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
+       INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
+       INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
+       INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
+       INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
+       INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
        INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
        INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
-       INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
+       INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
        INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
        INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
-       INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
+       INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
        INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
        INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
-       INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
+       INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
+       INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
+       INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
+       INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
+       INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
+       INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
+       INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
        INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
        INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
index d5dcf7fe1ee9b536bee4beb82e4684f0f86b24e2..b9d00dcf9a2d4fcb618c67af82df56ee640f72e2 100644 (file)
@@ -1943,4 +1943,19 @@ static inline void __user *to_user_ptr(u64 address)
        return (void __user *)(uintptr_t)address;
 }
 
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+       unsigned long j = msecs_to_jiffies(m);
+
+       return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+static inline unsigned long
+timespec_to_jiffies_timeout(const struct timespec *value)
+{
+       unsigned long j = timespec_to_jiffies(value);
+
+       return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
 #endif
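
The two helpers added above exist because a wait converted straight with msecs_to_jiffies()/timespec_to_jiffies() can expire up to one tick early when it starts partway through the current jiffy; adding one jiffy (clamped to MAX_JIFFY_OFFSET) guarantees at least the requested time. A standalone sketch of the arithmetic, with an illustrative HZ value:

/* Standalone sketch of the "+1 jiffy" rounding; HZ is an example value. */
#include <stdio.h>

#define HZ 250				/* 4 ms per tick, for example */

static unsigned long msecs_to_jiffies_demo(unsigned int m)
{
	return (m * HZ + 999) / 1000;	/* round up, as msecs_to_jiffies() does */
}

int main(void)
{
	unsigned int m = 10;
	unsigned long j = msecs_to_jiffies_demo(m);

	/* Waiting j ticks can come up short if the wait starts mid-tick;
	 * j + 1 ticks is the smallest count that always covers m ms. */
	printf("%u ms -> %lu ticks, use %lu ticks to guarantee the full wait\n",
	       m, j, j + 1);
	return 0;
}
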
index 6be940effefd1cf3dd69262e826c9cc999e4c26c..a6cf8e8439733480a7687a098a764af362b7b7f6 100644 (file)
@@ -1003,7 +1003,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                wait_forever = false;
        }
 
-       timeout_jiffies = timespec_to_jiffies(&wait_time);
+       timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
 
        if (WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;
@@ -1045,6 +1045,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        if (timeout) {
                struct timespec sleep_time = timespec_sub(now, before);
                *timeout = timespec_sub(*timeout, sleep_time);
+               if (!timespec_valid(timeout)) /* i.e. negative time remains */
+                       set_normalized_timespec(timeout, 0, 0);
        }
 
        switch (end) {
@@ -1053,8 +1055,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        case -ERESTARTSYS: /* Signal */
                return (int)end;
        case 0: /* Timeout */
-               if (timeout)
-                       set_normalized_timespec(timeout, 0, 0);
                return -ETIME;
        default: /* Completed */
                WARN_ON(end < 0); /* We're not aware of other errors */
@@ -2377,10 +2377,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        mutex_unlock(&dev->struct_mutex);
 
        ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
-       if (timeout) {
-               WARN_ON(!timespec_valid(timeout));
+       if (timeout)
                args->timeout_ns = timespec_to_ns(timeout);
-       }
        return ret;
 
 out:
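
After the hunks above, __wait_seqno() itself clamps a negative residual timeout to zero, so i915_gem_wait_ioctl() can copy the value back to userspace without the WARN_ON. A standalone sketch of that clamp; the numbers are examples, not driver values:

/* Standalone sketch: subtract elapsed time from a timeout and clamp a
 * negative remainder to zero (mirrors the logic added above). */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec timeout = { .tv_sec = 0, .tv_nsec = 5 * 1000 * 1000 };	/* 5 ms budget  */
	struct timespec slept   = { .tv_sec = 0, .tv_nsec = 8 * 1000 * 1000 };	/* 8 ms elapsed */
	long remain_ns = (timeout.tv_sec - slept.tv_sec) * 1000000000L +
			 (timeout.tv_nsec - slept.tv_nsec);

	if (remain_ns < 0)	/* i.e. negative time remains */
		remain_ns = 0;	/* report zero rather than a bogus value */

	printf("remaining: %ld ns\n", remain_ns);
	return 0;
}
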
index dca614de71b6a34189bb40fa0a99cc0f60fcb6ec..bdb0d7717bc77937dce3c4de563f1e7066c7bb2e 100644 (file)
@@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
        return snb_gmch_ctl << 25; /* 32 MB units */
 }
 
-static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
-{
-       static const int stolen_decoder[] = {
-               0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
-       snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
-       snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
-       return stolen_decoder[snb_gmch_ctl] << 20;
-}
-
 static int gen6_gmch_probe(struct drm_device *dev,
                           size_t *gtt_total,
                           size_t *stolen,
@@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
        pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
        gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
-       if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
-               *stolen = gen7_get_stolen_size(snb_gmch_ctl);
-       else
-               *stolen = gen6_get_stolen_size(snb_gmch_ctl);
-
+       *stolen = gen6_get_stolen_size(snb_gmch_ctl);
        *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
 
        /* For Modern GENs the PTEs and register space are split in the BAR */
index 83f9c26e1adbf7b9451e02303545eb1b076b03fa..2d6b62e42daf324478ea64bc49e6e722330c3e2b 100644 (file)
@@ -46,8 +46,6 @@
 #define    SNB_GMCH_GGMS_MASK  0x3
 #define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
 #define    SNB_GMCH_GMS_MASK    0x1f
-#define    IVB_GMCH_GMS_SHIFT   4
-#define    IVB_GMCH_GMS_MASK    0xf
 
 
 /* PCI config space */
index 26a0a570f92e0eba28fa443b28176252a7bb577d..fb961bb81903c95550289845a846bd0308c448d7 100644 (file)
@@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
+               if (port != PORT_A)
+                       intel_dp_stop_link_train(intel_dp);
        }
 }
 
@@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
        } else if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+               if (port == PORT_A)
+                       intel_dp_stop_link_train(intel_dp);
+
                ironlake_edp_backlight_on(intel_dp);
        }
 
index efe8299197555c1ae5def2bc95bf930a4e041a8b..ad1117bebd7ee47423267eabdb0ace8cafec2381 100644 (file)
@@ -8140,6 +8140,21 @@ static void intel_set_config_restore_state(struct drm_device *dev,
        }
 }
 
+static bool
+is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
+                     int num_connectors)
+{
+       int i;
+
+       for (i = 0; i < num_connectors; i++)
+               if (connectors[i].encoder &&
+                   connectors[i].encoder->crtc == crtc &&
+                   connectors[i].dpms != DRM_MODE_DPMS_ON)
+                       return true;
+
+       return false;
+}
+
 static void
 intel_set_config_compute_mode_changes(struct drm_mode_set *set,
                                      struct intel_set_config *config)
@@ -8147,7 +8162,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
 
        /* We should be able to check here if the fb has the same properties
         * and then just flip_or_move it */
-       if (set->crtc->fb != set->fb) {
+       if (set->connectors != NULL &&
+           is_crtc_connector_off(set->crtc, *set->connectors,
+                                 set->num_connectors)) {
+                       config->mode_changed = true;
+       } else if (set->crtc->fb != set->fb) {
                /* If we have no fb then treat it as a full mode set */
                if (set->crtc->fb == NULL) {
                        DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
@@ -8157,8 +8176,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
                } else if (set->fb->pixel_format !=
                           set->crtc->fb->pixel_format) {
                        config->mode_changed = true;
-               } else
+               } else {
                        config->fb_changed = true;
+               }
        }
 
        if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
@@ -8332,11 +8352,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 
                ret = intel_set_mode(set->crtc, set->mode,
                                     set->x, set->y, set->fb);
-               if (ret) {
-                       DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
-                                 set->crtc->base.id, ret);
-                       goto fail;
-               }
        } else if (config->fb_changed) {
                intel_crtc_wait_for_pending_flips(set->crtc);
 
@@ -8344,18 +8359,18 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
                                          set->x, set->y, set->fb);
        }
 
-       intel_set_config_free(config);
-
-       return 0;
-
+       if (ret) {
+               DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
+                         set->crtc->base.id, ret);
 fail:
-       intel_set_config_restore_state(dev, config);
+               intel_set_config_restore_state(dev, config);
 
-       /* Try to restore the config */
-       if (config->mode_changed &&
-           intel_set_mode(save_set.crtc, save_set.mode,
-                          save_set.x, save_set.y, save_set.fb))
-               DRM_ERROR("failed to restore config after modeset failure\n");
+               /* Try to restore the config */
+               if (config->mode_changed &&
+                   intel_set_mode(save_set.crtc, save_set.mode,
+                                  save_set.x, save_set.y, save_set.fb))
+                       DRM_ERROR("failed to restore config after modeset failure\n");
+       }
 
 out_config:
        intel_set_config_free(config);
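
The intel_crtc_set_config() rewrite above does two things: the new is_crtc_connector_off() helper forces a full modeset when any connector feeding the CRTC is DPMS-off, and the success and failure exits are folded together so a failed intel_set_mode() falls through into the same restore block the earlier goto fail sites use. A standalone, hedged sketch of that control-flow shape; prepare/do_modeset/restore_state and friends are stand-ins, not the real i915 helpers:

#include <stdio.h>

static int prepare(void)                    { return 0;  }	/* stubs for illustration */
static int do_modeset(void)                 { return -1; }
static void restore_state(void)             { }
static int try_restore_previous_mode(void)  { return 0;  }
static void cleanup(void)                   { }

static int set_config_sketch(void)
{
	int ret;

	ret = prepare();
	if (ret)
		goto fail;		/* early failures reuse the shared block */

	ret = do_modeset();		/* a late failure falls through into it */
	if (ret) {
		fprintf(stderr, "modeset failed, err = %d\n", ret);
fail:
		restore_state();
		if (try_restore_previous_mode())
			fprintf(stderr, "restore after failure also failed\n");
	}

	cleanup();			/* runs on success and failure alike */
	return ret;
}

int main(void) { return set_config_sketch() ? 1 : 0; }
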
index fb2fbc1e08b9ba0048b17c02badb8fd8ed29d089..70789b1b564282b9ba7df39c73283e8b532d67ad 100644 (file)
@@ -303,7 +303,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
-                                         msecs_to_jiffies(10));
+                                         msecs_to_jiffies_timeout(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
@@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
+       if (is_edp(intel_dp) && dev_priv->edp.bpp)
+               bpp = min_t(int, bpp, dev_priv->edp.bpp);
+
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(target_clock, bpp);
 
@@ -739,6 +742,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        intel_dp->link_bw = bws[clock];
        intel_dp->lane_count = lane_count;
        adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+       pipe_config->pipe_bpp = bpp;
        pipe_config->pixel_target_clock = target_clock;
 
        DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
@@ -751,20 +755,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                               target_clock, adjusted_mode->clock,
                               &pipe_config->dp_m_n);
 
-       /*
-        * XXX: We have a strange regression where using the vbt edp bpp value
-        * for the link bw computation results in black screens, the panel only
-        * works when we do the computation at the usual 24bpp (but still
-        * requires us to use 18bpp). Until that's fully debugged, stay
-        * bug-for-bug compatible with the old code.
-        */
-       if (is_edp(intel_dp) && dev_priv->edp.bpp) {
-               DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
-                             bpp, dev_priv->edp.bpp);
-               bpp = min_t(int, bpp, dev_priv->edp.bpp);
-       }
-       pipe_config->pipe_bpp = bpp;
-
        return true;
 }
 
@@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        ironlake_edp_panel_on(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
+       intel_dp_stop_link_train(intel_dp);
        ironlake_edp_backlight_on(intel_dp);
 }
 
@@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        int ret;
-       uint32_t temp;
 
        if (HAS_DDI(dev)) {
-               temp = I915_READ(DP_TP_CTL(port));
+               uint32_t temp = I915_READ(DP_TP_CTL(port));
 
                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
-
-                       if (port != PORT_A) {
-                               temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
-                               I915_WRITE(DP_TP_CTL(port), temp);
-
-                               if (wait_for((I915_READ(DP_TP_STATUS(port)) &
-                                             DP_TP_STATUS_IDLE_DONE), 1))
-                                       DRM_ERROR("Timed out waiting for DP idle patterns\n");
-
-                               temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
-                       }
-
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
                        break;
@@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
        return true;
 }
 
+static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum port port = intel_dig_port->port;
+       uint32_t val;
+
+       if (!HAS_DDI(dev))
+               return;
+
+       val = I915_READ(DP_TP_CTL(port));
+       val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+       val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+       I915_WRITE(DP_TP_CTL(port), val);
+
+       /*
+        * On PORT_A we can have only eDP in SST mode. There the only reason
+        * we need to set idle transmission mode is to work around a HW issue
+        * where we enable the pipe while not in idle link-training mode.
+        * In this case there is a requirement to wait for a minimum number of
+        * idle patterns to be sent.
+        */
+       if (port == PORT_A)
+               return;
+
+       if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
+                    1))
+               DRM_ERROR("Timed out waiting for DP idle patterns\n");
+}
+
 /* Enable corresponding port and start training pattern 1 */
 void
 intel_dp_start_link_train(struct intel_dp *intel_dp)
@@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                ++tries;
        }
 
+       intel_dp_set_idle_link_train(intel_dp);
+
+       intel_dp->DP = DP;
+
        if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
 
-       intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+       intel_dp_set_link_train(intel_dp, intel_dp->DP,
+                               DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                              drm_get_encoder_name(&intel_encoder->base));
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
+               intel_dp_stop_link_train(intel_dp);
        }
 }
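
Across the intel_dp.c and intel_ddi.c hunks, link training is split into explicit stages: intel_dp_complete_link_train() now ends with the port sending idle patterns, and the new intel_dp_stop_link_train() switches to normal transmission; on DDI port A (eDP) that stop is deferred until intel_enable_ddi() has brought the pipe up. A hedged sketch of the resulting call order — an outline of the sequence, not compilable outside the driver:

/* Sketch of the training sequence after this series; simplified, with the
 * port-A special case reduced to a bool parameter. */
static void dp_link_train_sketch(struct intel_dp *intel_dp, bool port_is_a)
{
	intel_dp_start_link_train(intel_dp);	/* training pattern 1: clock recovery */
	intel_dp_complete_link_train(intel_dp);	/* pattern 2/3: channel EQ, then the
						 * port is left sending idle patterns */
	if (!port_is_a)
		intel_dp_stop_link_train(intel_dp);	/* normal pixels from here on */
	/* on port A, intel_enable_ddi() calls intel_dp_stop_link_train() only
	 * once the pipe is enabled, per the HW note in the new helper above */
}
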
 
index b5b6d19e6dd3ff30293799980d1efafef6540812..624a9e6b8d718ebe64a3aa41019137b673b3b296 100644 (file)
@@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
 extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
index 0e19e575a1b41e456cbe3e82877502fa1906c875..6b7c3ca2c035e5514c2c333877966ff1cb3d3cfd 100644 (file)
@@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev)
 void intel_fbdev_set_suspend(struct drm_device *dev, int state)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       if (!dev_priv->fbdev)
+       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+       struct fb_info *info;
+
+       if (!ifbdev)
                return;
 
-       fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+       info = ifbdev->helper.fbdev;
+
+       /* On resume from hibernation: If the object is shmemfs backed, it has
+        * been restored from swap. If the object is stolen however, it will be
+        * full of whatever garbage was left in there.
+        */
+       if (!state && ifbdev->ifb.obj->stolen)
+               memset_io(info->screen_base, 0, info->screen_size);
+
+       fb_set_suspend(info, state);
 }
 
 MODULE_LICENSE("GPL and additional rights");
index 5d245031e391a464fbe8eb6fb6031e70e1ac6770..639fe192997cdecb602c789f97045aad61328249 100644 (file)
@@ -228,7 +228,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
         * need to wake up periodically and check that ourselves. */
        I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
 
-       for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
+       for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
                prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
 
@@ -263,7 +263,8 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
        /* Important: The hw handles only the first bit, so set only one! */
        I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
 
-       ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+       ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+                                msecs_to_jiffies_timeout(10));
 
        I915_WRITE(GMBUS4 + reg_offset, 0);
 
index de3b0dc5658bcf0c84081ec23ee1cc0add9d93c5..aa01128ff192cc6c5860c881a9aa8a71cb756fd7 100644 (file)
@@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev)
 
        vlv_update_drain_latency(dev);
 
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
 
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev)
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
 
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
 
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
@@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
@@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
        /*
@@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
        /*
@@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
        unsigned int enabled;
 
        enabled = 0;
-       if (g4x_compute_wm0(dev, 0,
+       if (g4x_compute_wm0(dev, PIPE_A,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 1;
+               enabled |= 1 << PIPE_A;
        }
 
-       if (g4x_compute_wm0(dev, 1,
+       if (g4x_compute_wm0(dev, PIPE_B,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 2;
+               enabled |= 1 << PIPE_B;
        }
 
-       if (g4x_compute_wm0(dev, 2,
+       if (g4x_compute_wm0(dev, PIPE_C,
                            &sandybridge_display_wm_info, latency,
                            &sandybridge_cursor_wm_info, latency,
                            &plane_wm, &cursor_wm)) {
@@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
                DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
-               enabled |= 3;
+               enabled |= 1 << PIPE_C;
        }
 
        /*
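
The watermark hunks above replace magic pipe numbers with the PIPE_A/PIPE_B/PIPE_C enum and, in ivybridge_update_wm(), fix a real bug: `enabled |= 3` set the bits for pipes A and B instead of pipe C, which confuses the single_plane_enabled() check. A standalone illustration of the difference:

/* Standalone illustration: "enabled |= 3" marks the wrong pipes for pipe C. */
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

int main(void)
{
	unsigned int wrong = 0, right = 0;

	wrong |= 3;			/* old code: 0b011 = pipes A and B */
	right |= 1 << PIPE_C;		/* new code: 0b100 = pipe C only   */

	printf("old mask 0x%x, new mask 0x%x\n", wrong, right);
	printf("exactly one plane? old=%d new=%d\n",
	       wrong && !(wrong & (wrong - 1)),	/* is a single bit set? */
	       right && !(right & (right - 1)));
	return 0;
}
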
index f9889658329bfd1aa613cc595a143511e9f94386..77b8a45fb10a3c872aa86304f22abf16266a0249 100644 (file)
@@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
 
 static inline void mga_wait_vsync(struct mga_device *mdev)
 {
-       unsigned int count = 0;
+       unsigned long timeout = jiffies + HZ/10;
        unsigned int status = 0;
 
        do {
                status = RREG32(MGAREG_Status);
-               count++;
-       } while ((status & 0x08) && (count < 250000));
-       count = 0;
+       } while ((status & 0x08) && time_before(jiffies, timeout));
+       timeout = jiffies + HZ/10;
        status = 0;
        do {
                status = RREG32(MGAREG_Status);
-               count++;
-       } while (!(status & 0x08) && (count < 250000));
+       } while (!(status & 0x08) && time_before(jiffies, timeout));
 }
 
 static inline void mga_wait_busy(struct mga_device *mdev)
 {
-       unsigned int count = 0;
+       unsigned long timeout = jiffies + HZ;
        unsigned int status = 0;
        do {
                status = RREG8(MGAREG_Status + 2);
-               count++;
-       } while ((status & 0x01) && (count < 500000));
+       } while ((status & 0x01) && time_before(jiffies, timeout));
 }
 
 /*
@@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-               WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+               WREG8(DAC_DATA, tmp);
 
                WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_REMHEADCTL_CLKDIS;
-               WREG_DAC(MGA1064_REMHEADCTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                /* select PLL Set C */
                tmp = RREG8(MGAREG_MEM_MISC_READ);
@@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                udelay(500);
 
@@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_VREF_CTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~0x04;
-               WREG_DAC(MGA1064_VREF_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                udelay(50);
 
@@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
                tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
                tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
-               WREG_DAC(MGA1064_REMHEADCTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                /* reset dotclock rate bit */
                WREG8(MGAREG_SEQ_INDEX, 1);
@@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                vcount = RREG8(MGAREG_VCOUNT);
 
@@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-       WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+       WREG8(DAC_DATA, tmp);
 
        tmp = RREG8(MGAREG_MEM_MISC_READ);
        tmp |= 0x3 << 2;
@@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 
        WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
        tmp = RREG8(DAC_DATA);
-       WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+       WREG8(DAC_DATA, tmp & ~0x40);
 
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
        WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
@@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        udelay(500);
 
@@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
        tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
        tmp = RREG8(DAC_DATA);
-       WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+       WREG8(DAC_DATA, tmp | 0x40);
 
        tmp = RREG8(MGAREG_MEM_MISC_READ);
        tmp |= (0x3 << 2);
@@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        return 0;
 }
@@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-               WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+               WREG8(DAC_DATA, tmp);
 
                tmp = RREG8(MGAREG_MEM_MISC_READ);
                tmp |= 0x3 << 2;
@@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                udelay(500);
 
@@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
                tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
                tmp = RREG8(DAC_DATA);
                tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
                tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-               WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+               WREG8(DAC_DATA, tmp);
 
                vcount = RREG8(MGAREG_VCOUNT);
 
@@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
        WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-       WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+       WREG8(DAC_DATA, tmp);
 
        WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
        tmp = RREG8(DAC_DATA);
        tmp |= MGA1064_REMHEADCTL_CLKDIS;
-       WREG_DAC(MGA1064_REMHEADCTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        tmp = RREG8(MGAREG_MEM_MISC_READ);
        tmp |= (0x3<<2) | 0xc0;
@@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
        tmp = RREG8(DAC_DATA);
        tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
        tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-       WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+       WREG8(DAC_DATA, tmp);
 
        udelay(500);
 
@@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
        WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
 }
 
-
+/*
+   This is how the framebuffer base address is stored in g200 cards:
+   * Assume @offset is the gpu_addr variable of the framebuffer object
+   * Then addr is the number of _pixels_ (not bytes) from the start of
+     VRAM to the first pixel we want to display. (divided by 2 for 32bit
+     framebuffers)
+   * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
+   addr<20> -> CRTCEXT0<6>
+   addr<19-16> -> CRTCEXT0<3-0>
+   addr<15-8> -> CRTCC<7-0>
+   addr<7-0> -> CRTCD<7-0>
+   CRTCEXT0 has to be programmed last to trigger an update and make the
+   new addr variable take effect.
+ */
 void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
        struct mga_device *mdev = crtc->dev->dev_private;
        u32 addr;
        int count;
+       u8 crtcext0;
 
        while (RREG8(0x1fda) & 0x08);
        while (!(RREG8(0x1fda) & 0x08));
@@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
        count = RREG8(MGAREG_VCOUNT) + 2;
        while (RREG8(MGAREG_VCOUNT) < count);
 
-       addr = offset >> 2;
+       WREG8(MGAREG_CRTCEXT_INDEX, 0);
+       crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
+       crtcext0 &= 0xB0;
+       addr = offset / 8;
+       /* Can't store addresses any higher than that...
+          but we also don't have more than 16MB of memory, so it should be fine. */
+       WARN_ON(addr > 0x1fffff);
+       crtcext0 |= (!!(addr & (1<<20)))<<6;
        WREG_CRT(0x0d, (u8)(addr & 0xff));
        WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
-       WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+       WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
 }
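
The comment above describes how the start address is scattered across CRTCD, CRTCC and CRTCEXT0; the code change also switches the divisor to 8 (for a 32bpp framebuffer, 8 bytes is two pixels, matching the comment's "divided by 2") and routes bit 20 into CRTCEXT0<6>. A standalone illustration of the packing; the offset is an example value, and the snippet ignores the preserved 0xB0 bits that the real code keeps:

/* Standalone sketch of the register packing described in the comment above. */
#include <stdio.h>

int main(void)
{
	unsigned long offset = 0x00300000;	/* byte offset of the fb in VRAM (example) */
	unsigned int addr  = offset / 8;	/* 32bpp: 8 bytes == 2 pixels              */
	unsigned int crtcd = addr & 0xff;		/* addr<7-0>   -> CRTCD<7-0>       */
	unsigned int crtcc = (addr >> 8) & 0xff;	/* addr<15-8>  -> CRTCC<7-0>       */
	unsigned int ext0  = ((addr >> 16) & 0x0f) |	/* addr<19-16> -> CRTCEXT0<3-0>    */
			     (!!(addr & (1 << 20)) << 6);	/* addr<20> -> CRTCEXT0<6> */

	/* The real code preserves CRTCEXT0 bits 0xB0 and writes CRTCEXT0 last,
	 * since that write is what triggers the update. */
	printf("addr=0x%06x CRTCD=0x%02x CRTCC=0x%02x CRTCEXT0=0x%02x\n",
	       addr, crtcd, crtcc, ext0);
	return 0;
}
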
 
 
@@ -829,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 
 
        for (i = 0; i < sizeof(dacvalue); i++) {
-               if ((i <= 0x03) ||
-                   (i == 0x07) ||
-                   (i == 0x0b) ||
-                   (i == 0x0f) ||
-                   ((i >= 0x13) && (i <= 0x17)) ||
+               if ((i <= 0x17) ||
                    (i == 0x1b) ||
                    (i == 0x1c) ||
                    ((i >= 0x1f) && (i <= 0x29)) ||
index 955af122c3a68dc06eb0cea0360e746447416966..a36e64e98ef372a91566da1d6fb0fff6ebef400f 100644 (file)
@@ -138,7 +138,6 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
-               device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
                device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xce:
@@ -225,7 +224,6 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
-               device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
                device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xc8:
index ddaeb55729038ee4572b17c78c6ec94198ac0280..89bf459d584bb2f8272c0443ed8411715c2c0ed5 100644 (file)
@@ -47,6 +47,7 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
        struct nouveau_gpuobj *cur;
        int i, p;
 
+       mutex_lock(&nv_subdev(priv)->mutex);
        cur = priv->playlist[priv->cur_playlist];
        priv->cur_playlist = !priv->cur_playlist;
 
@@ -60,6 +61,7 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
        nv_wr32(priv, 0x0032f4, cur->addr >> 12);
        nv_wr32(priv, 0x0032ec, p);
        nv_wr32(priv, 0x002500, 0x00000101);
+       mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
 static int
index 4d4a6b905370b2633b6afb199de3eb3973943ece..46dfa68c47bbd84ca4c30d07955111aab2bb29b5 100644 (file)
@@ -71,6 +71,7 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
        struct nouveau_gpuobj *cur;
        int i, p;
 
+       mutex_lock(&nv_subdev(priv)->mutex);
        cur = priv->playlist[priv->cur_playlist];
        priv->cur_playlist = !priv->cur_playlist;
 
@@ -87,6 +88,7 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
        nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
        if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
                nv_error(priv, "playlist update failed\n");
+       mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
 static int
@@ -248,9 +250,17 @@ nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
        struct nvc0_fifo_priv *priv = (void *)object->engine;
        struct nvc0_fifo_chan *chan = (void *)object;
        u32 chid = chan->base.chid;
+       u32 mask, engine;
 
        nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
        nvc0_fifo_playlist_update(priv);
+       mask = nv_rd32(priv, 0x0025a4);
+       for (engine = 0; mask && engine < 16; engine++) {
+               if (!(mask & (1 << engine)))
+                       continue;
+               nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000);
+               mask &= ~(1 << engine);
+       }
        nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
 
        return nouveau_fifo_channel_fini(&chan->base, suspend);
index 9151919fb83152a3c5035299f04fcbbd472b74f9..56192a7242aee51ab1fdbb575b8463a47a41acb7 100644 (file)
@@ -94,11 +94,13 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
        u32 match = (engine << 16) | 0x00000001;
        int i, p;
 
+       mutex_lock(&nv_subdev(priv)->mutex);
        cur = engn->playlist[engn->cur_playlist];
        if (unlikely(cur == NULL)) {
                int ret = nouveau_gpuobj_new(nv_object(priv), NULL,
                                             0x8000, 0x1000, 0, &cur);
                if (ret) {
+                       mutex_unlock(&nv_subdev(priv)->mutex);
                        nv_error(priv, "playlist alloc failed\n");
                        return;
                }
@@ -122,6 +124,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
        nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
        if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
                nv_error(priv, "playlist %d update timeout\n", engine);
+       mutex_unlock(&nv_subdev(priv)->mutex);
 }
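
All three playlist_update() implementations touched in these hunks gain the same subdev mutex because the function flips cur_playlist and rewrites the shared playlist object, so two concurrent callers could otherwise race on the double buffer; note that the allocation-failure path above has to unlock before returning. A standalone sketch of that shape, using a pthread mutex as a stand-in for the kernel mutex:

/* Standalone sketch (pthread stand-in for the kernel mutex) of the locking
 * shape added above: flip-and-rewrite of a shared playlist under one lock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t playlist_lock = PTHREAD_MUTEX_INITIALIZER;
static int cur_playlist;

static void playlist_update(bool alloc_ok)
{
	pthread_mutex_lock(&playlist_lock);

	if (!alloc_ok) {				/* mirrors the alloc-failure path */
		pthread_mutex_unlock(&playlist_lock);	/* every early exit must unlock   */
		return;
	}

	cur_playlist = !cur_playlist;			/* flip the double buffer */
	printf("submitting playlist %d\n", cur_playlist);

	pthread_mutex_unlock(&playlist_lock);
}

int main(void)
{
	playlist_update(true);
	playlist_update(false);
	return 0;
}
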
 
 static int
index c300b5e7b67048550c4d019bc94b31dc9c0ac6a9..c434d398d16f0302bec862c95b6421cb8e505652 100644 (file)
@@ -1940,8 +1940,8 @@ init_zm_mask_add(struct nvbios_init *init)
        trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
        init->offset += 13;
 
-       data  =  init_rd32(init, addr) & mask;
-       data |= ((data + add) & ~mask);
+       data =  init_rd32(init, addr);
+       data = (data & mask) | ((data + add) & ~mask);
        init_wr32(init, addr, data);
 }
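
The two-line fix above changes what ZM_MASK_ADD computes: the old code added `add` to the already-masked register value, so the addition operated on the wrong number; the intended result is to preserve the bits selected by `mask` and add into the remaining bits of the original value. A standalone check of both versions:

/* Standalone check of the ZM_MASK_ADD fix above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = 0xf3, mask = 0xf0, add = 0x01;

	/* old (buggy): the add operates on the masked value */
	uint32_t old = reg & mask;
	old |= (old + add) & ~mask;

	/* new: keep the masked bits, add into the unmasked bits of the original */
	uint32_t fixed = (reg & mask) | ((reg + add) & ~mask);

	printf("old=0x%02x fixed=0x%02x\n", old, fixed);	/* 0xf1 vs 0xf4 */
	return 0;
}
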
 
index e4940fb166e8ead8280d7d4f4ee9659aa8b3d133..fb794e997fbccdeef0f03a460f0112766d1cc8fd 100644 (file)
@@ -29,7 +29,6 @@
 struct nvc0_ltcg_priv {
        struct nouveau_ltcg base;
        u32 part_nr;
-       u32 part_mask;
        u32 subp_nr;
        struct nouveau_mm tags;
        u32 num_tags;
@@ -105,8 +104,6 @@ nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
 
        /* wait until it's finished with clearing */
        for (p = 0; p < priv->part_nr; ++p) {
-               if (!(priv->part_mask & (1 << p)))
-                       continue;
                for (i = 0; i < priv->subp_nr; ++i)
                        nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
        }
@@ -121,6 +118,8 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
        int ret;
 
        nv_wr32(priv, 0x17e8d8, priv->part_nr);
+       if (nv_device(pfb)->card_type >= NV_E0)
+               nv_wr32(priv, 0x17e000, priv->part_nr);
 
        /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
        priv->num_tags = (pfb->ram.size >> 17) / 4;
@@ -167,16 +166,20 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 {
        struct nvc0_ltcg_priv *priv;
        struct nouveau_fb *pfb = nouveau_fb(parent);
-       int ret;
+       u32 parts, mask;
+       int ret, i;
 
        ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       priv->part_nr = nv_rd32(priv, 0x022438);
-       priv->part_mask = nv_rd32(priv, 0x022554);
-
+       parts = nv_rd32(priv, 0x022438);
+       mask = nv_rd32(priv, 0x022554);
+       for (i = 0; i < parts; i++) {
+               if (!(mask & (1 << i)))
+                       priv->part_nr++;
+       }
        priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
 
        nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
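
The constructor above used to keep the raw partition count plus a separate part_mask; it now folds the mask into part_nr by counting only the partitions whose bit is clear (reading 0x022554 as a disable mask is the apparent intent), which is why the part_mask test in nvc0_ltcg_tags_clear() could be dropped. A standalone illustration of the count; the register values are examples:

/* Standalone illustration of the partition-count logic added above. */
#include <stdio.h>

int main(void)
{
	unsigned int parts = 4;		/* value read from 0x022438 (example) */
	unsigned int mask  = 0x2;	/* value read from 0x022554 (example) */
	unsigned int part_nr = 0, i;

	for (i = 0; i < parts; i++)
		if (!(mask & (1 << i)))	/* bit set in the mask -> skip that one */
			part_nr++;

	printf("%u of %u partitions counted\n", part_nr, parts);	/* 3 of 4 */
	return 0;
}
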
index 7bf22d4a3d9679c1553e0b9846802abf11c729be..f17dc2ab03ecd25a762e52840f02b8f4652345d4 100644 (file)
@@ -638,17 +638,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
        }
 
        s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
-       if (s->event) {
-               struct drm_pending_vblank_event *e = s->event;
-               struct timeval now;
-
-               do_gettimeofday(&now);
-               e->event.sequence = 0;
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
-               list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
-       }
+       if (s->event)
+               drm_send_vblank_event(dev, -1, s->event);
 
        list_del(&s->head);
        if (ps)
index 46c152ff0a80f41a102a332a2378867cf0e88f3a..383f4e6ea9d164c59509c7788666cf89fb6abbe0 100644 (file)
@@ -453,18 +453,32 @@ nouveau_do_suspend(struct drm_device *dev)
        NV_INFO(drm, "evicting buffers...\n");
        ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 
+       NV_INFO(drm, "waiting for kernel channels to go idle...\n");
+       if (drm->cechan) {
+               ret = nouveau_channel_idle(drm->cechan);
+               if (ret)
+                       return ret;
+       }
+
+       if (drm->channel) {
+               ret = nouveau_channel_idle(drm->channel);
+               if (ret)
+                       return ret;
+       }
+
+       NV_INFO(drm, "suspending client object trees...\n");
        if (drm->fence && nouveau_fence(drm)->suspend) {
                if (!nouveau_fence(drm)->suspend(drm))
                        return -ENOMEM;
        }
 
-       NV_INFO(drm, "suspending client object trees...\n");
        list_for_each_entry(cli, &drm->clients, head) {
                ret = nouveau_client_fini(&cli->base, true);
                if (ret)
                        goto fail_client;
        }
 
+       NV_INFO(drm, "suspending kernel object tree...\n");
        ret = nouveau_client_fini(&drm->client.base, true);
        if (ret)
                goto fail_client;
@@ -514,17 +528,18 @@ nouveau_do_resume(struct drm_device *dev)
 
        nouveau_agp_reset(drm);
 
-       NV_INFO(drm, "resuming client object trees...\n");
+       NV_INFO(drm, "resuming kernel object tree...\n");
        nouveau_client_init(&drm->client.base);
        nouveau_agp_init(drm);
 
+       NV_INFO(drm, "resuming client object trees...\n");
+       if (drm->fence && nouveau_fence(drm)->resume)
+               nouveau_fence(drm)->resume(drm);
+
        list_for_each_entry(cli, &drm->clients, head) {
                nouveau_client_init(&cli->base);
        }
 
-       if (drm->fence && nouveau_fence(drm)->resume)
-               nouveau_fence(drm)->resume(drm);
-
        nouveau_run_vbios_init(dev);
        nouveau_pm_resume(dev);
 
index 9c53c25e5201763a4673734d5ce32d72043f5a35..826586ffbe835d94983f779b372149d92143b863 100644 (file)
@@ -649,6 +649,9 @@ static void pdev_shutdown(struct platform_device *device)
 
 static int pdev_probe(struct platform_device *device)
 {
+       if (!omapdss_is_initialized())
+               return -EPROBE_DEFER;
+
        DBG("%s", device->name);
        return drm_platform_init(&omap_drm_driver, device);
 }
index 2f1a57e11140adac1e8850abdd348092da07f083..d6c12796023cd654e95196bc9d55ce18aabdb114 100644 (file)
@@ -4,6 +4,7 @@ config DRM_QXL
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
+       select FB_DEFERRED_IO
         select DRM_KMS_HELPER
         select DRM_TTM
        help
index 08b0823c93d526405d506aca4849f2c83a9d9eb5..f86771481317b77d565675c956d77fdcb22f2789 100644 (file)
@@ -277,7 +277,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
        return 0;
 }
 
-static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
 {
        int irq_num;
        long addr = qdev->io_base + port;
@@ -285,20 +285,29 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
 
        mutex_lock(&qdev->async_io_mutex);
        irq_num = atomic_read(&qdev->irq_received_io_cmd);
-
-
        if (qdev->last_sent_io_cmd > irq_num) {
-               ret = wait_event_interruptible(qdev->io_cmd_event,
-                                              atomic_read(&qdev->irq_received_io_cmd) > irq_num);
-               if (ret)
+               if (intr)
+                       ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+                                                              atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+               else
+                       ret = wait_event_timeout(qdev->io_cmd_event,
+                                                atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+               /* 0 is timeout; just bail, the "hw" has gone away */
+               if (ret <= 0)
                        goto out;
                irq_num = atomic_read(&qdev->irq_received_io_cmd);
        }
        outb(val, addr);
        qdev->last_sent_io_cmd = irq_num + 1;
-       ret = wait_event_interruptible(qdev->io_cmd_event,
-                                      atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+       if (intr)
+               ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+                                                      atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+       else
+               ret = wait_event_timeout(qdev->io_cmd_event,
+                                        atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 out:
+       if (ret > 0)
+               ret = 0;
        mutex_unlock(&qdev->async_io_mutex);
        return ret;
 }
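
The qxl change above bounds both waits at 5 seconds and normalizes the result: wait_event_timeout() returns 0 on timeout and the remaining jiffies (positive) when the condition became true, while the interruptible variant may also return a negative error, so the function bails on anything <= 0 and collapses a positive leftover to 0 for its callers. A standalone sketch of that return-value convention, with sample values only:

/* Standalone sketch of the wait_event_timeout()-style return handling above:
 * <0 interrupted, 0 timed out, >0 condition met with jiffies to spare. */
#include <stdio.h>

static const char *classify_wait(long ret)
{
	if (ret < 0)
		return "interrupted (negative error)";
	if (ret == 0)
		return "timed out";
	return "condition met (collapse to 0 = success)";
}

int main(void)
{
	long samples[] = { -512, 0, 37 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%ld -> %s\n", samples[i], classify_wait(samples[i]));
	return 0;
}
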
@@ -308,7 +317,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
        int ret;
 
 restart:
-       ret = wait_for_io_cmd_user(qdev, val, port);
+       ret = wait_for_io_cmd_user(qdev, val, port, false);
        if (ret == -ERESTARTSYS)
                goto restart;
 }
@@ -340,7 +349,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
        mutex_lock(&qdev->update_area_mutex);
        qdev->ram_header->update_area = *area;
        qdev->ram_header->update_surface = surface_id;
-       ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+       ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
        mutex_unlock(&qdev->update_area_mutex);
        return ret;
 }
index fcfd4436ceedd50f335afdc84edac0001036e2d8..823d29e926ec0aa6c9715b4cb93448b3c4e90215 100644 (file)
@@ -428,10 +428,10 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
        int inc = 1;
 
        qobj = gem_to_qxl_bo(qxl_fb->obj);
-       if (qxl_fb != qdev->active_user_framebuffer) {
-               DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n",
-                       __func__, qxl_fb, qdev->active_user_framebuffer);
-       }
+       /* if we aren't the primary surface, ignore this */
+       if (!qobj->is_primary)
+               return 0;
+
        if (!num_clips) {
                num_clips = 1;
                clips = &norect;
@@ -604,7 +604,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
                                               mode->hdisplay,
                                               mode->vdisplay);
        }
-       qdev->mode_set = true;
        return 0;
 }
 
@@ -893,7 +892,6 @@ qxl_user_framebuffer_create(struct drm_device *dev,
 {
        struct drm_gem_object *obj;
        struct qxl_framebuffer *qxl_fb;
-       struct qxl_device *qdev = dev->dev_private;
        int ret;
 
        obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
@@ -909,13 +907,6 @@ qxl_user_framebuffer_create(struct drm_device *dev,
                return NULL;
        }
 
-       if (qdev->active_user_framebuffer) {
-               DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
-                        __func__,
-                        qdev->active_user_framebuffer, qxl_fb);
-       }
-       qdev->active_user_framebuffer = qxl_fb;
-
        return &qxl_fb->base;
 }
 
index 52b582c211da9dc8e8ff4f28dbd52c8a0242c23a..43d06ab28a21195c5c81e03bd3f1b1b838381c80 100644 (file)
@@ -255,12 +255,6 @@ struct qxl_device {
        struct qxl_gem          gem;
        struct qxl_mode_info mode_info;
 
-       /*
-        * last created framebuffer with fb_create
-        * only used by debugfs dumbppm
-        */
-       struct qxl_framebuffer *active_user_framebuffer;
-
        struct fb_info                  *fbdev_info;
        struct qxl_framebuffer  *fbdev_qfb;
        void *ram_physical;
@@ -270,7 +264,6 @@ struct qxl_device {
        struct qxl_ring *cursor_ring;
 
        struct qxl_ram_header *ram_header;
-       bool mode_set;
 
        bool primary_created;
 
index 04b64f9cbfdb94b2c6dc1fa5e09a2df1c2b0b2cc..a4b71b25fa5366c948f8447e6c7b6f0808b4aab4 100644 (file)
@@ -151,7 +151,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
                struct qxl_bo *cmd_bo;
                int release_type;
                struct drm_qxl_command *commands =
-                       (struct drm_qxl_command *)execbuffer->commands;
+                       (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
 
                if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
                                       sizeof(user_cmd)))
@@ -193,7 +193,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
 
                for (i = 0 ; i < user_cmd.relocs_num; ++i) {
                        if (DRM_COPY_FROM_USER(&reloc,
-                                              &((struct drm_qxl_reloc *)user_cmd.relocs)[i],
+                                              &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
                                               sizeof(reloc))) {
                                qxl_bo_list_unreserve(&reloc_list, true);
                                qxl_release_unreserve(qdev, release);
@@ -294,6 +294,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
                goto out;
 
        if (!qobj->pin_count) {
+               qxl_ttm_placement_from_domain(qobj, qobj->type);
                ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
                                      true, false);
                if (unlikely(ret))
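The added (uintptr_t) casts above reflect that execbuffer->commands and user_cmd.relocs are carried as 64-bit integers in the ioctl structs; casting a 64-bit integer straight to a pointer type trips "cast to pointer from integer of different size" on 32-bit builds, while an intermediate uintptr_t cast is exact on 32-bit and a no-op on 64-bit. A minimal stand-alone example of the pattern (hypothetical struct, not the qxl UAPI):

    #include <stdint.h>
    #include <stdio.h>

    struct fake_execbuffer {
            uint64_t commands;      /* user pointer carried as a u64 */
    };

    int main(void)
    {
            int payload = 42;
            struct fake_execbuffer eb = { .commands = (uintptr_t)&payload };

            /* u64 -> uintptr_t -> pointer: the intermediate cast keeps
             * 32-bit builds warning-free and loses nothing on 64-bit */
            int *cmds = (int *)(uintptr_t)eb.commands;

            printf("%d\n", *cmds);
            return 0;
    }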
index 85127ed24cfd3481b72428b84cd54da5c12134dc..e27ce2a907cf798c7027c15abbd270d166ef9dfa 100644 (file)
@@ -128,12 +128,13 @@ int qxl_device_init(struct qxl_device *qdev,
 
        qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
        qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
-       DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
-                (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
+       DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n",
+                (unsigned long long)qdev->vram_base,
+                (unsigned long long)pci_resource_end(pdev, 0),
                 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
                 (int)pci_resource_len(pdev, 0) / 1024,
-                (void *)qdev->surfaceram_base,
-                (void *)pci_resource_end(pdev, 1),
+                (unsigned long long)qdev->surfaceram_base,
+                (unsigned long long)pci_resource_end(pdev, 1),
                 (int)qdev->surfaceram_size / 1024 / 1024,
                 (int)qdev->surfaceram_size / 1024);
 
index 6d6fdb3ba0d07d859b8c0fba223c0fe8193b1e46..d5df8fd1021755ad1ca39543bc0218ea9531b5ae 100644 (file)
@@ -1811,12 +1811,9 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
 
 static void atombios_crtc_prepare(struct drm_crtc *crtc)
 {
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
 
-       radeon_crtc->in_mode_set = true;
-
        /* disable crtc pair power gating before programming */
        if (ASIC_IS_DCE6(rdev))
                atombios_powergate_crtc(crtc, ATOM_DISABLE);
@@ -1827,11 +1824,8 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc)
 
 static void atombios_crtc_commit(struct drm_crtc *crtc)
 {
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-
        atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
        atombios_lock_crtc(crtc, ATOM_DISABLE);
-       radeon_crtc->in_mode_set = false;
 }
 
 static void atombios_crtc_disable(struct drm_crtc *crtc)
index 105bafb6c29d8aab2a6d22018d6a48c86a805cf4..8546e3b333b44c2a07314678f4fd1da5c3382c21 100644 (file)
@@ -2343,11 +2343,13 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
        u32 crtc_enabled, tmp, frame_count, blackout;
        int i, j;
 
-       save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
-       save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+       if (!ASIC_IS_NODCE(rdev)) {
+               save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+               save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
 
-       /* disable VGA render */
-       WREG32(VGA_RENDER_CONTROL, 0);
+               /* disable VGA render */
+               WREG32(VGA_RENDER_CONTROL, 0);
+       }
        /* blank the display controllers */
        for (i = 0; i < rdev->num_crtc; i++) {
                crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
@@ -2438,8 +2440,11 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
                WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
                       (u32)rdev->mc.vram_start);
        }
-       WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
-       WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+       if (!ASIC_IS_NODCE(rdev)) {
+               WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+               WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+       }
 
        /* unlock regs and wait for update */
        for (i = 0; i < rdev->num_crtc; i++) {
@@ -2499,10 +2504,12 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
                        }
                }
        }
-       /* Unlock vga access */
-       WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
-       mdelay(1);
-       WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+       if (!ASIC_IS_NODCE(rdev)) {
+               /* Unlock vga access */
+               WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+               mdelay(1);
+               WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+       }
 }
 
 void evergreen_mc_program(struct radeon_device *rdev)
@@ -3405,8 +3412,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
                rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        } else {
                /* size in MB on evergreen/cayman/tn */
-               rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-               rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+               rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+               rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        }
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        r700_vram_gtt_location(rdev, &rdev->mc);
@@ -4992,8 +4999,7 @@ void evergreen_fini(struct radeon_device *rdev)
 
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 {
-       u32 link_width_cntl, speed_cntl, mask;
-       int ret;
+       u32 link_width_cntl, speed_cntl;
 
        if (radeon_pcie_gen2 == 0)
                return;
@@ -5008,11 +5014,8 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
        if (ASIC_IS_X2(rdev))
                return;
 
-       ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
-       if (ret != 0)
-               return;
-
-       if (!(mask & DRM_PCIE_SPEED_50))
+       if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+               (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
                return;
 
        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
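In the CONFIG_MEMSIZE hunk above (and the identical change to si_mc_init further down), the register value is in megabytes and the destination fields are 64-bit; without the ULL suffixes the two multiplications are done in 32-bit arithmetic and wrap before the widening assignment, so a board reporting 4GB or more of VRAM would show up as a tiny (or zero) size. A stand-alone illustration of the difference, assuming u32/u64 map to uint32_t/uint64_t:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t memsize_mb = 4096;             /* e.g. a 4 GB board */

            /* 32-bit multiply wraps to 0 before being widened */
            uint64_t wrong = memsize_mb * 1024 * 1024;

            /* ULL operands force the multiply into 64 bits */
            uint64_t right = memsize_mb * 1024ULL * 1024ULL;

            printf("wrong=%llu right=%llu\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }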
index b4ab8ceb16545d115d6b39428e54c42a6f6ce1d2..ed7c8a7680929e5589fe84daa70f36d2e3bce74a 100644 (file)
@@ -154,19 +154,18 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
-       u32 base_rate = 48000;
+       u32 base_rate = 24000;
 
        if (!dig || !dig->afmt)
                return;
 
-       /* XXX: properly calculate this */
        /* XXX two dtos; generally use dto0 for hdmi */
        /* Express [24MHz / target pixel clock] as an exact rational
         * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
         */
-       WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff);
-       WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff);
+       WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+       WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
        WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
 }
 
index 865e2c9980dbd5b2245a40ed23cdbb19af1044d5..60170ea5e3a228c0483c18f0fd1652b30ec7760b 100644 (file)
@@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
                OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
 
                for (i = 0; i < nr; ++i) {
-                       if (DRM_COPY_FROM_USER_UNCHECKED
+                       if (DRM_COPY_FROM_USER
                            (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
                                DRM_ERROR("copy cliprect faulted\n");
                                return -EFAULT;
index 1a08008c978bc413fe6e4ac5161c61053ed62c93..b45e64848677a9606a61fea17d440c1a85ea7f09 100644 (file)
@@ -4631,8 +4631,6 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
 {
        u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
        u16 link_cntl2;
-       u32 mask;
-       int ret;
 
        if (radeon_pcie_gen2 == 0)
                return;
@@ -4651,11 +4649,8 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
        if (rdev->family <= CHIP_R600)
                return;
 
-       ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
-       if (ret != 0)
-               return;
-
-       if (!(mask & DRM_PCIE_SPEED_50))
+       if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+               (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
                return;
 
        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
index 47f180a79352a48d5eb083df80181f0b7aab597f..456750a0daa5c98409e040d6c3ed4731e533a0a1 100644 (file)
@@ -232,7 +232,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 base_rate = 48000;
+       u32 base_rate = 24000;
 
        if (!dig || !dig->afmt)
                return;
@@ -240,7 +240,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
        /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
         * doesn't matter which one you use.  Just use the first one.
         */
-       /* XXX: properly calculate this */
        /* XXX two dtos; generally use dto0 for hdmi */
        /* Express [24MHz / target pixel clock] as an exact rational
         * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
@@ -250,13 +249,13 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
                /* according to the reg specs, this should DCE3.2 only, but in
                 * practice it seems to cover DCE3.0 as well.
                 */
-               WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50);
+               WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
                WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
                WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
        } else {
                /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
-               WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) |
-                      AUDIO_DTO_MODULE(clock * 100));
+               WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
+                      AUDIO_DTO_MODULE(clock / 10));
        }
 }
 
index 1442ce765d48a74be071a89b9e7f30beee0332d7..142ce6cc69f5e3564299102ceaa2982adce628e7 100644 (file)
@@ -1694,6 +1694,7 @@ struct radeon_device {
        int num_crtc; /* number of crtcs */
        struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
        bool audio_enabled;
+       bool has_uvd;
        struct r600_audio audio_status; /* audio stuff */
        struct notifier_block acpi_nb;
        /* only one userspace can use Hyperz features or CMASK at a time */
@@ -1838,6 +1839,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
                             (rdev->flags & RADEON_IS_IGP))
 #define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
+#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
 
 /*
  * BIOS helpers.
index 6417132c50cf5bf8df186225cc8233d182188197..06b8c19ab19e7cda0de46325d15291adc8eabf79 100644 (file)
@@ -1935,6 +1935,8 @@ int radeon_asic_init(struct radeon_device *rdev)
        else
                rdev->num_crtc = 2;
 
+       rdev->has_uvd = false;
+
        switch (rdev->family) {
        case CHIP_R100:
        case CHIP_RV100:
@@ -1999,16 +2001,22 @@ int radeon_asic_init(struct radeon_device *rdev)
        case CHIP_RV635:
        case CHIP_RV670:
                rdev->asic = &r600_asic;
+               if (rdev->family == CHIP_R600)
+                       rdev->has_uvd = false;
+               else
+                       rdev->has_uvd = true;
                break;
        case CHIP_RS780:
        case CHIP_RS880:
                rdev->asic = &rs780_asic;
+               rdev->has_uvd = true;
                break;
        case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV710:
        case CHIP_RV740:
                rdev->asic = &rv770_asic;
+               rdev->has_uvd = true;
                break;
        case CHIP_CEDAR:
        case CHIP_REDWOOD:
@@ -2021,11 +2029,13 @@ int radeon_asic_init(struct radeon_device *rdev)
                else
                        rdev->num_crtc = 6;
                rdev->asic = &evergreen_asic;
+               rdev->has_uvd = true;
                break;
        case CHIP_PALM:
        case CHIP_SUMO:
        case CHIP_SUMO2:
                rdev->asic = &sumo_asic;
+               rdev->has_uvd = true;
                break;
        case CHIP_BARTS:
        case CHIP_TURKS:
@@ -2036,27 +2046,37 @@ int radeon_asic_init(struct radeon_device *rdev)
                else
                        rdev->num_crtc = 6;
                rdev->asic = &btc_asic;
+               rdev->has_uvd = true;
                break;
        case CHIP_CAYMAN:
                rdev->asic = &cayman_asic;
                /* set num crtcs */
                rdev->num_crtc = 6;
+               rdev->has_uvd = true;
                break;
        case CHIP_ARUBA:
                rdev->asic = &trinity_asic;
                /* set num crtcs */
                rdev->num_crtc = 4;
+               rdev->has_uvd = true;
                break;
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
+       case CHIP_HAINAN:
                rdev->asic = &si_asic;
                /* set num crtcs */
-               if (rdev->family == CHIP_OLAND)
+               if (rdev->family == CHIP_HAINAN)
+                       rdev->num_crtc = 0;
+               else if (rdev->family == CHIP_OLAND)
                        rdev->num_crtc = 2;
                else
                        rdev->num_crtc = 6;
+               if (rdev->family == CHIP_HAINAN)
+                       rdev->has_uvd = false;
+               else
+                       rdev->has_uvd = true;
                break;
        default:
                /* FIXME: not supported yet */
index fa3c56fba294fe1a433b2d7b6a3e1507e357d1ca..061b227dae0c45f88f506bf497c75faa1195aca1 100644 (file)
@@ -244,24 +244,28 @@ static bool ni_read_disabled_bios(struct radeon_device *rdev)
 
        /* enable the rom */
        WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
-       /* Disable VGA mode */
-       WREG32(AVIVO_D1VGA_CONTROL,
-              (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
-               AVIVO_DVGA_CONTROL_TIMING_SELECT)));
-       WREG32(AVIVO_D2VGA_CONTROL,
-              (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
-               AVIVO_DVGA_CONTROL_TIMING_SELECT)));
-       WREG32(AVIVO_VGA_RENDER_CONTROL,
-              (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+       if (!ASIC_IS_NODCE(rdev)) {
+               /* Disable VGA mode */
+               WREG32(AVIVO_D1VGA_CONTROL,
+                      (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+                                         AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+               WREG32(AVIVO_D2VGA_CONTROL,
+                      (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+                                         AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+               WREG32(AVIVO_VGA_RENDER_CONTROL,
+                      (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+       }
        WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
 
        r = radeon_read_bios(rdev);
 
        /* restore regs */
        WREG32(R600_BUS_CNTL, bus_cntl);
-       WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
-       WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
-       WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+       if (!ASIC_IS_NODCE(rdev)) {
+               WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+               WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+               WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+       }
        WREG32(R600_ROM_CNTL, rom_cntl);
        return r;
 }
index a8f6089039896f47f22152746d6baeceb1b07603..189973836cff691ced976818f1bf045ed58e424b 100644 (file)
@@ -94,6 +94,7 @@ static const char radeon_family_name[][16] = {
        "PITCAIRN",
        "VERDE",
        "OLAND",
+       "HAINAN",
        "LAST",
 };
 
@@ -466,23 +467,27 @@ bool radeon_card_posted(struct radeon_device *rdev)
 {
        uint32_t reg;
 
+       /* required for EFI mode on macbook2,1 which uses an r5xx asic */
        if (efi_enabled(EFI_BOOT) &&
-           rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+           (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+           (rdev->family < CHIP_R600))
                return false;
 
+       if (ASIC_IS_NODCE(rdev))
+               goto check_memsize;
+
        /* first check CRTCs */
-       if (ASIC_IS_DCE41(rdev)) {
+       if (ASIC_IS_DCE4(rdev)) {
                reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-               if (reg & EVERGREEN_CRTC_MASTER_EN)
-                       return true;
-       } else if (ASIC_IS_DCE4(rdev)) {
-               reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
-                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
-                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
-                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
-                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+                       if (rdev->num_crtc >= 4) {
+                               reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+                                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+                       }
+                       if (rdev->num_crtc >= 6) {
+                               reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+                                       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+                       }
                if (reg & EVERGREEN_CRTC_MASTER_EN)
                        return true;
        } else if (ASIC_IS_AVIVO(rdev)) {
@@ -499,6 +504,7 @@ bool radeon_card_posted(struct radeon_device *rdev)
                }
        }
 
+check_memsize:
        /* then check MEM_SIZE, in case the crtcs are off */
        if (rdev->family >= CHIP_R600)
                reg = RREG32(R600_CONFIG_MEMSIZE);
index e38fd559f1abb843b0b1733cfc92f4ddc301802b..eb18bb7af1cc007150871ae814ed6502c36d7c75 100644 (file)
@@ -271,8 +271,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        struct radeon_unpin_work *work;
-       struct drm_pending_vblank_event *e;
-       struct timeval now;
        unsigned long flags;
        u32 update_pending;
        int vpos, hpos;
@@ -328,14 +326,9 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
        radeon_crtc->unpin_work = NULL;
 
        /* wakeup userspace */
-       if (work->event) {
-               e = work->event;
-               e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
-               list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
-       }
+       if (work->event)
+               drm_send_vblank_event(rdev->ddev, crtc_id, work->event);
+
        spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 
        drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
index d33f484ace48792d746e812d81fd84d56ce5d4b8..094e7e5ea39e00fb391e63364c9c2f5978ce9c57 100644 (file)
@@ -147,7 +147,7 @@ static inline void radeon_unregister_atpx_handler(void) {}
 #endif
 
 int radeon_no_wb;
-int radeon_modeset = 1;
+int radeon_modeset = -1;
 int radeon_dynclks = -1;
 int radeon_r4xx_atom = 0;
 int radeon_agpmode = 0;
@@ -456,6 +456,16 @@ static struct pci_driver radeon_kms_pci_driver = {
 
 static int __init radeon_init(void)
 {
+#ifdef CONFIG_VGA_CONSOLE
+       if (vgacon_text_force() && radeon_modeset == -1) {
+               DRM_INFO("VGACON disables radeon kernel modesetting.\n");
+               radeon_modeset = 0;
+       }
+#endif
+       /* set to modesetting by default if not nomodeset */
+       if (radeon_modeset == -1)
+               radeon_modeset = 1;
+
        if (radeon_modeset == 1) {
                DRM_INFO("radeon kernel modesetting enabled.\n");
                driver = &kms_driver;
index 2d91123f2759dd3556310581dda6fa188988e958..36e9803b077d8a5a50a54e0d9e1e70f16b691db7 100644 (file)
@@ -92,6 +92,7 @@ enum radeon_family {
        CHIP_PITCAIRN,
        CHIP_VERDE,
        CHIP_OLAND,
+       CHIP_HAINAN,
        CHIP_LAST,
 };
 
index 6857cb4efb768c932f23d4645c3969fd9c792f18..7cb178a34a0f18c0b50c7cd48a3e151f5ce05b4b 100644 (file)
@@ -1031,11 +1031,9 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
 
 static void radeon_crtc_prepare(struct drm_crtc *crtc)
 {
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_crtc *crtci;
 
-       radeon_crtc->in_mode_set = true;
        /*
        * The hardware wedges sometimes if you reconfigure one CRTC
        * whilst another is running (see fdo bug #24611).
@@ -1046,7 +1044,6 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc)
 
 static void radeon_crtc_commit(struct drm_crtc *crtc)
 {
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_crtc *crtci;
 
@@ -1057,7 +1054,6 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
                if (crtci->enabled)
                        radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
        }
-       radeon_crtc->in_mode_set = false;
 }
 
 static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
index 44e579e75fd0950e6a644d5be110687fbab0efae..69ad4fe224c1951eb370f6396b257b85dda7e4da 100644 (file)
@@ -302,7 +302,6 @@ struct radeon_crtc {
        u16 lut_r[256], lut_g[256], lut_b[256];
        bool enabled;
        bool can_tile;
-       bool in_mode_set;
        uint32_t crtc_offset;
        struct drm_gem_object *cursor_bo;
        uint64_t cursor_addr;
index 93f760e27a9200a81b94dfee8ce6d14ac935b49a..6c0ce8915fac9efc399654a8b25e4bd180285d0a 100644 (file)
@@ -726,7 +726,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
                return r;
        }
        DRM_INFO("radeon: %uM of VRAM memory ready\n",
-                (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
+                (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
                                rdev->mc.gtt_size >> PAGE_SHIFT);
        if (r) {
index 83f612a9500ba288269241f4b17fafa26dc9841a..08aef24afe401601070c52800c7d8551f2c5b225 100644 (file)
@@ -862,10 +862,8 @@ int rv770_uvd_resume(struct radeon_device *rdev)
                chip_id = 0x0100000b;
                break;
        case CHIP_SUMO:
-               chip_id = 0x0100000c;
-               break;
        case CHIP_SUMO2:
-               chip_id = 0x0100000d;
+               chip_id = 0x0100000c;
                break;
        case CHIP_PALM:
                chip_id = 0x0100000e;
@@ -2113,8 +2111,6 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
 {
        u32 link_width_cntl, lanes, speed_cntl, tmp;
        u16 link_cntl2;
-       u32 mask;
-       int ret;
 
        if (radeon_pcie_gen2 == 0)
                return;
@@ -2129,11 +2125,8 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
        if (ASIC_IS_X2(rdev))
                return;
 
-       ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
-       if (ret != 0)
-               return;
-
-       if (!(mask & DRM_PCIE_SPEED_50))
+       if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+               (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
                return;
 
        DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
index f0b6c2f87c4d70fd752d41918a792a43b6b93cac..d1ba9d88f311026cd06078432ce806b012742897 100644 (file)
@@ -60,6 +60,11 @@ MODULE_FIRMWARE("radeon/OLAND_me.bin");
 MODULE_FIRMWARE("radeon/OLAND_ce.bin");
 MODULE_FIRMWARE("radeon/OLAND_mc.bin");
 MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
+MODULE_FIRMWARE("radeon/HAINAN_me.bin");
+MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
+MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
 
 extern int r600_ih_ring_alloc(struct radeon_device *rdev);
 extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -265,6 +270,40 @@ static const u32 oland_golden_registers[] =
        0x15c0, 0x000c0fc0, 0x000c0400
 };
 
+static const u32 hainan_golden_registers[] =
+{
+       0x9a10, 0x00010000, 0x00018208,
+       0x9830, 0xffffffff, 0x00000000,
+       0x9834, 0xf00fffff, 0x00000400,
+       0x9838, 0x0002021c, 0x00020200,
+       0xd0c0, 0xff000fff, 0x00000100,
+       0xd030, 0x000300c0, 0x00800040,
+       0xd8c0, 0xff000fff, 0x00000100,
+       0xd830, 0x000300c0, 0x00800040,
+       0x2ae4, 0x00073ffe, 0x000022a2,
+       0x240c, 0x000007ff, 0x00000000,
+       0x8a14, 0xf000001f, 0x00000007,
+       0x8b24, 0xffffffff, 0x00ffffff,
+       0x8b10, 0x0000ff0f, 0x00000000,
+       0x28a4c, 0x07ffffff, 0x4e000000,
+       0x28350, 0x3f3f3fff, 0x00000000,
+       0x30, 0x000000ff, 0x0040,
+       0x34, 0x00000040, 0x00004040,
+       0x9100, 0x03e00000, 0x03600000,
+       0x9060, 0x0000007f, 0x00000020,
+       0x9508, 0x00010000, 0x00010000,
+       0xac14, 0x000003ff, 0x000000f1,
+       0xac10, 0xffffffff, 0x00000000,
+       0xac0c, 0xffffffff, 0x00003210,
+       0x88d4, 0x0000001f, 0x00000010,
+       0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+       0x98f8, 0xffffffff, 0x02010001
+};
+
 static const u32 tahiti_mgcg_cgcg_init[] =
 {
        0xc400, 0xffffffff, 0xfffffffc,
@@ -673,6 +712,83 @@ static const u32 oland_mgcg_cgcg_init[] =
        0xd8c0, 0xfffffff0, 0x00000100
 };
 
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+       0xc400, 0xffffffff, 0xfffffffc,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9a60, 0xffffffff, 0x00000100,
+       0x92a4, 0xffffffff, 0x00000100,
+       0xc164, 0xffffffff, 0x00000100,
+       0x9774, 0xffffffff, 0x00000100,
+       0x8984, 0xffffffff, 0x06000100,
+       0x8a18, 0xffffffff, 0x00000100,
+       0x92a0, 0xffffffff, 0x00000100,
+       0xc380, 0xffffffff, 0x00000100,
+       0x8b28, 0xffffffff, 0x00000100,
+       0x9144, 0xffffffff, 0x00000100,
+       0x8d88, 0xffffffff, 0x00000100,
+       0x8d8c, 0xffffffff, 0x00000100,
+       0x9030, 0xffffffff, 0x00000100,
+       0x9034, 0xffffffff, 0x00000100,
+       0x9038, 0xffffffff, 0x00000100,
+       0x903c, 0xffffffff, 0x00000100,
+       0xad80, 0xffffffff, 0x00000100,
+       0xac54, 0xffffffff, 0x00000100,
+       0x897c, 0xffffffff, 0x06000100,
+       0x9868, 0xffffffff, 0x00000100,
+       0x9510, 0xffffffff, 0x00000100,
+       0xaf04, 0xffffffff, 0x00000100,
+       0xae04, 0xffffffff, 0x00000100,
+       0x949c, 0xffffffff, 0x00000100,
+       0x802c, 0xffffffff, 0xe0000000,
+       0x9160, 0xffffffff, 0x00010000,
+       0x9164, 0xffffffff, 0x00030002,
+       0x9168, 0xffffffff, 0x00040007,
+       0x916c, 0xffffffff, 0x00060005,
+       0x9170, 0xffffffff, 0x00090008,
+       0x9174, 0xffffffff, 0x00020001,
+       0x9178, 0xffffffff, 0x00040003,
+       0x917c, 0xffffffff, 0x00000007,
+       0x9180, 0xffffffff, 0x00060005,
+       0x9184, 0xffffffff, 0x00090008,
+       0x9188, 0xffffffff, 0x00030002,
+       0x918c, 0xffffffff, 0x00050004,
+       0x9190, 0xffffffff, 0x00000008,
+       0x9194, 0xffffffff, 0x00070006,
+       0x9198, 0xffffffff, 0x000a0009,
+       0x919c, 0xffffffff, 0x00040003,
+       0x91a0, 0xffffffff, 0x00060005,
+       0x91a4, 0xffffffff, 0x00000009,
+       0x91a8, 0xffffffff, 0x00080007,
+       0x91ac, 0xffffffff, 0x000b000a,
+       0x91b0, 0xffffffff, 0x00050004,
+       0x91b4, 0xffffffff, 0x00070006,
+       0x91b8, 0xffffffff, 0x0008000b,
+       0x91bc, 0xffffffff, 0x000a0009,
+       0x91c0, 0xffffffff, 0x000d000c,
+       0x91c4, 0xffffffff, 0x00060005,
+       0x91c8, 0xffffffff, 0x00080007,
+       0x91cc, 0xffffffff, 0x0000000b,
+       0x91d0, 0xffffffff, 0x000a0009,
+       0x91d4, 0xffffffff, 0x000d000c,
+       0x9150, 0xffffffff, 0x96940200,
+       0x8708, 0xffffffff, 0x00900100,
+       0xc478, 0xffffffff, 0x00000080,
+       0xc404, 0xffffffff, 0x0020003f,
+       0x30, 0xffffffff, 0x0000001c,
+       0x34, 0x000f0000, 0x000f0000,
+       0x160c, 0xffffffff, 0x00000100,
+       0x1024, 0xffffffff, 0x00000100,
+       0x20a8, 0xffffffff, 0x00000104,
+       0x264c, 0x000c0000, 0x000c0000,
+       0x2648, 0x000c0000, 0x000c0000,
+       0x2f50, 0x00000001, 0x00000001,
+       0x30cc, 0xc0000fff, 0x00000104,
+       0xc1e4, 0x00000001, 0x00000001,
+       0xd0c0, 0xfffffff0, 0x00000100,
+       0xd8c0, 0xfffffff0, 0x00000100
+};
+
 static u32 verde_pg_init[] =
 {
        0x353c, 0xffffffff, 0x40000,
@@ -853,6 +969,17 @@ static void si_init_golden_registers(struct radeon_device *rdev)
                                                 oland_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
                break;
+       case CHIP_HAINAN:
+               radeon_program_register_sequence(rdev,
+                                                hainan_golden_registers,
+                                                (const u32)ARRAY_SIZE(hainan_golden_registers));
+               radeon_program_register_sequence(rdev,
+                                                hainan_golden_registers2,
+                                                (const u32)ARRAY_SIZE(hainan_golden_registers2));
+               radeon_program_register_sequence(rdev,
+                                                hainan_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+               break;
        default:
                break;
        }
@@ -1062,6 +1189,45 @@ static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
        {0x0000009f, 0x00a17730}
 };
 
+static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+       {0x0000006f, 0x03044000},
+       {0x00000070, 0x0480c018},
+       {0x00000071, 0x00000040},
+       {0x00000072, 0x01000000},
+       {0x00000074, 0x000000ff},
+       {0x00000075, 0x00143400},
+       {0x00000076, 0x08ec0800},
+       {0x00000077, 0x040000cc},
+       {0x00000079, 0x00000000},
+       {0x0000007a, 0x21000409},
+       {0x0000007c, 0x00000000},
+       {0x0000007d, 0xe8000000},
+       {0x0000007e, 0x044408a8},
+       {0x0000007f, 0x00000003},
+       {0x00000080, 0x00000000},
+       {0x00000081, 0x01000000},
+       {0x00000082, 0x02000000},
+       {0x00000083, 0x00000000},
+       {0x00000084, 0xe3f3e4f4},
+       {0x00000085, 0x00052024},
+       {0x00000087, 0x00000000},
+       {0x00000088, 0x66036603},
+       {0x00000089, 0x01000000},
+       {0x0000008b, 0x1c0a0000},
+       {0x0000008c, 0xff010000},
+       {0x0000008e, 0xffffefff},
+       {0x0000008f, 0xfff3efff},
+       {0x00000090, 0xfff3efbf},
+       {0x00000094, 0x00101101},
+       {0x00000095, 0x00000fff},
+       {0x00000096, 0x00116fff},
+       {0x00000097, 0x60010000},
+       {0x00000098, 0x10010000},
+       {0x00000099, 0x00006000},
+       {0x0000009a, 0x00001000},
+       {0x0000009f, 0x00a07730}
+};
+
 /* ucode loading */
 static int si_mc_load_microcode(struct radeon_device *rdev)
 {
@@ -1095,6 +1261,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
                ucode_size = OLAND_MC_UCODE_SIZE;
                regs_size = TAHITI_IO_MC_REGS_SIZE;
                break;
+       case CHIP_HAINAN:
+               io_mc_regs = (u32 *)&hainan_io_mc_regs;
+               ucode_size = OLAND_MC_UCODE_SIZE;
+               regs_size = TAHITI_IO_MC_REGS_SIZE;
+               break;
        }
 
        running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1198,6 +1369,15 @@ static int si_init_microcode(struct radeon_device *rdev)
                rlc_req_size = SI_RLC_UCODE_SIZE * 4;
                mc_req_size = OLAND_MC_UCODE_SIZE * 4;
                break;
+       case CHIP_HAINAN:
+               chip_name = "HAINAN";
+               rlc_chip_name = "HAINAN";
+               pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+               me_req_size = SI_PM4_UCODE_SIZE * 4;
+               ce_req_size = SI_CE_UCODE_SIZE * 4;
+               rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+               mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+               break;
        default: BUG();
        }
 
@@ -2003,7 +2183,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
                        WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
        } else if ((rdev->family == CHIP_VERDE) ||
-                  (rdev->family == CHIP_OLAND)) {
+                  (rdev->family == CHIP_OLAND) ||
+                  (rdev->family == CHIP_HAINAN)) {
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
                        switch (reg_offset) {
                        case 0:  /* non-AA compressed depth or any compressed stencil */
@@ -2435,7 +2616,7 @@ static void si_gpu_init(struct radeon_device *rdev)
        default:
                rdev->config.si.max_shader_engines = 1;
                rdev->config.si.max_tile_pipes = 4;
-               rdev->config.si.max_cu_per_sh = 2;
+               rdev->config.si.max_cu_per_sh = 5;
                rdev->config.si.max_sh_per_se = 2;
                rdev->config.si.max_backends_per_se = 4;
                rdev->config.si.max_texture_channel_caches = 4;
@@ -2466,6 +2647,23 @@ static void si_gpu_init(struct radeon_device *rdev)
                rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
                gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
                break;
+       case CHIP_HAINAN:
+               rdev->config.si.max_shader_engines = 1;
+               rdev->config.si.max_tile_pipes = 4;
+               rdev->config.si.max_cu_per_sh = 5;
+               rdev->config.si.max_sh_per_se = 1;
+               rdev->config.si.max_backends_per_se = 1;
+               rdev->config.si.max_texture_channel_caches = 2;
+               rdev->config.si.max_gprs = 256;
+               rdev->config.si.max_gs_threads = 16;
+               rdev->config.si.max_hw_contexts = 8;
+
+               rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+               rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+               rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+               rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+               break;
        }
 
        /* Initialize HDP */
@@ -2559,9 +2757,11 @@ static void si_gpu_init(struct radeon_device *rdev)
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
-       WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
-       WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
-       WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+       if (rdev->has_uvd) {
+               WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+               WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+               WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+       }
 
        si_tiling_mode_table_init(rdev);
 
@@ -3304,8 +3504,9 @@ static void si_mc_program(struct radeon_device *rdev)
        if (radeon_mc_wait_for_idle(rdev)) {
                dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
        }
-       /* Lockout access through VGA aperture*/
-       WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+       if (!ASIC_IS_NODCE(rdev))
+               /* Lockout access through VGA aperture*/
+               WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
        /* Update configuration */
        WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
               rdev->mc.vram_start >> 12);
@@ -3327,9 +3528,11 @@ static void si_mc_program(struct radeon_device *rdev)
                dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
        }
        evergreen_mc_resume(rdev, &save);
-       /* we need to own VRAM, so turn off the VGA renderer here
-        * to stop it overwriting our objects */
-       rv515_vga_render_disable(rdev);
+       if (!ASIC_IS_NODCE(rdev)) {
+               /* we need to own VRAM, so turn off the VGA renderer here
+                * to stop it overwriting our objects */
+               rv515_vga_render_disable(rdev);
+       }
 }
 
 static void si_vram_gtt_location(struct radeon_device *rdev,
@@ -3397,8 +3600,8 @@ static int si_mc_init(struct radeon_device *rdev)
        rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
        rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        /* size in MB on si */
-       rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-       rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+       rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        si_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
@@ -4251,8 +4454,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
        tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
        WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
        WREG32(GRBM_INT_CNTL, 0);
-       WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-       WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       if (rdev->num_crtc >= 2) {
+               WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+               WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       }
        if (rdev->num_crtc >= 4) {
                WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
                WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
@@ -4262,8 +4467,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
                WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
 
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       if (rdev->num_crtc >= 2) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+       }
        if (rdev->num_crtc >= 4) {
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
@@ -4273,21 +4480,22 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
        }
 
-       WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
-
-       tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD1_INT_CONTROL, tmp);
-       tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD2_INT_CONTROL, tmp);
-       tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD3_INT_CONTROL, tmp);
-       tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD4_INT_CONTROL, tmp);
-       tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD5_INT_CONTROL, tmp);
-       tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
-       WREG32(DC_HPD6_INT_CONTROL, tmp);
+       if (!ASIC_IS_NODCE(rdev)) {
+               WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 
+               tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD1_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD2_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD3_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD4_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD5_INT_CONTROL, tmp);
+               tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD6_INT_CONTROL, tmp);
+       }
 }
 
 static int si_irq_init(struct radeon_device *rdev)
@@ -4366,7 +4574,7 @@ int si_irq_set(struct radeon_device *rdev)
        u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
        u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
        u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
-       u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+       u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
        u32 grbm_int_cntl = 0;
        u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
        u32 dma_cntl, dma_cntl1;
@@ -4383,12 +4591,14 @@ int si_irq_set(struct radeon_device *rdev)
                return 0;
        }
 
-       hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
-       hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
-       hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
-       hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
-       hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
-       hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+       if (!ASIC_IS_NODCE(rdev)) {
+               hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+               hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+       }
 
        dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
        dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -4479,8 +4689,10 @@ int si_irq_set(struct radeon_device *rdev)
 
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
-       WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
-       WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+       if (rdev->num_crtc >= 2) {
+               WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+               WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+       }
        if (rdev->num_crtc >= 4) {
                WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
                WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
@@ -4490,8 +4702,10 @@ int si_irq_set(struct radeon_device *rdev)
                WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
        }
 
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
-       WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+       if (rdev->num_crtc >= 2) {
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+               WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+       }
        if (rdev->num_crtc >= 4) {
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
@@ -4501,12 +4715,14 @@ int si_irq_set(struct radeon_device *rdev)
                WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
        }
 
-       WREG32(DC_HPD1_INT_CONTROL, hpd1);
-       WREG32(DC_HPD2_INT_CONTROL, hpd2);
-       WREG32(DC_HPD3_INT_CONTROL, hpd3);
-       WREG32(DC_HPD4_INT_CONTROL, hpd4);
-       WREG32(DC_HPD5_INT_CONTROL, hpd5);
-       WREG32(DC_HPD6_INT_CONTROL, hpd6);
+       if (!ASIC_IS_NODCE(rdev)) {
+               WREG32(DC_HPD1_INT_CONTROL, hpd1);
+               WREG32(DC_HPD2_INT_CONTROL, hpd2);
+               WREG32(DC_HPD3_INT_CONTROL, hpd3);
+               WREG32(DC_HPD4_INT_CONTROL, hpd4);
+               WREG32(DC_HPD5_INT_CONTROL, hpd5);
+               WREG32(DC_HPD6_INT_CONTROL, hpd6);
+       }
 
        return 0;
 }
@@ -4515,6 +4731,9 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 {
        u32 tmp;
 
+       if (ASIC_IS_NODCE(rdev))
+               return;
+
        rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
        rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
        rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
@@ -5118,15 +5337,17 @@ static int si_startup(struct radeon_device *rdev)
                return r;
        }
 
-       r = rv770_uvd_resume(rdev);
-       if (!r) {
-               r = radeon_fence_driver_start_ring(rdev,
-                                                  R600_RING_TYPE_UVD_INDEX);
+       if (rdev->has_uvd) {
+               r = rv770_uvd_resume(rdev);
+               if (!r) {
+                       r = radeon_fence_driver_start_ring(rdev,
+                                                          R600_RING_TYPE_UVD_INDEX);
+                       if (r)
+                               dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+               }
                if (r)
-                       dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+                       rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
        }
-       if (r)
-               rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
 
        /* Enable IRQ */
        r = si_irq_init(rdev);
@@ -5185,16 +5406,18 @@ static int si_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
-       ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-       if (ring->ring_size) {
-               r = radeon_ring_init(rdev, ring, ring->ring_size,
-                                    R600_WB_UVD_RPTR_OFFSET,
-                                    UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
-                                    0, 0xfffff, RADEON_CP_PACKET2);
-               if (!r)
-                       r = r600_uvd_init(rdev);
-               if (r)
-                       DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+       if (rdev->has_uvd) {
+               ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+               if (ring->ring_size) {
+                       r = radeon_ring_init(rdev, ring, ring->ring_size,
+                                            R600_WB_UVD_RPTR_OFFSET,
+                                            UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+                                            0, 0xfffff, RADEON_CP_PACKET2);
+                       if (!r)
+                               r = r600_uvd_init(rdev);
+                       if (r)
+                               DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+               }
        }
 
        r = radeon_ib_pool_init(rdev);
@@ -5243,8 +5466,10 @@ int si_suspend(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        si_cp_enable(rdev, false);
        cayman_dma_stop(rdev);
-       r600_uvd_rbc_stop(rdev);
-       radeon_uvd_suspend(rdev);
+       if (rdev->has_uvd) {
+               r600_uvd_rbc_stop(rdev);
+               radeon_uvd_suspend(rdev);
+       }
        si_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        si_pcie_gart_disable(rdev);
@@ -5332,11 +5557,13 @@ int si_init(struct radeon_device *rdev)
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 64 * 1024);
 
-       r = radeon_uvd_init(rdev);
-       if (!r) {
-               ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-               ring->ring_obj = NULL;
-               r600_ring_init(rdev, ring, 4096);
+       if (rdev->has_uvd) {
+               r = radeon_uvd_init(rdev);
+               if (!r) {
+                       ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+                       ring->ring_obj = NULL;
+                       r600_ring_init(rdev, ring, 4096);
+               }
        }
 
        rdev->ih.ring_obj = NULL;
@@ -5384,7 +5611,8 @@ void si_fini(struct radeon_device *rdev)
        radeon_vm_manager_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_irq_kms_fini(rdev);
-       radeon_uvd_fini(rdev);
+       if (rdev->has_uvd)
+               radeon_uvd_fini(rdev);
        si_pcie_gart_fini(rdev);
        r600_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
index 222877ba6cf5b3f9e8deb0738b15d5662aca0cf0..8f2d7d4f9b282e05f7d1e4f2e0b9d2e5325967c4 100644 (file)
@@ -28,6 +28,7 @@
 
 #define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
 #define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
 
 /* discrete uvd clocks */
 #define        CG_UPLL_FUNC_CNTL                               0x634
index 7dff49ed66e763c4f0214a4a52ef0864f798b99f..99e2034e49ccf0687e4096e5912b025e6dc3df4f 100644 (file)
@@ -451,27 +451,16 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
 {
        struct drm_pending_vblank_event *event;
        struct drm_device *dev = scrtc->crtc.dev;
-       struct timeval vblanktime;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->event_lock, flags);
        event = scrtc->event;
        scrtc->event = NULL;
+       if (event) {
+               drm_send_vblank_event(dev, 0, event);
+               drm_vblank_put(dev, 0);
+       }
        spin_unlock_irqrestore(&dev->event_lock, flags);
-
-       if (event == NULL)
-               return;
-
-       event->event.sequence = drm_vblank_count_and_time(dev, 0, &vblanktime);
-       event->event.tv_sec = vblanktime.tv_sec;
-       event->event.tv_usec = vblanktime.tv_usec;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       list_add_tail(&event->base.link, &event->base.file_priv->event_list);
-       wake_up_interruptible(&event->base.file_priv->event_wait);
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-
-       drm_vblank_put(dev, 0);
 }
 
 static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
index 1e2060324f02fd77cbbb3c4d5e2081816cbd0a53..8c04943f82e3531d75ee5565eab06c16e6f46b16 100644 (file)
@@ -1128,11 +1128,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return err;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs) {
-               dev_err(&pdev->dev, "failed to get registers\n");
-               return -ENXIO;
-       }
-
        dc->regs = devm_ioremap_resource(&pdev->dev, regs);
        if (IS_ERR(dc->regs))
                return PTR_ERR(dc->regs);
index 6961bbeab3edb0c9a75767865c87bc2a6a6b4386..264f550999406f7b924be6634917e3826e80a161 100644 (file)
@@ -1685,6 +1685,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
@@ -2341,7 +2342,7 @@ struct hid_device *hid_allocate_device(void)
 
        init_waitqueue_head(&hdev->debug_wait);
        INIT_LIST_HEAD(&hdev->debug_list);
-       mutex_init(&hdev->debug_list_lock);
+       spin_lock_init(&hdev->debug_list_lock);
        sema_init(&hdev->driver_lock, 1);
        sema_init(&hdev->driver_input_lock, 1);
 
index 7e56cb3855e329696abea711445bffe588d101f6..8453214ec3767d6c55aefe2bb99466d426904403 100644 (file)
@@ -579,15 +579,16 @@ void hid_debug_event(struct hid_device *hdev, char *buf)
 {
        int i;
        struct hid_debug_list *list;
+       unsigned long flags;
 
-       mutex_lock(&hdev->debug_list_lock);
+       spin_lock_irqsave(&hdev->debug_list_lock, flags);
        list_for_each_entry(list, &hdev->debug_list, node) {
                for (i = 0; i < strlen(buf); i++)
                        list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
                                buf[i];
                list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
         }
-       mutex_unlock(&hdev->debug_list_lock);
+       spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
 
        wake_up_interruptible(&hdev->debug_wait);
 }
@@ -977,6 +978,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
 {
        int err = 0;
        struct hid_debug_list *list;
+       unsigned long flags;
 
        if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) {
                err = -ENOMEM;
@@ -992,9 +994,9 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
        file->private_data = list;
        mutex_init(&list->read_mutex);
 
-       mutex_lock(&list->hdev->debug_list_lock);
+       spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
        list_add_tail(&list->node, &list->hdev->debug_list);
-       mutex_unlock(&list->hdev->debug_list_lock);
+       spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
 
 out:
        return err;
@@ -1088,10 +1090,11 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
 static int hid_debug_events_release(struct inode *inode, struct file *file)
 {
        struct hid_debug_list *list = file->private_data;
+       unsigned long flags;
 
-       mutex_lock(&list->hdev->debug_list_lock);
+       spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
        list_del(&list->node);
-       mutex_unlock(&list->hdev->debug_list_lock);
+       spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
        kfree(list->hid_debug_buf);
        kfree(list);
 
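
The two hid files above convert hdev->debug_list_lock from a mutex to a spinlock and take it with the _irqsave variants, the usual choice when a shared list can be reached from atomic context (hid_debug_event() sits on the interrupt-driven report path), where sleeping locks are not allowed. A generic sketch of the idiom, with illustrative names rather than the driver's own:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
	int payload;
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);

/* Safe to call from process, softirq or hardirq context. */
static void add_item(struct item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&items_lock, flags);
	list_add_tail(&it->node, &items);
	spin_unlock_irqrestore(&items_lock, flags);
}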
index 9b0efb0083feaf4a1f258226aad0426c7b961721..d1649119211277275840a1bee984ed93de28c025 100644 (file)
@@ -18,7 +18,8 @@
 
 #include "hid-ids.h"
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
 #define SRWS1_NUMBER_LEDS 15
 struct steelseries_srws1_data {
        __u16 led_state;
@@ -107,7 +108,8 @@ static __u8 steelseries_srws1_rdesc_fixed[] = {
 0xC0                /*  End Collection                      */
 };
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
 static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds)
 {
        struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -370,7 +372,8 @@ MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices);
 static struct hid_driver steelseries_srws1_driver = {
        .name = "steelseries_srws1",
        .id_table = steelseries_srws1_devices,
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
        .probe = steelseries_srws1_probe,
        .remove = steelseries_srws1_remove,
 #endif
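
The preprocessor guards above switch from testing CONFIG_LEDS_CLASS / CONFIG_LEDS_CLASS_MODULE directly to the <linux/kconfig.h> helpers: IS_BUILTIN(CONFIG_FOO) is true only for FOO=y, IS_MODULE(CONFIG_FOO) only for FOO=m, and IS_ENABLED() is their OR. The stricter form here keeps the LED code out when the HID driver would be built in while the LED class is only a module, a combination in which the built-in code could not link against the modular symbols. A brief sketch of how these helpers are normally used (CONFIG_FOO is a placeholder symbol):

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_FOO)	/* FOO is y or m */
/* code that merely needs the FOO subsystem to exist in some form */
#endif

#if IS_BUILTIN(CONFIG_FOO)	/* FOO is y */
/* code that must resolve FOO's symbols inside the kernel image */
#endif

#if IS_MODULE(CONFIG_FOO)	/* FOO is m */
/* code compiled only when FOO itself is modular */
#endif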
index bad8128b283a8752a11e1c3a13d02c83bb1df07b..21ef68934a20bba366c1ea1c5be9705f0fafe6fa 100644 (file)
@@ -329,7 +329,7 @@ static u32 get_vp_index(uuid_le *type_guid)
                return 0;
        }
        cur_cpu = (++next_vp % max_cpus);
-       return cur_cpu;
+       return hv_context.vp_index[cur_cpu];
 }
 
 /*
index df0b69987914d0c2241d746956db3d9d4e02a351..2ebd6ce46108ec4e44797cb695194672ad81bdac 100644 (file)
@@ -1414,14 +1414,18 @@ static int abituguru_probe(struct platform_device *pdev)
        pr_info("found Abit uGuru\n");
 
        /* Register sysfs hooks */
-       for (i = 0; i < sysfs_attr_i; i++)
-               if (device_create_file(&pdev->dev,
-                               &data->sysfs_attr[i].dev_attr))
+       for (i = 0; i < sysfs_attr_i; i++) {
+               res = device_create_file(&pdev->dev,
+                                        &data->sysfs_attr[i].dev_attr);
+               if (res)
                        goto abituguru_probe_error;
-       for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
-               if (device_create_file(&pdev->dev,
-                               &abituguru_sysfs_attr[i].dev_attr))
+       }
+       for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) {
+               res = device_create_file(&pdev->dev,
+                                        &abituguru_sysfs_attr[i].dev_attr);
+               if (res)
                        goto abituguru_probe_error;
+       }
 
        data->hwmon_dev = hwmon_device_register(&pdev->dev);
        if (!IS_ERR(data->hwmon_dev))
index aafa4531b9614e4b068c9b91698927272c7dc588..52b77afebde120a17ce75bf6401103cad556d021 100644 (file)
@@ -84,8 +84,10 @@ static int iio_hwmon_probe(struct platform_device *pdev)
                return PTR_ERR(channels);
 
        st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
-       if (st == NULL)
-               return -ENOMEM;
+       if (st == NULL) {
+               ret = -ENOMEM;
+               goto error_release_channels;
+       }
 
        st->channels = channels;
 
@@ -159,7 +161,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
 error_remove_group:
        sysfs_remove_group(&dev->kobj, &st->attr_group);
 error_release_channels:
-       iio_channel_release_all(st->channels);
+       iio_channel_release_all(channels);
        return ret;
 }
 
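
The iio_hwmon change above turns the early -ENOMEM return into a goto so the channels obtained from iio_channel_get_all() are released on every failure path, and it releases the local channels pointer rather than st->channels, since st has not been set up when that label is reached from the failed allocation. The same goto-based unwind idiom in isolation, as a small self-contained sketch:

#include <linux/slab.h>

static int example_setup(void **out_a, void **out_b)
{
	void *a, *b;
	int ret;

	a = kzalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(128, GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto err_free_a;	/* release only what was already acquired */
	}

	*out_a = a;
	*out_b = b;
	return 0;

err_free_a:
	kfree(a);
	return ret;
}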
index f43f5e571db97a9b178f76a88ef82e8414c9ff39..04638aee90398f44e73a620e3a3cffc9631b8d40 100644 (file)
@@ -3705,8 +3705,10 @@ static int nct6775_probe(struct platform_device *pdev)
                        data->have_temp |= 1 << i;
                        data->have_temp_fixed |= 1 << i;
                        data->reg_temp[0][i] = reg_temp_alternate[i];
-                       data->reg_temp[1][i] = reg_temp_over[i];
-                       data->reg_temp[2][i] = reg_temp_hyst[i];
+                       if (i < num_reg_temp) {
+                               data->reg_temp[1][i] = reg_temp_over[i];
+                               data->reg_temp[2][i] = reg_temp_hyst[i];
+                       }
                        data->temp_src[i] = i + 1;
                        continue;
                }
index a478454f690fbc318aefa16b8952040944692149..dfe6d9527efb4b6489676468d495e56e7841b2a9 100644 (file)
@@ -240,7 +240,7 @@ static struct tmp401_data *tmp401_update_device(struct device *dev)
        mutex_lock(&data->update_lock);
 
        next_update = data->last_updated +
-                     msecs_to_jiffies(data->update_interval) + 1;
+                     msecs_to_jiffies(data->update_interval);
        if (time_after(jiffies, next_update) || !data->valid) {
                if (data->kind != tmp432) {
                        /*
index 21fbb340ad6603a249d052d8f7accb6aed0f7753..c41ca6354fc59d8d2111dfdbbb47bdab87f725a8 100644 (file)
@@ -383,7 +383,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
        /* Enable the adapter */
        __i2c_dw_enable(dev, true);
 
-       /* Enable interrupts */
+       /* Clear and enable interrupts */
+       i2c_dw_clear_int(dev);
        dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
 }
 
@@ -448,8 +449,14 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
                                cmd |= BIT(9);
 
                        if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
+
+                               /* avoid rx buffer overrun */
+                               if (rx_limit - dev->rx_outstanding <= 0)
+                                       break;
+
                                dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
                                rx_limit--;
+                               dev->rx_outstanding++;
                        } else
                                dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
                        tx_limit--; buf_len--;
@@ -502,8 +509,10 @@ i2c_dw_read(struct dw_i2c_dev *dev)
 
                rx_valid = dw_readl(dev, DW_IC_RXFLR);
 
-               for (; len > 0 && rx_valid > 0; len--, rx_valid--)
+               for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
                        *buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+                       dev->rx_outstanding--;
+               }
 
                if (len > 0) {
                        dev->status |= STATUS_READ_IN_PROGRESS;
@@ -561,6 +570,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        dev->msg_err = 0;
        dev->status = STATUS_IDLE;
        dev->abort_source = 0;
+       dev->rx_outstanding = 0;
 
        ret = i2c_dw_wait_bus_not_busy(dev);
        if (ret < 0)
index 9c1840ee09c7a4fde94a468a5ff339bdb42ae2ac..e761ad18dd61b8888e77a2989b772f79431d8b26 100644 (file)
@@ -60,6 +60,7 @@
  * @adapter: i2c subsystem adapter node
  * @tx_fifo_depth: depth of the hardware tx fifo
  * @rx_fifo_depth: depth of the hardware rx fifo
+ * @rx_outstanding: current master-rx elements in tx fifo
  */
 struct dw_i2c_dev {
        struct device           *dev;
@@ -88,6 +89,7 @@ struct dw_i2c_dev {
        u32                     master_cfg;
        unsigned int            tx_fifo_depth;
        unsigned int            rx_fifo_depth;
+       int                     rx_outstanding;
 };
 
 #define ACCESS_SWAP            0x00000001
index 8ec91335d95a52c679193cabbd4a522cbb3851bc..35b70a1edf572bedda30222ec8c9d49693491314 100644 (file)
@@ -69,6 +69,7 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
 static const struct acpi_device_id dw_i2c_acpi_match[] = {
        { "INT33C2", 0 },
        { "INT33C3", 0 },
+       { "80860F41", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
index e1cf2e0e1f23de900e92c1839d8c6e856658ea37..3a6903f639137af09d28ab623c345d01812bd0e7 100644 (file)
@@ -231,7 +231,11 @@ static const char *i801_feature_names[] = {
 
 static unsigned int disable_features;
 module_param(disable_features, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(disable_features, "Disable selected driver features");
+MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n"
+       "\t\t  0x01  disable SMBus PEC\n"
+       "\t\t  0x02  disable the block buffer\n"
+       "\t\t  0x08  disable the I2C block read functionality\n"
+       "\t\t  0x10  don't use interrupts ");
 
 /* Make sure the SMBus host is ready to start transmitting.
    Return 0 if it is, -EBUSY if it is not. */
index 3bbd65d35a5e05365361a4d7711e41de99fa8a31..1a3abd6a0bfcb7166bc7cfd7a9cb7be15016e48e 100644 (file)
@@ -252,7 +252,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
                writel(drv_data->cntl_bits,
                        drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
                drv_data->block = 0;
-               wake_up_interruptible(&drv_data->waitq);
+               wake_up(&drv_data->waitq);
                break;
 
        case MV64XXX_I2C_ACTION_CONTINUE:
@@ -300,7 +300,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
                writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
                        drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
                drv_data->block = 0;
-               wake_up_interruptible(&drv_data->waitq);
+               wake_up(&drv_data->waitq);
                break;
 
        case MV64XXX_I2C_ACTION_INVALID:
@@ -315,7 +315,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
                writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
                        drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
                drv_data->block = 0;
-               wake_up_interruptible(&drv_data->waitq);
+               wake_up(&drv_data->waitq);
                break;
        }
 }
@@ -381,7 +381,7 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data)
        unsigned long   flags;
        char            abort = 0;
 
-       time_left = wait_event_interruptible_timeout(drv_data->waitq,
+       time_left = wait_event_timeout(drv_data->waitq,
                !drv_data->block, drv_data->adapter.timeout);
 
        spin_lock_irqsave(&drv_data->lock, flags);
index 6e8ee92ab55398aec8f5517334166c38e9eb0ba8..cab1c91b75a3a8e300057ef2aaf3e98b6af999d4 100644 (file)
@@ -1082,11 +1082,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
        /* map the registers */
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res == NULL) {
-               dev_err(&pdev->dev, "cannot find IO resource\n");
-               return -ENOENT;
-       }
-
        i2c->regs = devm_ioremap_resource(&pdev->dev, res);
 
        if (IS_ERR(i2c->regs))
index 5a7ad240bd264bd53764f3053acab0e7127a28b7..a63c7d50683676f5e01969573ffce373b3ce578f 100644 (file)
@@ -303,12 +303,6 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
        adap->class = I2C_CLASS_HWMON;
 
        mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (mem_res == NULL) {
-               dev_err(&pdev->dev, "Unable to get MEM resource\n");
-               err = -EINVAL;
-               goto out;
-       }
-
        siic->base = devm_ioremap_resource(&pdev->dev, mem_res);
        if (IS_ERR(siic->base)) {
                err = PTR_ERR(siic->base);
index b60ff90adc39a92ed3940a53076d53cf6f7ae4f6..9aa1b60f7fdd86581e2f694d96a2834e0ecad055 100644 (file)
@@ -714,11 +714,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        int ret = 0;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no mem resource\n");
-               return -EINVAL;
-       }
-
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
index 6b63cc7eb71e38ff31253c0e6af496b3b3b0bccb..48e31ed69dbf159f5e44f2ab2a4968896320c3eb 100644 (file)
@@ -892,7 +892,8 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
 }
 
 static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
-static DEVICE_ATTR(delete_device, S_IWUSR, NULL, i2c_sysfs_delete_device);
+static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
+                                  i2c_sysfs_delete_device);
 
 static struct attribute *i2c_adapter_attrs[] = {
        &dev_attr_name.attr,
index 0e8fab1913dfd8737ede3b7d9f332aee0861169e..fa6964d8681a0d126fcf7c4845896b3f06298f8f 100644 (file)
@@ -272,6 +272,27 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
                .exit_latency = 166,
                .target_residency = 500,
                .enter = &intel_idle },
+       {
+               .name = "C8-HSW",
+               .desc = "MWAIT 0x40",
+               .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 300,
+               .target_residency = 900,
+               .enter = &intel_idle },
+       {
+               .name = "C9-HSW",
+               .desc = "MWAIT 0x50",
+               .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 600,
+               .target_residency = 1800,
+               .enter = &intel_idle },
+       {
+               .name = "C10-HSW",
+               .desc = "MWAIT 0x60",
+               .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 2600,
+               .target_residency = 7700,
+               .enter = &intel_idle },
        {
                .enter = NULL }
 };
index 9f3a8ef1fb3e4484bcbc027337ce75a3f6e13425..b3d03d335948a5a5757bd4d49650b1685cbfb78b 100644 (file)
@@ -390,8 +390,8 @@ static int exynos_adc_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int exynos_adc_suspend(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct exynos_adc *info = platform_get_drvdata(pdev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct exynos_adc *info = iio_priv(indio_dev);
        u32 con;
 
        if (info->version == ADC_V2) {
@@ -413,8 +413,8 @@ static int exynos_adc_suspend(struct device *dev)
 
 static int exynos_adc_resume(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct exynos_adc *info = platform_get_drvdata(pdev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct exynos_adc *info = iio_priv(indio_dev);
        int ret;
 
        ret = regulator_enable(info->vdd);
index 9201022945e966719c4322e9242d0582316a9eee..9d19ba74f22bd92125501e2827ef51bbae59da13 100644 (file)
@@ -64,7 +64,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
        while (chan->indio_dev) {
                if (chan->indio_dev != indio_dev) {
                        ret = -EINVAL;
-                       goto error_release_channels;
+                       goto error_free_scan_mask;
                }
                set_bit(chan->channel->scan_index,
                        cb_buff->buffer.scan_mask);
@@ -73,6 +73,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
 
        return cb_buff;
 
+error_free_scan_mask:
+       kfree(cb_buff->buffer.scan_mask);
 error_release_channels:
        iio_channel_release_all(cb_buff->channels);
 error_free_cb_buff:
@@ -100,6 +102,7 @@ EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
 
 void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
 {
+       kfree(cb_buff->buffer.scan_mask);
        iio_channel_release_all(cb_buff->channels);
        kfree(cb_buff);
 }
index bd33473f8e38afd4deeeb243f12c2bf981587740..ed9bc8ae933030158c9d23b41acfc2bd2de7d61e 100644 (file)
@@ -312,6 +312,8 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
                        goto read_error;
 
                *val = *val >> ch->scan_type.shift;
+
+               err = st_sensors_set_enable(indio_dev, false);
        }
        mutex_unlock(&indio_dev->mlock);
 
index f4a6f0838327daaffe032bc4b3403d4c59bc83a7..b61160bd935eafffd7e04207a0e0fb043f87450f 100644 (file)
@@ -5,7 +5,7 @@ menu "Digital to analog converters"
 
 config AD5064
        tristate "Analog Devices AD5064 and similar multi-channel DAC driver"
-       depends on (SPI_MASTER || I2C)
+       depends on (SPI_MASTER && I2C!=m) || I2C
        help
          Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
          AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668,
@@ -27,7 +27,7 @@ config AD5360
 
 config AD5380
        tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver"
-       depends on (SPI_MASTER || I2C)
+       depends on (SPI_MASTER && I2C!=m) || I2C
        select REGMAP_I2C if I2C
        select REGMAP_SPI if SPI_MASTER
        help
@@ -57,7 +57,7 @@ config AD5624R_SPI
 
 config AD5446
        tristate "Analog Devices AD5446 and similar single channel DACs driver"
-       depends on (SPI_MASTER || I2C)
+       depends on (SPI_MASTER && I2C!=m) || I2C
        help
          Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
          AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
index a884252ac66b477db3a0fce966b40fb76ae456b2..e76d4ace53ff76d73917933a191da08b907cdc2f 100644 (file)
@@ -212,7 +212,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
                (pdata->r2_user_settings & (ADF4350_REG2_PD_POLARITY_POS |
                ADF4350_REG2_LDP_6ns | ADF4350_REG2_LDF_INT_N |
                ADF4350_REG2_CHARGE_PUMP_CURR_uA(5000) |
-               ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x9)));
+               ADF4350_REG2_MUXOUT(0x7) | ADF4350_REG2_NOISE_MODE(0x3)));
 
        st->regs[ADF4350_REG3] = pdata->r3_user_settings &
                                 (ADF4350_REG3_12BIT_CLKDIV(0xFFF) |
index 795d100b4c36fea7c82785e4f4b8545a13ff7eab..98ddc323add011fe84e45abc4f8902ce9e7f09af 100644 (file)
@@ -124,7 +124,7 @@ static int __of_iio_channel_get(struct iio_channel *channel,
        channel->indio_dev = indio_dev;
        index = iiospec.args_count ? iiospec.args[0] : 0;
        if (index >= indio_dev->num_channels) {
-               return -EINVAL;
+               err = -EINVAL;
                goto err_put;
        }
        channel->channel = &indio_dev->channels[index];
@@ -450,7 +450,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
        s64 raw64 = raw;
        int ret;
 
-       ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_SCALE);
+       ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
        if (ret == 0)
                raw64 += offset;
 
index b08ca7a9f76bf8ea70f63bfd6c291d4d71b5814d..3f3f0416fbdd52cb6664d56e54a14f8df344b589 100644 (file)
@@ -2226,6 +2226,27 @@ static void srpt_close_ch(struct srpt_rdma_ch *ch)
        spin_unlock_irq(&sdev->spinlock);
 }
 
+/**
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
+ */
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+       struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ch->spinlock, flags);
+       if (ch->in_shutdown) {
+               spin_unlock_irqrestore(&ch->spinlock, flags);
+               return true;
+       }
+
+       ch->in_shutdown = true;
+       target_sess_cmd_list_set_waiting(se_sess);
+       spin_unlock_irqrestore(&ch->spinlock, flags);
+
+       return true;
+}
+
 /**
  * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
  * @cm_id: Pointer to the CM ID of the channel to be drained.
@@ -2264,6 +2285,9 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id)
        spin_unlock_irq(&sdev->spinlock);
 
        if (do_reset) {
+               if (ch->sess)
+                       srpt_shutdown_session(ch->sess);
+
                ret = srpt_ch_qp_err(ch);
                if (ret < 0)
                        printk(KERN_ERR "Setting queue pair in error state"
@@ -2328,7 +2352,7 @@ static void srpt_release_channel_work(struct work_struct *w)
        se_sess = ch->sess;
        BUG_ON(!se_sess);
 
-       target_wait_for_sess_cmds(se_sess, 0);
+       target_wait_for_sess_cmds(se_sess);
 
        transport_deregister_session_configfs(se_sess);
        transport_deregister_session(se_sess);
@@ -3466,14 +3490,6 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
        spin_unlock_irqrestore(&ch->spinlock, flags);
 }
 
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
-       return true;
-}
-
 /**
  * srpt_close_session() - Forcibly close a session.
  *
index 4caf55cda7b170ba2928dc065641a521f66ebc47..3dae156905de53c1f9e9c5f0717f4ef4c3729803 100644 (file)
@@ -325,6 +325,7 @@ struct srpt_rdma_ch {
        u8                      sess_name[36];
        struct work_struct      release_work;
        struct completion       *release_done;
+       bool                    in_shutdown;
 };
 
 /**
index 2f78538e09d0f9e733be6f862a4de8e9d2c0301c..b2420ae19e148039147a33081a651d718e8d2e4d 100644 (file)
@@ -1379,6 +1379,7 @@ static int synaptics_reconnect(struct psmouse *psmouse)
 {
        struct synaptics_data *priv = psmouse->private;
        struct synaptics_data old_priv = *priv;
+       unsigned char param[2];
        int retry = 0;
        int error;
 
@@ -1394,6 +1395,7 @@ static int synaptics_reconnect(struct psmouse *psmouse)
                         */
                        ssleep(1);
                }
+               ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETID);
                error = synaptics_detect(psmouse, 0);
        } while (error && ++retry < 3);
 
index 0bfd8cf252002d4095dc699cf8277931b1ec00a0..518282da6d850b180d56572c0517b62424f12e19 100644 (file)
@@ -342,10 +342,10 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
                        ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
 
-               switch (wacom->id[idx] & 0xfffff) {
+               switch (wacom->id[idx]) {
                case 0x812: /* Inking pen */
                case 0x801: /* Intuos3 Inking pen */
-               case 0x20802: /* Intuos4 Inking Pen */
+               case 0x120802: /* Intuos4/5 Inking Pen */
                case 0x012:
                        wacom->tool[idx] = BTN_TOOL_PENCIL;
                        break;
@@ -356,11 +356,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x823: /* Intuos3 Grip Pen */
                case 0x813: /* Intuos3 Classic Pen */
                case 0x885: /* Intuos3 Marker Pen */
-               case 0x802: /* Intuos4 General Pen */
-               case 0x804: /* Intuos4 Marker Pen */
-               case 0x40802: /* Intuos4 Classic Pen */
-               case 0x18802: /* DTH2242 Grip Pen */
+               case 0x802: /* Intuos4/5 13HD/24HD General Pen */
+               case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
                case 0x022:
+               case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
+               case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
+               case 0x160802: /* Cintiq 13HD Pro Pen */
+               case 0x180802: /* DTH2242 Pen */
                        wacom->tool[idx] = BTN_TOOL_PEN;
                        break;
 
@@ -391,10 +393,14 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x82b: /* Intuos3 Grip Pen Eraser */
                case 0x81b: /* Intuos3 Classic Pen Eraser */
                case 0x91b: /* Intuos3 Airbrush Eraser */
-               case 0x80c: /* Intuos4 Marker Pen Eraser */
-               case 0x80a: /* Intuos4 General Pen Eraser */
-               case 0x4080a: /* Intuos4 Classic Pen Eraser */
-               case 0x90a: /* Intuos4 Airbrush Eraser */
+               case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
+               case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+               case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+               case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+               case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+               case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+               case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
+               case 0x18080a: /* DTH2242 Eraser */
                        wacom->tool[idx] = BTN_TOOL_RUBBER;
                        break;
 
@@ -402,7 +408,8 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x912:
                case 0x112:
                case 0x913: /* Intuos3 Airbrush */
-               case 0x902: /* Intuos4 Airbrush */
+               case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
+               case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
                        wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
                        break;
 
@@ -533,10 +540,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                                input_report_key(input, BTN_8, (data[3] & 0x80));
                        }
                        if (data[1] | (data[2] & 0x01) | data[3]) {
-                               input_report_key(input, wacom->tool[1], 1);
                                input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
                        } else {
-                               input_report_key(input, wacom->tool[1], 0);
                                input_report_abs(input, ABS_MISC, 0);
                        }
                } else if (features->type == DTK) {
@@ -546,6 +551,26 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        input_report_key(input, BTN_3, (data[6] & 0x08));
                        input_report_key(input, BTN_4, (data[6] & 0x10));
                        input_report_key(input, BTN_5, (data[6] & 0x20));
+                       if (data[6] & 0x3f) {
+                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+                       } else {
+                               input_report_abs(input, ABS_MISC, 0);
+                       }
+               } else if (features->type == WACOM_13HD) {
+                       input_report_key(input, BTN_0, (data[3] & 0x01));
+                       input_report_key(input, BTN_1, (data[4] & 0x01));
+                       input_report_key(input, BTN_2, (data[4] & 0x02));
+                       input_report_key(input, BTN_3, (data[4] & 0x04));
+                       input_report_key(input, BTN_4, (data[4] & 0x08));
+                       input_report_key(input, BTN_5, (data[4] & 0x10));
+                       input_report_key(input, BTN_6, (data[4] & 0x20));
+                       input_report_key(input, BTN_7, (data[4] & 0x40));
+                       input_report_key(input, BTN_8, (data[4] & 0x80));
+                       if ((data[3] & 0x01) | data[4]) {
+                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+                       } else {
+                               input_report_abs(input, ABS_MISC, 0);
+                       }
                } else if (features->type == WACOM_24HD) {
                        input_report_key(input, BTN_0, (data[6] & 0x01));
                        input_report_key(input, BTN_1, (data[6] & 0x02));
@@ -590,10 +615,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        }
 
                        if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) {
-                               input_report_key(input, wacom->tool[1], 1);
                                input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
                        } else {
-                               input_report_key(input, wacom->tool[1], 0);
                                input_report_abs(input, ABS_MISC, 0);
                        }
                } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
@@ -618,10 +641,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        }
 
                        if (data[2] | (data[3] & 0x01) | data[4] | data[5]) {
-                               input_report_key(input, wacom->tool[1], 1);
                                input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
                        } else {
-                               input_report_key(input, wacom->tool[1], 0);
                                input_report_abs(input, ABS_MISC, 0);
                        }
                } else {
@@ -668,10 +689,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) |
                                data[2] | (data[3] & 0x1f) | data[4] | data[8] |
                                (data[7] & 0x01)) {
-                               input_report_key(input, wacom->tool[1], 1);
                                input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
                        } else {
-                               input_report_key(input, wacom->tool[1], 0);
                                input_report_abs(input, ABS_MISC, 0);
                        }
                }
@@ -1301,6 +1320,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
        case INTUOS4L:
        case CINTIQ:
        case WACOM_BEE:
+       case WACOM_13HD:
        case WACOM_21UX2:
        case WACOM_22HD:
        case WACOM_24HD:
@@ -1530,15 +1550,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                __set_bit(KEY_PROG1, input_dev->keybit);
                __set_bit(KEY_PROG2, input_dev->keybit);
                __set_bit(KEY_PROG3, input_dev->keybit);
+
+               input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+               input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
                /* fall through */
 
        case DTK:
                for (i = 0; i < 6; i++)
                        __set_bit(BTN_0 + i, input_dev->keybit);
 
-               input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
-               input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
-
                __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
 
                wacom_setup_cintiq(wacom_wac);
@@ -1579,6 +1599,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                wacom_setup_cintiq(wacom_wac);
                break;
 
+       case WACOM_13HD:
+               for (i = 0; i < 9; i++)
+                       __set_bit(BTN_0 + i, input_dev->keybit);
+
+               input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+               __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+               wacom_setup_cintiq(wacom_wac);
+               break;
+
        case INTUOS3:
        case INTUOS3L:
                __set_bit(BTN_4, input_dev->keybit);
@@ -1937,7 +1966,8 @@ static const struct wacom_features wacom_features_0xF4 =
          63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0xF8 =
        { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS,   104480, 65600, 2047, /* Pen */
-         63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
+         63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+         .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
 static const struct wacom_features wacom_features_0xF6 =
        { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
          .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 };
@@ -1950,6 +1980,9 @@ static const struct wacom_features wacom_features_0xC5 =
 static const struct wacom_features wacom_features_0xC6 =
        { "Wacom Cintiq 12WX",    WACOM_PKGLEN_INTUOS,    53020, 33440, 1023,
          63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x304 =
+       { "Wacom Cintiq 13HD",    WACOM_PKGLEN_INTUOS,    59552, 33848, 1023,
+         63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0xC7 =
        { "Wacom DTU1931",        WACOM_PKGLEN_GRAPHIRE,  37832, 30305,  511,
          0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1959,6 +1992,9 @@ static const struct wacom_features wacom_features_0xCE =
 static const struct wacom_features wacom_features_0xF0 =
        { "Wacom DTU1631",        WACOM_PKGLEN_GRAPHIRE,  34623, 19553,  511,
          0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x57 =
+       { "Wacom DTK2241",        WACOM_PKGLEN_INTUOS,    95840, 54260, 2047,
+         63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES};
 static const struct wacom_features wacom_features_0x59 = /* Pen */
        { "Wacom DTH2242",        WACOM_PKGLEN_INTUOS,    95840, 54260, 2047,
          63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
@@ -1972,6 +2008,13 @@ static const struct wacom_features wacom_features_0xCC =
 static const struct wacom_features wacom_features_0xFA =
        { "Wacom Cintiq 22HD",    WACOM_PKGLEN_INTUOS,    95840, 54260, 2047,
          63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x5B =
+       { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS,      95840, 54260, 2047,
+         63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+         .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
+static const struct wacom_features wacom_features_0x5E =
+       { "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
+         .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 };
 static const struct wacom_features wacom_features_0x90 =
        { "Wacom ISDv4 90",       WACOM_PKGLEN_GRAPHIRE,  26202, 16325,  255,
          0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2001,7 +2044,7 @@ static const struct wacom_features wacom_features_0xE5 =
 static const struct wacom_features wacom_features_0xE6 =
        { "Wacom ISDv4 E6",       WACOM_PKGLEN_TPC2FG,    27760, 15694,  255,
          0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
-       .touch_max = 2 };
+         .touch_max = 2 };
 static const struct wacom_features wacom_features_0xEC =
        { "Wacom ISDv4 EC",       WACOM_PKGLEN_GRAPHIRE,  25710, 14500,  255,
          0, TABLETPC,    WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2143,8 +2186,11 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x43) },
        { USB_DEVICE_WACOM(0x44) },
        { USB_DEVICE_WACOM(0x45) },
+       { USB_DEVICE_WACOM(0x57) },
        { USB_DEVICE_WACOM(0x59) },
        { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
+       { USB_DEVICE_WACOM(0x5B) },
+       { USB_DEVICE_DETAILED(0x5E, USB_CLASS_HID, 0, 0) },
        { USB_DEVICE_WACOM(0xB0) },
        { USB_DEVICE_WACOM(0xB1) },
        { USB_DEVICE_WACOM(0xB2) },
@@ -2205,6 +2251,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x100) },
        { USB_DEVICE_WACOM(0x101) },
        { USB_DEVICE_WACOM(0x10D) },
+       { USB_DEVICE_WACOM(0x304) },
        { USB_DEVICE_WACOM(0x4001) },
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_WACOM(0xF4) },
index 5f9a7721e16cf36a1a92021a03ddb45648292b88..dfc9e08e7f70458c0a41378ae7034a027e52d0ea 100644 (file)
@@ -82,6 +82,7 @@ enum {
        WACOM_24HD,
        CINTIQ,
        WACOM_BEE,
+       WACOM_13HD,
        WACOM_MO,
        WIRELESS,
        BAMBOO_PT,
index 17c9097f3b5ddddd40fad74345b1d2376c4ada7a..39f3df8670c311ec2373ec4ddad585ce4217743b 100644 (file)
@@ -216,7 +216,7 @@ static int egalax_ts_probe(struct i2c_client *client,
        input_set_abs_params(input_dev,
                             ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0);
        input_set_abs_params(input_dev,
-                            ABS_MT_POSITION_X, 0, EGALAX_MAX_Y, 0, 0);
+                            ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0);
        input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0);
 
        input_set_drvdata(input_dev, ts);
index 9b1b274c7d256d96667c42150e19f7cf547d3f1f..c123709acf823829a274db2ca6ced2060bbae341 100644 (file)
@@ -93,7 +93,7 @@ capi_ctr_put(struct capi_ctr *ctr)
 
 static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
 {
-       if (contr - 1 >= CAPI_MAXCONTR)
+       if (contr < 1 || contr - 1 >= CAPI_MAXCONTR)
                return NULL;
 
        return capi_controller[contr - 1];
@@ -103,7 +103,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
 {
        lockdep_assert_held(&capi_controller_lock);
 
-       if (applid - 1 >= CAPI_MAXAPPL)
+       if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
                return NULL;
 
        return capi_applications[applid - 1];
@@ -111,7 +111,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
 
 static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
 {
-       if (applid - 1 >= CAPI_MAXAPPL)
+       if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
                return NULL;
 
        return rcu_dereference(capi_applications[applid - 1]);
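
The added contr < 1 / applid < 1 tests above guard against an id of 0. Because the parameters are u16, the expression applid - 1 is computed in int after the usual promotions, so an id of 0 yields -1 rather than a large unsigned value; -1 is below the upper bound, the old check passed, and index -1 was then used. A tiny stand-alone illustration of the promotion (plain C, with an assumed bound for demonstration only):

#include <stdio.h>
#include <stdint.h>

#define MAXAPPL 240	/* stand-in bound, for illustration only */

int main(void)
{
	uint16_t applid = 0;

	printf("applid - 1 = %d\n", applid - 1);		/* -1, not 65535 */
	printf("old check rejects: %d\n", applid - 1 >= MAXAPPL);	/* 0 */
	printf("new check rejects: %d\n",
	       applid < 1 || applid - 1 >= MAXAPPL);		/* 1 */
	return 0;
}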
index a0d931bcb37c5cda58766ff365a0a42dd112f45d..b02b679abf3183a7b6d798c212ed21b349453548 100644 (file)
@@ -107,6 +107,10 @@ static int create_gpio_led(const struct gpio_led *template,
                return 0;
        }
 
+       ret = devm_gpio_request(parent, template->gpio, template->name);
+       if (ret < 0)
+               return ret;
+
        led_dat->cdev.name = template->name;
        led_dat->cdev.default_trigger = template->default_trigger;
        led_dat->gpio = template->gpio;
@@ -126,10 +130,7 @@ static int create_gpio_led(const struct gpio_led *template,
        if (!template->retain_state_suspended)
                led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
 
-       ret = devm_gpio_request_one(parent, template->gpio,
-                                   (led_dat->active_low ^ state) ?
-                                   GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
-                                   template->name);
+       ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
        if (ret < 0)
                return ret;
 
index ee14662ed5cef46b845aeeb3557e55dcd6580efb..98cae529373f44aa202a1a5c0f9d2f2259ab0e98 100644 (file)
@@ -47,37 +47,37 @@ static struct ot200_led leds[] = {
        {
                .name = "led_1",
                .port = 0x49,
-               .mask = BIT(7),
+               .mask = BIT(6),
        },
        {
                .name = "led_2",
                .port = 0x49,
-               .mask = BIT(6),
+               .mask = BIT(5),
        },
        {
                .name = "led_3",
                .port = 0x49,
-               .mask = BIT(5),
+               .mask = BIT(4),
        },
        {
                .name = "led_4",
                .port = 0x49,
-               .mask = BIT(4),
+               .mask = BIT(3),
        },
        {
                .name = "led_5",
                .port = 0x49,
-               .mask = BIT(3),
+               .mask = BIT(2),
        },
        {
                .name = "led_6",
                .port = 0x49,
-               .mask = BIT(2),
+               .mask = BIT(1),
        },
        {
                .name = "led_7",
                .port = 0x49,
-               .mask = BIT(1),
+               .mask = BIT(0),
        }
 };
 
index 699187ab380099a32d926a83dde84d96e7124e85..5b9ac32801c7604b25c64804fb888aa1ebbbceab 100644 (file)
@@ -1002,6 +1002,7 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
                        kill_guest(&lg->cpus[0],
                                   "Cannot populate switcher mapping");
                }
+               lg->pgdirs[pgdir].last_host_cpu = -1;
        }
 }
 
index c6083132c4b8ccaf21c7addb61cf8596165bf915..0387e05cdb98b9708bde55143cd8a5cba853a2fa 100644 (file)
@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                               enum data_mode *data_mode)
 {
+       unsigned noio_flag;
+       void *ptr;
+
        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
                *data_mode = DATA_MODE_SLAB;
                return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
        }
 
        *data_mode = DATA_MODE_VMALLOC;
-       return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+       /*
+        * __vmalloc allocates the data pages and auxiliary structures with
+        * gfp_flags that were specified, but pagetables are always allocated
+        * with GFP_KERNEL, no matter what was specified as gfp_mask.
+        *
+        * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
+        * all allocations done by this process (including pagetables) are done
+        * as if GFP_NOIO was specified.
+        */
+
+       if (gfp_mask & __GFP_NORETRY)
+               noio_flag = memalloc_noio_save();
+
+       ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+       if (gfp_mask & __GFP_NORETRY)
+               memalloc_noio_restore(noio_flag);
+
+       return ptr;
 }
 
 /*
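
The comment introduced in the dm-bufio hunk above is the key point: __vmalloc() applies the caller's gfp_mask only to the data pages, while the page tables behind the mapping are always allocated with GFP_KERNEL, so a GFP_NOIO caller could still recurse into the block layer. Setting the per-task PF_MEMALLOC_NOIO flag around the call makes every allocation the task performs behave as GFP_NOIO. A minimal sketch of the save/restore pattern, assuming the helpers from <linux/sched.h> of this kernel era:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

static void *alloc_noio_buffer(unsigned long size)
{
	unsigned int noio_flag;
	void *ptr;

	/* All allocations by this task are treated as GFP_NOIO below. */
	noio_flag = memalloc_noio_save();
	ptr = __vmalloc(size, GFP_NOIO | __GFP_NORETRY, PAGE_KERNEL);
	memalloc_noio_restore(noio_flag);

	return ptr;
}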
index 83e995fece88c1330335c85a23ef42b2ed54a1f4..1af7255bbffb547aa67db8b1f38bf99b2ff87094 100644 (file)
@@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
                                 struct dm_cache_statistics *stats)
 {
        down_read(&cmd->root_lock);
-       memcpy(stats, &cmd->stats, sizeof(*stats));
+       *stats = cmd->stats;
        up_read(&cmd->root_lock);
 }
 
@@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
                                 struct dm_cache_statistics *stats)
 {
        down_write(&cmd->root_lock);
-       memcpy(&cmd->stats, stats, sizeof(*stats));
+       cmd->stats = *stats;
        up_write(&cmd->root_lock);
 }
 
index 558bdfdabf5f2da39e5bef487dc5008a2399b659..33369ca9614f978ae0481e0d79677c980660f42d 100644 (file)
@@ -130,8 +130,8 @@ struct dm_cache_policy {
         *
         * Must not block.
         *
-        * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
-        * would be typical).
+        * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
+        * (-EWOULDBLOCK would be typical).
         */
        int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
 
index 10744091e6cabb5f6bb55e59709eb4ba0350a98b..df44b60e66f289880c177e98d78f1aa04ce08e7a 100644 (file)
@@ -205,7 +205,7 @@ struct per_bio_data {
        /*
         * writethrough fields.  These MUST remain at the end of this
         * structure and the 'cache' member must be the first as it
-        * is used to determine the offsetof the writethrough fields.
+        * is used to determine the offset of the writethrough fields.
         */
        struct cache *cache;
        dm_cblock_t cblock;
@@ -393,7 +393,7 @@ static int get_cell(struct cache *cache,
        return r;
 }
 
- /*----------------------------------------------------------------*/
+/*----------------------------------------------------------------*/
 
 static bool is_dirty(struct cache *cache, dm_cblock_t b)
 {
@@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
 }
 
 /*----------------------------------------------------------------*/
+
 static bool block_size_is_power_of_two(struct cache *cache)
 {
        return cache->sectors_per_block_shift >= 0;
@@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err)
 
        /*
         * We can't issue this bio directly, since we're in interrupt
-        * context.  So it get's put on a bio list for processing by the
+        * context.  So it gets put on a bio list for processing by the
         * worker thread.
         */
        defer_writethrough_bio(pb->cache, bio);
@@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws)
 static void do_waker(struct work_struct *ws)
 {
        struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
+       policy_tick(cache->policy);
        wake_worker(cache);
        queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
 }
@@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
 
 static struct kmem_cache *migration_cache;
 
-static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
+#define NOT_CORE_OPTION 1
+
+static int process_config_option(struct cache *cache, const char *key, const char *value)
+{
+       unsigned long tmp;
+
+       if (!strcasecmp(key, "migration_threshold")) {
+               if (kstrtoul(value, 10, &tmp))
+                       return -EINVAL;
+
+               cache->migration_threshold = tmp;
+               return 0;
+       }
+
+       return NOT_CORE_OPTION;
+}
+
+static int set_config_value(struct cache *cache, const char *key, const char *value)
+{
+       int r = process_config_option(cache, key, value);
+
+       if (r == NOT_CORE_OPTION)
+               r = policy_set_config_value(cache->policy, key, value);
+
+       if (r)
+               DMWARN("bad config value for %s: %s", key, value);
+
+       return r;
+}
+
+static int set_config_values(struct cache *cache, int argc, const char **argv)
 {
        int r = 0;
 
@@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
        }
 
        while (argc) {
-               r = policy_set_config_value(p, argv[0], argv[1]);
-               if (r) {
-                       DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
-                              argv[0], argv[1]);
-                       return r;
-               }
+               r = set_config_value(cache, argv[0], argv[1]);
+               if (r)
+                       break;
 
                argc -= 2;
                argv += 2;
@@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
                               char **error)
 {
-       int r;
-
        cache->policy = dm_cache_policy_create(ca->policy_name,
                                               cache->cache_size,
                                               cache->origin_sectors,
@@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
                return -ENOMEM;
        }
 
-       r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
-       if (r) {
-               *error = "Error setting cache policy's config values";
-               dm_cache_policy_destroy(cache->policy);
-               cache->policy = NULL;
-       }
-
-       return r;
+       return 0;
 }
 
 /*
@@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
        return discard_block_size;
 }
 
-#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
+#define DEFAULT_MIGRATION_THRESHOLD 2048
 
 static int cache_create(struct cache_args *ca, struct cache **result)
 {
@@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        ti->discards_supported = true;
        ti->discard_zeroes_data_unsupported = true;
 
-       memcpy(&cache->features, &ca->features, sizeof(cache->features));
+       cache->features = ca->features;
        ti->per_bio_data_size = get_per_bio_data_size(cache);
 
        cache->callbacks.congested_fn = cache_is_congested;
@@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        r = create_cache_policy(cache, ca, error);
        if (r)
                goto bad;
+
        cache->policy_nr_args = ca->policy_argc;
+       cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
+
+       r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
+       if (r) {
+               *error = "Error setting cache policy's config values";
+               goto bad;
+       }
 
        cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
                                     ca->block_size, may_format,
@@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        INIT_LIST_HEAD(&cache->quiesced_migrations);
        INIT_LIST_HEAD(&cache->completed_migrations);
        INIT_LIST_HEAD(&cache->need_commit_migrations);
-       cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
        atomic_set(&cache->nr_migrations, 0);
        init_waitqueue_head(&cache->migration_wait);
 
+       r = -ENOMEM;
        cache->nr_dirty = 0;
        cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
        if (!cache->dirty_bitset) {
@@ -2517,23 +2545,6 @@ static void cache_status(struct dm_target *ti, status_type_t type,
        DMEMIT("Error");
 }
 
-#define NOT_CORE_OPTION 1
-
-static int process_config_option(struct cache *cache, char **argv)
-{
-       unsigned long tmp;
-
-       if (!strcasecmp(argv[0], "migration_threshold")) {
-               if (kstrtoul(argv[1], 10, &tmp))
-                       return -EINVAL;
-
-               cache->migration_threshold = tmp;
-               return 0;
-       }
-
-       return NOT_CORE_OPTION;
-}
-
 /*
  * Supports <key> <value>.
  *
@@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv)
  */
 static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
 {
-       int r;
        struct cache *cache = ti->private;
 
        if (argc != 2)
                return -EINVAL;
 
-       r = process_config_option(cache, argv);
-       if (r == NOT_CORE_OPTION)
-               return policy_set_config_value(cache->policy, argv[0], argv[1]);
-
-       return r;
+       return set_config_value(cache, argv[0], argv[1]);
 }
 
 static int cache_iterate_devices(struct dm_target *ti,
@@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
        .name = "cache",
-       .version = {1, 1, 0},
+       .version = {1, 1, 1},
        .module = THIS_MODULE,
        .ctr = cache_ctr,
        .dtr = cache_dtr,
index 51bb81676be37d2ff4520557c70f83bda488845a..bdf26f5bd326011595f8b9801cc2e408af6842cc 100644 (file)
@@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
+       ti->num_write_same_bios = 1;
 
        return 0;
 
index c0e07026a8d136f084958800192c12236ffd37e2..c434e5aab2dfc9e6a63ca7700e5ac1c1deacd025 100644 (file)
@@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
+               r = -ENOMEM;
                goto bad_pending_pool;
        }
 
index ea5e878a30b93b1f974d449738fc46b55d5eaeba..d907ca6227cec3519e46ac95e146656b342125c2 100644 (file)
@@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
 static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
        struct stripe_c *sc;
-       sector_t width;
+       sector_t width, tmp_len;
        uint32_t stripes;
        uint32_t chunk_size;
        int r;
@@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        width = ti->len;
-       if (sector_div(width, chunk_size)) {
+       if (sector_div(width, stripes)) {
                ti->error = "Target length not divisible by "
-                   "chunk size";
+                   "number of stripes";
                return -EINVAL;
        }
 
-       if (sector_div(width, stripes)) {
+       tmp_len = width;
+       if (sector_div(tmp_len, chunk_size)) {
                ti->error = "Target length not divisible by "
-                   "number of stripes";
+                   "chunk size";
                return -EINVAL;
        }
 
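
The reordered sector_div() calls above change what width ends up holding: sector_div(x, d) divides x in place and returns the remainder, so the removed code left width = ti->len / chunk_size / stripes, whereas the new code leaves width = ti->len / stripes and only verifies that it is a whole number of chunks. A small worked example with assumed numbers (1024 sectors, 4 stripes, 16-sector chunks):

#include <stdio.h>

int main(void)
{
	unsigned long len = 1024, stripes = 4, chunk = 16;

	/* removed ordering: divide by chunk size first, then stripes */
	printf("old width = %lu\n", (len / chunk) / stripes);	/* 16 */

	/* new ordering: width is the per-stripe length */
	printf("new width = %lu\n", len / stripes);		/* 256 */
	printf("chunk aligned: %d\n", (len / stripes) % chunk == 0);	/* 1 */
	return 0;
}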
index e50dad0c65f4759d535b5cf81b0258a3b27066b5..1ff252ab7d46a1ed55fb98a93426acfb764998dd 100644 (file)
@@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
                        return false;
 
                if (!ti->type->iterate_devices ||
-                   !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+                   ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
                        return false;
        }
 
index 00cee02f8fc9b299788941c9b4147abdfc56c83c..60bce435f4fa1443c2994bd483e70ea096c7aa92 100644 (file)
@@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
        return r;
 }
 
-static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
 {
        int r;
        dm_block_t old_count;
 
-       r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
+       r = dm_sm_get_nr_blocks(sm, &old_count);
        if (r)
                return r;
 
@@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
                return 0;
 
        if (new_count < old_count) {
-               DMERR("cannot reduce size of data device");
+               DMERR("cannot reduce size of space map");
                return -EINVAL;
        }
 
-       return dm_sm_extend(pmd->data_sm, new_count - old_count);
+       return dm_sm_extend(sm, new_count - old_count);
 }
 
 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
@@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
 
        down_write(&pmd->root_lock);
        if (!pmd->fail_io)
-               r = __resize_data_dev(pmd, new_count);
+               r = __resize_space_map(pmd->data_sm, new_count);
+       up_write(&pmd->root_lock);
+
+       return r;
+}
+
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+{
+       int r = -EINVAL;
+
+       down_write(&pmd->root_lock);
+       if (!pmd->fail_io)
+               r = __resize_space_map(pmd->metadata_sm, new_count);
        up_write(&pmd->root_lock);
 
        return r;
@@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
        dm_bm_set_read_only(pmd->bm);
        up_write(&pmd->root_lock);
 }
+
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+                                       dm_block_t threshold,
+                                       dm_sm_threshold_fn fn,
+                                       void *context)
+{
+       int r;
+
+       down_write(&pmd->root_lock);
+       r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
+       up_write(&pmd->root_lock);
+
+       return r;
+}
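
[Sketch] The hunk above generalises the data-device-only resize helper into a routine that grows any space map to a new block count, so the same path now serves both the data and the metadata space maps. A hedged userspace sketch of that control flow (stand-in types; only the shape mirrors the kernel code):

#include <stdint.h>
#include <stdio.h>

struct space_map {
        uint64_t nr_blocks;
        const char *name;
};

static int sm_extend(struct space_map *sm, uint64_t extra)
{
        sm->nr_blocks += extra;
        return 0;
}

static int resize_space_map(struct space_map *sm, uint64_t new_count)
{
        uint64_t old_count = sm->nr_blocks;

        if (new_count == old_count)
                return 0;

        if (new_count < old_count) {
                fprintf(stderr, "cannot reduce size of space map\n");
                return -22;     /* -EINVAL */
        }

        return sm_extend(sm, new_count - old_count);
}

int main(void)
{
        struct space_map data = { 1024, "data" }, metadata = { 128, "metadata" };

        resize_space_map(&data, 2048);          /* grow the data space map */
        resize_space_map(&metadata, 256);       /* same helper for metadata */
        printf("%s=%llu %s=%llu\n", data.name, (unsigned long long)data.nr_blocks,
               metadata.name, (unsigned long long)metadata.nr_blocks);
        return 0;
}
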
index 0cecc3702885fb57452c013f83679fc437a393ca..845ebbe589a9e0a00505bab0150df48331233928 100644 (file)
@@ -8,6 +8,7 @@
 #define DM_THIN_METADATA_H
 
 #include "persistent-data/dm-block-manager.h"
+#include "persistent-data/dm-space-map.h"
 
 #define THIN_METADATA_BLOCK_SIZE 4096
 
@@ -185,6 +186,7 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
  * blocks would be lost.
  */
 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
 
 /*
  * Flicks the underlying block manager into read only mode, so you know
@@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
  */
 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
 
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+                                       dm_block_t threshold,
+                                       dm_sm_threshold_fn fn,
+                                       void *context);
+
 /*----------------------------------------------------------------*/
 
 #endif
index 004ad1652b73477dae0194b957842434cf48c38d..88f2f802d528be23b8e64c26085913677082be03 100644 (file)
@@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
                return r;
 
        if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
-               DMWARN("%s: reached low water mark, sending event.",
+               DMWARN("%s: reached low water mark for data device: sending event.",
                       dm_device_name(pool->pool_md));
                spin_lock_irqsave(&pool->lock, flags);
                pool->low_water_triggered = 1;
@@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio)
        bio_io_error(bio);
 }
 
+/*
+ * FIXME: should we also commit due to size of transaction, measured in
+ * metadata blocks?
+ */
 static int need_commit_due_to_time(struct pool *pool)
 {
        return jiffies < pool->last_commit_jiffies ||
@@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
        return r;
 }
 
+static void metadata_low_callback(void *context)
+{
+       struct pool *pool = context;
+
+       DMWARN("%s: reached low water mark for metadata device: sending event.",
+              dm_device_name(pool->pool_md));
+
+       dm_table_event(pool->ti->table);
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+       sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+       char buffer[BDEVNAME_SIZE];
+
+       if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+               DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+                      bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
+               metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
+       }
+
+       return metadata_dev_size;
+}
+
+static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
+{
+       sector_t metadata_dev_size = get_metadata_dev_size(bdev);
+
+       sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+
+       return metadata_dev_size;
+}
+
+/*
+ * When a metadata threshold is crossed a dm event is triggered, and
+ * userland should respond by growing the metadata device.  We could let
+ * userland set the threshold, like we do with the data threshold, but I'm
+ * not sure they know enough to do this well.
+ */
+static dm_block_t calc_metadata_threshold(struct pool_c *pt)
+{
+       /*
+        * 4M is ample for all ops with the possible exception of thin
+        * device deletion which is harmless if it fails (just retry the
+        * delete after you've grown the device).
+        */
+       dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
+       return min((dm_block_t)1024ULL /* 4M */, quarter);
+}
+
 /*
  * thin-pool <metadata dev> <data dev>
  *          <data block size (sectors)>
@@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
        unsigned long block_size;
        dm_block_t low_water_blocks;
        struct dm_dev *metadata_dev;
-       sector_t metadata_dev_size;
-       char b[BDEVNAME_SIZE];
+       fmode_t metadata_mode;
 
        /*
         * FIXME Remove validation from scope of lock.
@@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                r = -EINVAL;
                goto out_unlock;
        }
+
        as.argc = argc;
        as.argv = argv;
 
-       r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
+       /*
+        * Set default pool features.
+        */
+       pool_features_init(&pf);
+
+       dm_consume_args(&as, 4);
+       r = parse_pool_features(&as, &pf, ti);
+       if (r)
+               goto out_unlock;
+
+       metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
+       r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
        if (r) {
                ti->error = "Error opening metadata block device";
                goto out_unlock;
        }
 
-       metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
-       if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
-               DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
-                      bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
+       /*
+        * Run for the side-effect of possibly issuing a warning if the
+        * device is too big.
+        */
+       (void) get_metadata_dev_size(metadata_dev->bdev);
 
        r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
        if (r) {
@@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                goto out;
        }
 
-       /*
-        * Set default pool features.
-        */
-       pool_features_init(&pf);
-
-       dm_consume_args(&as, 4);
-       r = parse_pool_features(&as, &pf, ti);
-       if (r)
-               goto out;
-
        pt = kzalloc(sizeof(*pt), GFP_KERNEL);
        if (!pt) {
                r = -ENOMEM;
@@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
        }
        ti->private = pt;
 
+       r = dm_pool_register_metadata_threshold(pt->pool->pmd,
+                                               calc_metadata_threshold(pt),
+                                               metadata_low_callback,
+                                               pool);
+       if (r)
+               goto out_free_pt;
+
        pt->callbacks.congested_fn = pool_is_congested;
        dm_table_add_target_callbacks(ti->table, &pt->callbacks);
 
@@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
        return r;
 }
 
-/*
- * Retrieves the number of blocks of the data device from
- * the superblock and compares it to the actual device size,
- * thus resizing the data device in case it has grown.
- *
- * This both copes with opening preallocated data devices in the ctr
- * being followed by a resume
- * -and-
- * calling the resume method individually after userspace has
- * grown the data device in reaction to a table event.
- */
-static int pool_preresume(struct dm_target *ti)
+static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
 {
        int r;
        struct pool_c *pt = ti->private;
@@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti)
        sector_t data_size = ti->len;
        dm_block_t sb_data_size;
 
-       /*
-        * Take control of the pool object.
-        */
-       r = bind_control_target(pool, ti);
-       if (r)
-               return r;
+       *need_commit = false;
 
        (void) sector_div(data_size, pool->sectors_per_block);
 
@@ -2114,7 +2161,7 @@ static int pool_preresume(struct dm_target *ti)
        }
 
        if (data_size < sb_data_size) {
-               DMERR("pool target too small, is %llu blocks (expected %llu)",
+               DMERR("pool target (%llu blocks) too small: expected %llu",
                      (unsigned long long)data_size, sb_data_size);
                return -EINVAL;
 
@@ -2122,17 +2169,90 @@ static int pool_preresume(struct dm_target *ti)
                r = dm_pool_resize_data_dev(pool->pmd, data_size);
                if (r) {
                        DMERR("failed to resize data device");
-                       /* FIXME Stricter than necessary: Rollback transaction instead here */
                        set_pool_mode(pool, PM_READ_ONLY);
                        return r;
                }
 
-               (void) commit_or_fallback(pool);
+               *need_commit = true;
        }
 
        return 0;
 }
 
+static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
+{
+       int r;
+       struct pool_c *pt = ti->private;
+       struct pool *pool = pt->pool;
+       dm_block_t metadata_dev_size, sb_metadata_dev_size;
+
+       *need_commit = false;
+
+       metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
+
+       r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
+       if (r) {
+               DMERR("failed to retrieve metadata device size");
+               return r;
+       }
+
+       if (metadata_dev_size < sb_metadata_dev_size) {
+               DMERR("metadata device (%llu blocks) too small: expected %llu",
+                     metadata_dev_size, sb_metadata_dev_size);
+               return -EINVAL;
+
+       } else if (metadata_dev_size > sb_metadata_dev_size) {
+               r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
+               if (r) {
+                       DMERR("failed to resize metadata device");
+                       return r;
+               }
+
+               *need_commit = true;
+       }
+
+       return 0;
+}
+
+/*
+ * Retrieves the number of blocks of the data device from
+ * the superblock and compares it to the actual device size,
+ * thus resizing the data device in case it has grown.
+ *
+ * This both copes with opening preallocated data devices in the ctr
+ * being followed by a resume
+ * -and-
+ * calling the resume method individually after userspace has
+ * grown the data device in reaction to a table event.
+ */
+static int pool_preresume(struct dm_target *ti)
+{
+       int r;
+       bool need_commit1, need_commit2;
+       struct pool_c *pt = ti->private;
+       struct pool *pool = pt->pool;
+
+       /*
+        * Take control of the pool object.
+        */
+       r = bind_control_target(pool, ti);
+       if (r)
+               return r;
+
+       r = maybe_resize_data_dev(ti, &need_commit1);
+       if (r)
+               return r;
+
+       r = maybe_resize_metadata_dev(ti, &need_commit2);
+       if (r)
+               return r;
+
+       if (need_commit1 || need_commit2)
+               (void) commit_or_fallback(pool);
+
+       return 0;
+}
+
 static void pool_resume(struct dm_target *ti)
 {
        struct pool_c *pt = ti->private;
@@ -2549,7 +2669,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 7, 0},
+       .version = {1, 8, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
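
[Sketch] The calc_metadata_threshold() comment in the hunk above explains the sizing choice: warn when free metadata space falls to a quarter of the metadata device, but never set the mark higher than 1024 metadata blocks (about 4 MiB with 4 KiB blocks). A small worked example of that min() calculation, assuming the 4 KiB block size used by thin metadata (values illustrative):

#include <stdint.h>
#include <stdio.h>

#define METADATA_BLOCK_SIZE 4096ULL     /* assumed, mirrors THIN_METADATA_BLOCK_SIZE */

static uint64_t calc_threshold_blocks(uint64_t metadata_dev_bytes)
{
        uint64_t blocks = metadata_dev_bytes / METADATA_BLOCK_SIZE;
        uint64_t quarter = blocks / 4;
        uint64_t cap = 1024;            /* roughly 4 MiB worth of metadata blocks */

        return quarter < cap ? quarter : cap;
}

int main(void)
{
        /* 16 MiB metadata device -> 4096 blocks -> threshold of 1024 blocks. */
        printf("threshold = %llu blocks\n",
               (unsigned long long)calc_threshold_blocks(16ULL << 20));
        return 0;
}
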
index f6d29e614ab728c521b0ebc1f8a7f320e5099deb..e735a6d5a793dfff9c71330fd7555469d5a58501 100644 (file)
@@ -248,7 +248,8 @@ static struct dm_space_map ops = {
        .new_block = sm_disk_new_block,
        .commit = sm_disk_commit,
        .root_size = sm_disk_root_size,
-       .copy_root = sm_disk_copy_root
+       .copy_root = sm_disk_copy_root,
+       .register_threshold_callback = NULL
 };
 
 struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
index 906cf3df71af33365e460cbca0a82b0077fc6abf..1c959684caef7512fafd3c1a5505c29c798a9c11 100644 (file)
 
 /*----------------------------------------------------------------*/
 
+/*
+ * An edge triggered threshold.
+ */
+struct threshold {
+       bool threshold_set;
+       bool value_set;
+       dm_block_t threshold;
+       dm_block_t current_value;
+       dm_sm_threshold_fn fn;
+       void *context;
+};
+
+static void threshold_init(struct threshold *t)
+{
+       t->threshold_set = false;
+       t->value_set = false;
+}
+
+static void set_threshold(struct threshold *t, dm_block_t value,
+                         dm_sm_threshold_fn fn, void *context)
+{
+       t->threshold_set = true;
+       t->threshold = value;
+       t->fn = fn;
+       t->context = context;
+}
+
+static bool below_threshold(struct threshold *t, dm_block_t value)
+{
+       return t->threshold_set && value <= t->threshold;
+}
+
+static bool threshold_already_triggered(struct threshold *t)
+{
+       return t->value_set && below_threshold(t, t->current_value);
+}
+
+static void check_threshold(struct threshold *t, dm_block_t value)
+{
+       if (below_threshold(t, value) &&
+           !threshold_already_triggered(t))
+               t->fn(t->context);
+
+       t->value_set = true;
+       t->current_value = value;
+}
+
+/*----------------------------------------------------------------*/
+
 /*
  * Space map interface.
  *
@@ -54,6 +103,8 @@ struct sm_metadata {
        unsigned allocated_this_transaction;
        unsigned nr_uncommitted;
        struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+
+       struct threshold threshold;
 };
 
 static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
@@ -144,12 +195,6 @@ static void sm_metadata_destroy(struct dm_space_map *sm)
        kfree(smm);
 }
 
-static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
-{
-       DMERR("doesn't support extend");
-       return -EINVAL;
-}
-
 static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
 {
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
@@ -335,9 +380,19 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
 
 static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
 {
+       dm_block_t count;
+       struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
        int r = sm_metadata_new_block_(sm, b);
        if (r)
                DMERR("unable to allocate new metadata block");
+
+       r = sm_metadata_get_nr_free(sm, &count);
+       if (r)
+               DMERR("couldn't get free block count");
+
+       check_threshold(&smm->threshold, count);
+
        return r;
 }
 
@@ -357,6 +412,18 @@ static int sm_metadata_commit(struct dm_space_map *sm)
        return 0;
 }
 
+static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
+                                                  dm_block_t threshold,
+                                                  dm_sm_threshold_fn fn,
+                                                  void *context)
+{
+       struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+       set_threshold(&smm->threshold, threshold, fn, context);
+
+       return 0;
+}
+
 static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
 {
        *result = sizeof(struct disk_sm_root);
@@ -382,6 +449,8 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t
        return 0;
 }
 
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);
+
 static struct dm_space_map ops = {
        .destroy = sm_metadata_destroy,
        .extend = sm_metadata_extend,
@@ -395,7 +464,8 @@ static struct dm_space_map ops = {
        .new_block = sm_metadata_new_block,
        .commit = sm_metadata_commit,
        .root_size = sm_metadata_root_size,
-       .copy_root = sm_metadata_copy_root
+       .copy_root = sm_metadata_copy_root,
+       .register_threshold_callback = sm_metadata_register_threshold_callback
 };
 
 /*----------------------------------------------------------------*/
@@ -410,7 +480,7 @@ static void sm_bootstrap_destroy(struct dm_space_map *sm)
 
 static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
 {
-       DMERR("boostrap doesn't support extend");
+       DMERR("bootstrap doesn't support extend");
 
        return -EINVAL;
 }
@@ -450,7 +520,7 @@ static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
 static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
                                  uint32_t count)
 {
-       DMERR("boostrap doesn't support set_count");
+       DMERR("bootstrap doesn't support set_count");
 
        return -EINVAL;
 }
@@ -491,7 +561,7 @@ static int sm_bootstrap_commit(struct dm_space_map *sm)
 
 static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
 {
-       DMERR("boostrap doesn't support root_size");
+       DMERR("bootstrap doesn't support root_size");
 
        return -EINVAL;
 }
@@ -499,7 +569,7 @@ static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
 static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
                                  size_t max)
 {
-       DMERR("boostrap doesn't support copy_root");
+       DMERR("bootstrap doesn't support copy_root");
 
        return -EINVAL;
 }
@@ -517,11 +587,42 @@ static struct dm_space_map bootstrap_ops = {
        .new_block = sm_bootstrap_new_block,
        .commit = sm_bootstrap_commit,
        .root_size = sm_bootstrap_root_size,
-       .copy_root = sm_bootstrap_copy_root
+       .copy_root = sm_bootstrap_copy_root,
+       .register_threshold_callback = NULL
 };
 
 /*----------------------------------------------------------------*/
 
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+       int r, i;
+       enum allocation_event ev;
+       struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+       dm_block_t old_len = smm->ll.nr_blocks;
+
+       /*
+        * Flick into a mode where all blocks get allocated in the new area.
+        */
+       smm->begin = old_len;
+       memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+
+       /*
+        * Extend.
+        */
+       r = sm_ll_extend(&smm->ll, extra_blocks);
+
+       /*
+        * Switch back to normal behaviour.
+        */
+       memcpy(&smm->sm, &ops, sizeof(smm->sm));
+       for (i = old_len; !r && i < smm->begin; i++)
+               r = sm_ll_inc(&smm->ll, i, &ev);
+
+       return r;
+}
+
+/*----------------------------------------------------------------*/
+
 struct dm_space_map *dm_sm_metadata_init(void)
 {
        struct sm_metadata *smm;
@@ -549,6 +650,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        smm->nr_uncommitted = 0;
+       threshold_init(&smm->threshold);
 
        memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
 
@@ -590,6 +692,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        smm->nr_uncommitted = 0;
+       threshold_init(&smm->threshold);
 
        memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
        return 0;
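
[Sketch] The struct threshold added above is edge-triggered: the callback fires only when the tracked free-block count first drops to or below the threshold, not on every allocation while it stays below. A standalone userspace sketch of that behaviour (names echo the kernel helpers, but this is plain C, not the kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*threshold_fn)(void *context);

struct threshold {
        bool threshold_set;
        bool value_set;
        uint64_t threshold;
        uint64_t current_value;
        threshold_fn fn;
        void *context;
};

static bool below_threshold(struct threshold *t, uint64_t value)
{
        return t->threshold_set && value <= t->threshold;
}

static void check_threshold(struct threshold *t, uint64_t value)
{
        /* Fire only on the edge: previous value above, new value at or below. */
        if (below_threshold(t, value) &&
            !(t->value_set && below_threshold(t, t->current_value)))
                t->fn(t->context);

        t->value_set = true;
        t->current_value = value;
}

static void low_space(void *context)
{
        printf("low on free blocks: %s\n", (const char *)context);
}

int main(void)
{
        struct threshold t = { .threshold_set = true, .threshold = 10,
                               .fn = low_space, .context = "metadata" };

        check_threshold(&t, 12);        /* above threshold: silent */
        check_threshold(&t, 9);         /* crosses threshold: fires once */
        check_threshold(&t, 8);         /* still below: silent */
        return 0;
}
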
index 1cbfc6b1638a05a42535fe80c4e76afa0e39a3b0..3e6d1153b7c4b898b5a1355ad8bcc43cac2de4f4 100644 (file)
@@ -9,6 +9,8 @@
 
 #include "dm-block-manager.h"
 
+typedef void (*dm_sm_threshold_fn)(void *context);
+
 /*
  * struct dm_space_map keeps a record of how many times each block in a device
  * is referenced.  It needs to be fixed on disk as part of the transaction.
@@ -59,6 +61,15 @@ struct dm_space_map {
         */
        int (*root_size)(struct dm_space_map *sm, size_t *result);
        int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
+
+       /*
+        * You can register one threshold callback which is edge-triggered
+        * when the free space in the space map drops below the threshold.
+        */
+       int (*register_threshold_callback)(struct dm_space_map *sm,
+                                          dm_block_t threshold,
+                                          dm_sm_threshold_fn fn,
+                                          void *context);
 };
 
 /*----------------------------------------------------------------*/
@@ -131,4 +142,16 @@ static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le
        return sm->copy_root(sm, copy_to_here_le, len);
 }
 
+static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm,
+                                                   dm_block_t threshold,
+                                                   dm_sm_threshold_fn fn,
+                                                   void *context)
+{
+       if (sm->register_threshold_callback)
+               return sm->register_threshold_callback(sm, threshold, fn, context);
+
+       return -EINVAL;
+}
+
+
 #endif /* _LINUX_DM_SPACE_MAP_H */
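
[Sketch] register_threshold_callback is an optional operation: the disk and bootstrap space maps leave it NULL, so the inline wrapper above fails registration cleanly with -EINVAL instead of calling through a NULL pointer. A short illustrative sketch of that guard (hypothetical userspace stand-ins):

#include <stdio.h>

typedef void (*threshold_fn)(void *context);

struct space_map_ops {
        /* Optional: only the metadata space map provides this hook. */
        int (*register_threshold_callback)(void *sm, unsigned long long threshold,
                                           threshold_fn fn, void *context);
};

static int register_threshold(const struct space_map_ops *ops, void *sm,
                              unsigned long long threshold,
                              threshold_fn fn, void *context)
{
        if (ops->register_threshold_callback)
                return ops->register_threshold_callback(sm, threshold, fn, context);

        return -22;     /* -EINVAL: this space map cannot report thresholds */
}

int main(void)
{
        struct space_map_ops disk_ops = { .register_threshold_callback = NULL };

        printf("register on disk space map -> %d\n",
               register_threshold(&disk_ops, NULL, 1024, NULL, NULL));
        return 0;
}
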
index ca2754a3cd63d83eefb8927ae072f6732e852716..5e040085c2ffe4b6550db7a029db359f37765dda 100644 (file)
@@ -176,7 +176,7 @@ struct zoran_fh;
 
 struct zoran_mapping {
        struct zoran_fh *fh;
-       int count;
+       atomic_t count;
 };
 
 struct zoran_buffer {
index 1168a84a737dafae1dbf6e7e3bbe195b891815d6..d133c30c3fdccbc21fb54b3f08b66966835d3565 100644 (file)
@@ -2803,8 +2803,7 @@ static void
 zoran_vm_open (struct vm_area_struct *vma)
 {
        struct zoran_mapping *map = vma->vm_private_data;
-
-       map->count++;
+       atomic_inc(&map->count);
 }
 
 static void
@@ -2815,7 +2814,7 @@ zoran_vm_close (struct vm_area_struct *vma)
        struct zoran *zr = fh->zr;
        int i;
 
-       if (--map->count > 0)
+       if (!atomic_dec_and_mutex_lock(&map->count, &zr->resource_lock))
                return;
 
        dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr),
@@ -2828,14 +2827,16 @@ zoran_vm_close (struct vm_area_struct *vma)
        kfree(map);
 
        /* Any buffers still mapped? */
-       for (i = 0; i < fh->buffers.num_buffers; i++)
-               if (fh->buffers.buffer[i].map)
+       for (i = 0; i < fh->buffers.num_buffers; i++) {
+               if (fh->buffers.buffer[i].map) {
+                       mutex_unlock(&zr->resource_lock);
                        return;
+               }
+       }
 
        dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr),
                __func__, mode_name(fh->map_mode));
 
-       mutex_lock(&zr->resource_lock);
 
        if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
                if (fh->buffers.active != ZORAN_FREE) {
@@ -2939,7 +2940,7 @@ zoran_mmap (struct file           *file,
                goto mmap_unlock_and_return;
        }
        map->fh = fh;
-       map->count = 1;
+       atomic_set(&map->count, 1);
 
        vma->vm_ops = &zoran_vm_ops;
        vma->vm_flags |= VM_DONTEXPAND;
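
[Sketch] The zoran change above replaces a bare int refcount with an atomic_t and atomic_dec_and_mutex_lock(), so the final unmapper drops the count and does its buffer teardown while already holding the resource lock. A simplified userspace approximation of those semantics (the real kernel helper also closes the race between the final decrement and taking the lock; this sketch only shows the calling pattern):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool dec_and_mutex_lock(atomic_int *count, pthread_mutex_t *lock)
{
        /* Decrement; if this was the last reference, return with the
         * mutex held so the caller can tear down shared state. */
        if (atomic_fetch_sub(count, 1) == 1) {
                pthread_mutex_lock(lock);
                return true;
        }
        return false;
}

int main(void)
{
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        atomic_int count = 1;           /* one remaining mapping */

        if (dec_and_mutex_lock(&count, &lock)) {
                printf("last unmap: freeing buffers under the lock\n");
                pthread_mutex_unlock(&lock);
        }
        return 0;
}
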
index 477268a2415fd06d58e47eddc5e157b4940fd8c3..d338b19da544c92e8e1c73c66092ca96575bcb59 100644 (file)
@@ -2150,6 +2150,9 @@ static int __init omap_vout_probe(struct platform_device *pdev)
        struct omap_dss_device *def_display;
        struct omap2video_device *vid_dev = NULL;
 
+       if (omapdss_is_initialized() == false)
+               return -EPROBE_DEFER;
+
        ret = omapdss_compat_init();
        if (ret) {
                dev_err(&pdev->dev, "failed to init dss\n");
index cadf1cc19aafb918d03665815caffb0c11839155..04644e7b42b123ca5407840d11a9e412a26dc6dd 100644 (file)
@@ -1560,12 +1560,6 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, emif);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(emif->dev, "%s: error getting memory resource\n",
-                       __func__);
-               goto error;
-       }
-
        emif->base = devm_ioremap_resource(emif->dev, res);
        if (IS_ERR(emif->base))
                goto error;
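
[Sketch] This and the later ssc/intel_msic hunks drop the explicit NULL check on the memory resource because devm_ioremap_resource() validates the resource itself and reports failure through an ERR_PTR-encoded return, leaving the caller with only the IS_ERR() test. A minimal userspace model of that error-encoding convention (names and values illustrative only, not the kernel implementation):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *ioremap_resource_sketch(const void *res)
{
        if (!res)                       /* missing resource handled here... */
                return ERR_PTR(-22);    /* ...so the caller need not check it */
        return (void *)0x1000;          /* stand-in for a mapped address */
}

int main(void)
{
        void *base = ioremap_resource_sketch(NULL);

        if (IS_ERR(base)) {
                fprintf(stderr, "map failed: %ld\n", PTR_ERR(base));
                return 1;
        }
        return 0;
}
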
index d9aed1593e5d34855b488a89750da5ed68f1a0fe..d54e985748b78403956a0b7ba0d2b8634b949244 100644 (file)
@@ -579,7 +579,7 @@ config AB8500_CORE
 
 config AB8500_DEBUG
        bool "Enable debug info via debugfs"
-       depends on AB8500_CORE && DEBUG_FS
+       depends on AB8500_GPADC && DEBUG_FS
        default y if DEBUG_FS
        help
          Select this option if you want debug information using the debug
@@ -818,6 +818,7 @@ config MFD_TPS65910
 config MFD_TPS65912
        bool "TI TPS65912 Power Management chip"
        depends on GPIOLIB
+       select MFD_CORE
        help
          If you say yes here you get support for the TPS65912 series of
          PM chips.
index 8e8a016effe95251cca5692fed6c7b4442139630..258b367e39891468eb4dd0a9dabf05558a75446d 100644 (file)
@@ -867,6 +867,15 @@ static struct resource ab8500_chargalg_resources[] = {};
 
 #ifdef CONFIG_DEBUG_FS
 static struct resource ab8500_debug_resources[] = {
+       {
+               .name   = "IRQ_AB8500",
+               /*
+                * Number will be filled in. NOTE: this is deliberately
+                * not flagged as an IRQ in order to avoid remapping using
+                * the irqdomain in the MFD core, so that this IRQ passes
+                * unremapped to the debug code.
+                */
+       },
        {
                .name   = "IRQ_FIRST",
                .start  = AB8500_INT_MAIN_EXT_CH_NOT_OK,
@@ -1051,6 +1060,7 @@ static struct mfd_cell ab8500_devs[] = {
        },
        {
                .name = "ab8500-gpadc",
+               .of_compatible = "stericsson,ab8500-gpadc",
                .num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
                .resources = ab8500_gpadc_resources,
        },
@@ -1097,7 +1107,7 @@ static struct mfd_cell ab8500_devs[] = {
                .of_compatible = "stericsson,ab8500-denc",
        },
        {
-               .name = "ab8500-gpio",
+               .name = "pinctrl-ab8500",
                .of_compatible = "stericsson,ab8500-gpio",
        },
        {
@@ -1208,6 +1218,7 @@ static struct mfd_cell ab8505_devs[] = {
        },
        {
                .name = "ab8500-gpadc",
+               .of_compatible = "stericsson,ab8500-gpadc",
                .num_resources = ARRAY_SIZE(ab8505_gpadc_resources),
                .resources = ab8505_gpadc_resources,
        },
@@ -1234,7 +1245,7 @@ static struct mfd_cell ab8505_devs[] = {
                .name = "ab8500-leds",
        },
        {
-               .name = "ab8500-gpio",
+               .name = "pinctrl-ab8505",
        },
        {
                .name = "ab8500-usb",
@@ -1271,6 +1282,7 @@ static struct mfd_cell ab8540_devs[] = {
        },
        {
                .name = "ab8500-gpadc",
+               .of_compatible = "stericsson,ab8500-gpadc",
                .num_resources = ARRAY_SIZE(ab8505_gpadc_resources),
                .resources = ab8505_gpadc_resources,
        },
@@ -1302,7 +1314,7 @@ static struct mfd_cell ab8540_devs[] = {
                .resources = ab8500_temp_resources,
        },
        {
-               .name = "ab8500-gpio",
+               .name = "pinctrl-ab8540",
        },
        {
                .name = "ab8540-usb",
@@ -1712,6 +1724,12 @@ static int ab8500_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+#if CONFIG_DEBUG_FS
+       /* Pass to debugfs */
+       ab8500_debug_resources[0].start = ab8500->irq;
+       ab8500_debug_resources[0].end = ab8500->irq;
+#endif
+
        if (is_ab9540(ab8500))
                ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
                                ARRAY_SIZE(ab9540_devs), NULL,
index b88bbbc15f1e962f8f131d0f50bc25768091f55c..37b7ce4c7c3be57b4c8ea30474ec1edee47f8ad5 100644 (file)
 #include <linux/ctype.h>
 #endif
 
-/* TODO: this file should not reference IRQ_DB8500_AB8500! */
-#include <mach/irqs.h>
-
 static u32 debug_bank;
 static u32 debug_address;
 
+static int irq_ab8500;
 static int irq_first;
 static int irq_last;
 static u32 *irq_count;
@@ -1589,7 +1587,7 @@ void ab8500_debug_register_interrupt(int line)
 {
        if (line < num_interrupt_lines) {
                num_interrupts[line]++;
-               if (suspend_test_wake_cause_interrupt_is_mine(IRQ_DB8500_AB8500))
+               if (suspend_test_wake_cause_interrupt_is_mine(irq_ab8500))
                        num_wake_interrupts[line]++;
        }
 }
@@ -2941,6 +2939,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
        struct dentry *file;
        int ret = -ENOMEM;
        struct ab8500 *ab8500;
+       struct resource *res;
        debug_bank = AB8500_MISC;
        debug_address = AB8500_REV_REG & 0x00FF;
 
@@ -2959,6 +2958,15 @@ static int ab8500_debug_probe(struct platform_device *plf)
        if (!event_name)
                goto out_freedev_attr;
 
+       res = platform_get_resource_byname(plf, 0, "IRQ_AB8500");
+       if (!res) {
+               dev_err(&plf->dev, "AB8500 irq not found, err %d\n",
+                       irq_first);
+               ret = -ENXIO;
+               goto out_freeevent_name;
+       }
+       irq_ab8500 = res->start;
+
        irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
        if (irq_first < 0) {
                dev_err(&plf->dev, "First irq not found, err %d\n",
index 5e65b28a5d0901e1dc5058c084dc444cd2c4a1bd..13f7866de46eb21a4cdf0363f0386d315f8171dd 100644 (file)
@@ -907,14 +907,17 @@ static int ab8500_gpadc_suspend(struct device *dev)
 static int ab8500_gpadc_resume(struct device *dev)
 {
        struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+       int ret;
 
-       regulator_enable(gpadc->regu);
+       ret = regulator_enable(gpadc->regu);
+       if (ret)
+               dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
 
        pm_runtime_mark_last_busy(gpadc->dev);
        pm_runtime_put_autosuspend(gpadc->dev);
 
        mutex_unlock(&gpadc->ab8500_gpadc_lock);
-       return 0;
+       return ret;
 }
 
 static int ab8500_gpadc_probe(struct platform_device *pdev)
index fbca1ced49faac60bfffdef6ff6e19474475ff7b..8e0dae59844d494cd4e2c79904779b8f0a3c0acf 100644 (file)
@@ -23,7 +23,7 @@
 
 static struct device *sysctrl_dev;
 
-void ab8500_power_off(void)
+static void ab8500_power_off(void)
 {
        sigset_t old;
        sigset_t all;
@@ -104,7 +104,7 @@ void ab8500_restart(char mode, const char *cmd)
 
        plat = dev_get_platdata(sysctrl_dev->parent);
        pdata = plat->sysctrl;
-       if (pdata->reboot_reason_code)
+       if (pdata && pdata->reboot_reason_code)
                reason = pdata->reboot_reason_code(cmd);
        else
                pr_warn("[%s] No reboot reason set. Default reason %d\n",
@@ -188,14 +188,15 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev)
 
        plat = dev_get_platdata(pdev->dev.parent);
 
-       if (!(plat && plat->sysctrl))
+       if (!plat)
                return -EINVAL;
 
-       if (plat->pm_power_off)
+       sysctrl_dev = &pdev->dev;
+
+       if (!pm_power_off)
                pm_power_off = ab8500_power_off;
 
        pdata = plat->sysctrl;
-
        if (pdata) {
                int last, ret, i, j;
 
@@ -226,6 +227,10 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev)
 static int ab8500_sysctrl_remove(struct platform_device *pdev)
 {
        sysctrl_dev = NULL;
+
+       if (pm_power_off == ab8500_power_off)
+               pm_power_off = NULL;
+
        return 0;
 }
 
index 9818afba25153b377cf2b64b54b18bccc02f3e2c..3714acb6145864611cd29fd314c7bfd9d0af5bfb 100644 (file)
@@ -156,7 +156,7 @@ EXPORT_SYMBOL(abx500_startup_irq_enabled);
 void abx500_dump_all_banks(void)
 {
        struct abx500_ops *ops;
-       struct device dummy_child = {0};
+       struct device dummy_child = {NULL};
        struct abx500_device_entry *dev_entry;
 
        list_for_each_entry(dev_entry, &abx500_list, list) {
index 19193cf1e7a1bf9075a4eb63e157b6a419eba14f..367ccb58ecb15a1954cf39e936c46bc5a4bf3498 100644 (file)
@@ -120,7 +120,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
 
                for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
                        if (*ptr == EC_MSG_HEADER) {
-                               dev_dbg(ec_dev->dev, "msg found at %ld\n",
+                               dev_dbg(ec_dev->dev, "msg found at %zd\n",
                                        ptr - ec_dev->din);
                                break;
                        }
@@ -154,7 +154,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
                 * maximum-supported transfer size.
                 */
                todo = min(need_len, 256);
-               dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%ld\n",
+               dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
                        todo, need_len, ptr - ec_dev->din);
 
                memset(&trans, '\0', sizeof(trans));
@@ -178,7 +178,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
                need_len -= todo;
        }
 
-       dev_dbg(ec_dev->dev, "loop done, ptr=%ld\n", ptr - ec_dev->din);
+       dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din);
 
        return 0;
 }
index 319b8abe742b4ef0d4bf5b74e300514e5c031c91..66f80973596bd8833557d13c1bfc5426a6b1290d 100644 (file)
@@ -1613,6 +1613,8 @@ static unsigned long dsiclk_rate(u8 n)
 
        if (divsel == PRCM_DSI_PLLOUT_SEL_OFF)
                divsel = dsiclk[n].divsel;
+       else
+               dsiclk[n].divsel = divsel;
 
        switch (divsel) {
        case PRCM_DSI_PLLOUT_SEL_PHI_4:
@@ -3095,6 +3097,7 @@ static struct mfd_cell db8500_prcmu_devs[] = {
                .num_resources = ARRAY_SIZE(db8500_thsens_resources),
                .resources = db8500_thsens_resources,
                .platform_data = &db8500_thsens_data,
+               .pdata_size = sizeof(db8500_thsens_data),
        },
 };
 
index 5be3b5e13855045f27c4697da69710ec324e74e9..d8d5137f9717214f70b235c15e51215ac684807b 100644 (file)
@@ -414,11 +414,6 @@ static int intel_msic_probe(struct platform_device *pdev)
         * the clients via intel_msic_irq_read().
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
-               return -ENODEV;
-       }
-
        msic->irq_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(msic->irq_base))
                return PTR_ERR(msic->irq_base);
index de48b4e884501b7cce5540f87f9144c0a8b60742..6f1ef63086c9df98c56c00369c7b91b60c5320c5 100644 (file)
@@ -29,6 +29,8 @@
 
 #include <linux/mfd/si476x-core.h>
 
+#include <asm/unaligned.h>
+
 #define msb(x)                  ((u8)((u16) x >> 8))
 #define lsb(x)                  ((u8)((u16) x &  0x00FF))
 
@@ -150,7 +152,7 @@ enum si476x_acf_status_report_bits {
        SI476X_ACF_SOFTMUTE_INT = (1 << 0),
 
        SI476X_ACF_SMUTE        = (1 << 0),
-       SI476X_ACF_SMATTN       = 0b11111,
+       SI476X_ACF_SMATTN       = 0x1f,
        SI476X_ACF_PILOT        = (1 << 7),
        SI476X_ACF_STBLEND      = ~SI476X_ACF_PILOT,
 };
@@ -483,7 +485,7 @@ int si476x_core_cmd_get_property(struct si476x_core *core, u16 property)
        if (err < 0)
                return err;
        else
-               return be16_to_cpup((__be16 *)(resp + 2));
+               return get_unaligned_be16(resp + 2);
 }
 EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property);
 
@@ -772,18 +774,18 @@ int si476x_core_cmd_am_rsq_status(struct si476x_core *core,
        if (!report)
                return err;
 
-       report->snrhint         = 0b00001000 & resp[1];
-       report->snrlint         = 0b00000100 & resp[1];
-       report->rssihint        = 0b00000010 & resp[1];
-       report->rssilint        = 0b00000001 & resp[1];
+       report->snrhint         = 0x08 & resp[1];
+       report->snrlint         = 0x04 & resp[1];
+       report->rssihint        = 0x02 & resp[1];
+       report->rssilint        = 0x01 & resp[1];
 
-       report->bltf            = 0b10000000 & resp[2];
-       report->snr_ready       = 0b00100000 & resp[2];
-       report->rssiready       = 0b00001000 & resp[2];
-       report->afcrl           = 0b00000010 & resp[2];
-       report->valid           = 0b00000001 & resp[2];
+       report->bltf            = 0x80 & resp[2];
+       report->snr_ready       = 0x20 & resp[2];
+       report->rssiready       = 0x08 & resp[2];
+       report->afcrl           = 0x02 & resp[2];
+       report->valid           = 0x01 & resp[2];
 
-       report->readfreq        = be16_to_cpup((__be16 *)(resp + 3));
+       report->readfreq        = get_unaligned_be16(resp + 3);
        report->freqoff         = resp[5];
        report->rssi            = resp[6];
        report->snr             = resp[7];
@@ -931,26 +933,26 @@ int si476x_core_cmd_fm_rds_status(struct si476x_core *core,
        if (err < 0 || report == NULL)
                return err;
 
-       report->rdstpptyint     = 0b00010000 & resp[1];
-       report->rdspiint        = 0b00001000 & resp[1];
-       report->rdssyncint      = 0b00000010 & resp[1];
-       report->rdsfifoint      = 0b00000001 & resp[1];
+       report->rdstpptyint     = 0x10 & resp[1];
+       report->rdspiint        = 0x08 & resp[1];
+       report->rdssyncint      = 0x02 & resp[1];
+       report->rdsfifoint      = 0x01 & resp[1];
 
-       report->tpptyvalid      = 0b00010000 & resp[2];
-       report->pivalid         = 0b00001000 & resp[2];
-       report->rdssync         = 0b00000010 & resp[2];
-       report->rdsfifolost     = 0b00000001 & resp[2];
+       report->tpptyvalid      = 0x10 & resp[2];
+       report->pivalid         = 0x08 & resp[2];
+       report->rdssync         = 0x02 & resp[2];
+       report->rdsfifolost     = 0x01 & resp[2];
 
-       report->tp              = 0b00100000 & resp[3];
-       report->pty             = 0b00011111 & resp[3];
+       report->tp              = 0x20 & resp[3];
+       report->pty             = 0x1f & resp[3];
 
-       report->pi              = be16_to_cpup((__be16 *)(resp + 4));
+       report->pi              = get_unaligned_be16(resp + 4);
        report->rdsfifoused     = resp[6];
 
-       report->ble[V4L2_RDS_BLOCK_A]   = 0b11000000 & resp[7];
-       report->ble[V4L2_RDS_BLOCK_B]   = 0b00110000 & resp[7];
-       report->ble[V4L2_RDS_BLOCK_C]   = 0b00001100 & resp[7];
-       report->ble[V4L2_RDS_BLOCK_D]   = 0b00000011 & resp[7];
+       report->ble[V4L2_RDS_BLOCK_A]   = 0xc0 & resp[7];
+       report->ble[V4L2_RDS_BLOCK_B]   = 0x30 & resp[7];
+       report->ble[V4L2_RDS_BLOCK_C]   = 0x0c & resp[7];
+       report->ble[V4L2_RDS_BLOCK_D]   = 0x03 & resp[7];
 
        report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A;
        report->rds[V4L2_RDS_BLOCK_A].msb = resp[8];
@@ -991,9 +993,9 @@ int si476x_core_cmd_fm_rds_blockcount(struct si476x_core *core,
                                       SI476X_DEFAULT_TIMEOUT);
 
        if (!err) {
-               report->expected        = be16_to_cpup((__be16 *)(resp + 2));
-               report->received        = be16_to_cpup((__be16 *)(resp + 4));
-               report->uncorrectable   = be16_to_cpup((__be16 *)(resp + 6));
+               report->expected        = get_unaligned_be16(resp + 2);
+               report->received        = get_unaligned_be16(resp + 4);
+               report->uncorrectable   = get_unaligned_be16(resp + 6);
        }
 
        return err;
@@ -1005,7 +1007,7 @@ int si476x_core_cmd_fm_phase_diversity(struct si476x_core *core,
 {
        u8       resp[CMD_FM_PHASE_DIVERSITY_NRESP];
        const u8 args[CMD_FM_PHASE_DIVERSITY_NARGS] = {
-               mode & 0b111,
+               mode & 0x07,
        };
 
        return si476x_core_send_command(core, CMD_FM_PHASE_DIVERSITY,
@@ -1162,7 +1164,7 @@ static int si476x_core_cmd_am_tune_freq_a20(struct si476x_core *core,
        const int am_freq = tuneargs->freq;
        u8       resp[CMD_AM_TUNE_FREQ_NRESP];
        const u8 args[CMD_AM_TUNE_FREQ_NARGS] = {
-               (tuneargs->zifsr << 6) | (tuneargs->injside & 0b11),
+               (tuneargs->zifsr << 6) | (tuneargs->injside & 0x03),
                msb(am_freq),
                lsb(am_freq),
        };
@@ -1197,20 +1199,20 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core,
        if (err < 0 || report == NULL)
                return err;
 
-       report->multhint        = 0b10000000 & resp[1];
-       report->multlint        = 0b01000000 & resp[1];
-       report->snrhint         = 0b00001000 & resp[1];
-       report->snrlint         = 0b00000100 & resp[1];
-       report->rssihint        = 0b00000010 & resp[1];
-       report->rssilint        = 0b00000001 & resp[1];
+       report->multhint        = 0x80 & resp[1];
+       report->multlint        = 0x40 & resp[1];
+       report->snrhint         = 0x08 & resp[1];
+       report->snrlint         = 0x04 & resp[1];
+       report->rssihint        = 0x02 & resp[1];
+       report->rssilint        = 0x01 & resp[1];
 
-       report->bltf            = 0b10000000 & resp[2];
-       report->snr_ready       = 0b00100000 & resp[2];
-       report->rssiready       = 0b00001000 & resp[2];
-       report->afcrl           = 0b00000010 & resp[2];
-       report->valid           = 0b00000001 & resp[2];
+       report->bltf            = 0x80 & resp[2];
+       report->snr_ready       = 0x20 & resp[2];
+       report->rssiready       = 0x08 & resp[2];
+       report->afcrl           = 0x02 & resp[2];
+       report->valid           = 0x01 & resp[2];
 
-       report->readfreq        = be16_to_cpup((__be16 *)(resp + 3));
+       report->readfreq        = get_unaligned_be16(resp + 3);
        report->freqoff         = resp[5];
        report->rssi            = resp[6];
        report->snr             = resp[7];
@@ -1218,7 +1220,7 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core,
        report->hassi           = resp[10];
        report->mult            = resp[11];
        report->dev             = resp[12];
-       report->readantcap      = be16_to_cpup((__be16 *)(resp + 13));
+       report->readantcap      = get_unaligned_be16(resp + 13);
        report->assi            = resp[15];
        report->usn             = resp[16];
 
@@ -1251,20 +1253,20 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core,
        if (err < 0 || report == NULL)
                return err;
 
-       report->multhint        = 0b10000000 & resp[1];
-       report->multlint        = 0b01000000 & resp[1];
-       report->snrhint         = 0b00001000 & resp[1];
-       report->snrlint         = 0b00000100 & resp[1];
-       report->rssihint        = 0b00000010 & resp[1];
-       report->rssilint        = 0b00000001 & resp[1];
+       report->multhint        = 0x80 & resp[1];
+       report->multlint        = 0x40 & resp[1];
+       report->snrhint         = 0x08 & resp[1];
+       report->snrlint         = 0x04 & resp[1];
+       report->rssihint        = 0x02 & resp[1];
+       report->rssilint        = 0x01 & resp[1];
 
-       report->bltf            = 0b10000000 & resp[2];
-       report->snr_ready       = 0b00100000 & resp[2];
-       report->rssiready       = 0b00001000 & resp[2];
-       report->afcrl           = 0b00000010 & resp[2];
-       report->valid           = 0b00000001 & resp[2];
+       report->bltf            = 0x80 & resp[2];
+       report->snr_ready       = 0x20 & resp[2];
+       report->rssiready       = 0x08 & resp[2];
+       report->afcrl           = 0x02 & resp[2];
+       report->valid           = 0x01 & resp[2];
 
-       report->readfreq        = be16_to_cpup((__be16 *)(resp + 3));
+       report->readfreq        = get_unaligned_be16(resp + 3);
        report->freqoff         = resp[5];
        report->rssi            = resp[6];
        report->snr             = resp[7];
@@ -1272,7 +1274,7 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core,
        report->hassi           = resp[10];
        report->mult            = resp[11];
        report->dev             = resp[12];
-       report->readantcap      = be16_to_cpup((__be16 *)(resp + 13));
+       report->readantcap      = get_unaligned_be16(resp + 13);
        report->assi            = resp[15];
        report->usn             = resp[16];
 
@@ -1306,21 +1308,21 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core,
        if (err < 0 || report == NULL)
                return err;
 
-       report->multhint        = 0b10000000 & resp[1];
-       report->multlint        = 0b01000000 & resp[1];
-       report->snrhint         = 0b00001000 & resp[1];
-       report->snrlint         = 0b00000100 & resp[1];
-       report->rssihint        = 0b00000010 & resp[1];
-       report->rssilint        = 0b00000001 & resp[1];
-
-       report->bltf            = 0b10000000 & resp[2];
-       report->snr_ready       = 0b00100000 & resp[2];
-       report->rssiready       = 0b00001000 & resp[2];
-       report->injside         = 0b00000100 & resp[2];
-       report->afcrl           = 0b00000010 & resp[2];
-       report->valid           = 0b00000001 & resp[2];
-
-       report->readfreq        = be16_to_cpup((__be16 *)(resp + 3));
+       report->multhint        = 0x80 & resp[1];
+       report->multlint        = 0x40 & resp[1];
+       report->snrhint         = 0x08 & resp[1];
+       report->snrlint         = 0x04 & resp[1];
+       report->rssihint        = 0x02 & resp[1];
+       report->rssilint        = 0x01 & resp[1];
+
+       report->bltf            = 0x80 & resp[2];
+       report->snr_ready       = 0x20 & resp[2];
+       report->rssiready       = 0x08 & resp[2];
+       report->injside         = 0x04 & resp[2];
+       report->afcrl           = 0x02 & resp[2];
+       report->valid           = 0x01 & resp[2];
+
+       report->readfreq        = get_unaligned_be16(resp + 3);
        report->freqoff         = resp[5];
        report->rssi            = resp[6];
        report->snr             = resp[7];
@@ -1329,7 +1331,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core,
        report->hassi           = resp[10];
        report->mult            = resp[11];
        report->dev             = resp[12];
-       report->readantcap      = be16_to_cpup((__be16 *)(resp + 13));
+       report->readantcap      = get_unaligned_be16(resp + 13);
        report->assi            = resp[15];
        report->usn             = resp[16];
 
@@ -1337,7 +1339,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core,
        report->rdsdev          = resp[18];
        report->assidev         = resp[19];
        report->strongdev       = resp[20];
-       report->rdspi           = be16_to_cpup((__be16 *)(resp + 21));
+       report->rdspi           = get_unaligned_be16(resp + 21);
 
        return err;
 }
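
[Sketch] The si476x hunks above swap be16_to_cpup() on a cast pointer for get_unaligned_be16() because the 16-bit fields sit at odd offsets in the response buffer, so the value is assembled byte by byte instead of dereferencing a possibly misaligned __be16 pointer. A tiny userspace sketch of the equivalent read (buffer contents are made up for the demo):

#include <stdint.h>
#include <stdio.h>

static uint16_t get_unaligned_be16_sketch(const uint8_t *p)
{
        /* Big-endian: high byte first, no alignment assumptions. */
        return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
        /* resp + 3 in the RSQ status reply is not 2-byte aligned. */
        const uint8_t resp[8] = { 0x00, 0x0e, 0xa1, 0x27, 0x4e, 0x05, 0x30, 0x18 };

        printf("readfreq = %u\n", get_unaligned_be16_sketch(resp + 3));
        return 0;
}
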
index c09c28f92055b156245305da692e1c98582839cc..1abd5ad599251cc655672f656c2718bf65c49aa2 100644 (file)
@@ -154,11 +154,6 @@ static int ssc_probe(struct platform_device *pdev)
        ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!regs) {
-               dev_dbg(&pdev->dev, "no mmio resource defined\n");
-               return -ENXIO;
-       }
-
        ssc->regs = devm_ioremap_resource(&pdev->dev, regs);
        if (IS_ERR(ssc->regs))
                return PTR_ERR(ssc->regs);
index 7014167e2c619f23acc05398973552fd8ab23f9e..c37eeedfe215634775a59fa581d83f749a3fb536 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 
-static int irq;
+static int irq = -1;
 
 static irqreturn_t dummy_interrupt(int irq, void *dev_id)
 {
@@ -36,6 +36,10 @@ static irqreturn_t dummy_interrupt(int irq, void *dev_id)
 
 static int __init dummy_irq_init(void)
 {
+       if (irq < 0) {
+               printk(KERN_ERR "dummy-irq: no IRQ given.  Use irq=N\n");
+               return -EIO;
+       }
        if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) {
                printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq);
                return -EIO;
index 1e935eacaa7faee9903e20303dc703c75ed014ff..9ecd49a7be1b33cac4ece186e35e3ebc255d9d78 100644 (file)
@@ -496,6 +496,8 @@ int mei_cl_disable_device(struct mei_cl_device *device)
                }
        }
 
+       device->event_cb = NULL;
+
        mutex_unlock(&dev->device_lock);
 
        if (!device->ops || !device->ops->disable)
index 7c44c8dbae424904c53e041f5723ab51bf974e6f..053139f610861fa98c6778176d86f5a4969660f7 100644 (file)
@@ -489,11 +489,16 @@ static int mei_ioctl_connect_client(struct file *file,
 
        /* find ME client we're trying to connect to */
        i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
-       if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
-               cl->me_client_id = dev->me_clients[i].client_id;
-               cl->state = MEI_FILE_CONNECTING;
+       if (i < 0 || dev->me_clients[i].props.fixed_address) {
+               dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n",
+                               &data->in_client_uuid);
+               rets = -ENODEV;
+               goto end;
        }
 
+       cl->me_client_id = dev->me_clients[i].client_id;
+       cl->state = MEI_FILE_CONNECTING;
+
        dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
                        cl->me_client_id);
        dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
@@ -527,11 +532,6 @@ static int mei_ioctl_connect_client(struct file *file,
                goto end;
        }
 
-       if (cl->state != MEI_FILE_CONNECTING) {
-               rets = -ENODEV;
-               goto end;
-       }
-
 
        /* prepare the output buffer */
        client = &data->out_client_properties;
@@ -543,7 +543,6 @@ static int mei_ioctl_connect_client(struct file *file,
        rets = mei_cl_connect(cl, file);
 
 end:
-       dev_dbg(&dev->pdev->dev, "free connect cb memory.");
        return rets;
 }
 
index ea98f7e9ccd19d6ef8ea862616bcc4c0f3055b8d..39c2ecadb273d374b41be572bee6431e7b81e784 100644 (file)
@@ -4,7 +4,7 @@
 
 config VMWARE_VMCI
        tristate "VMware VMCI Driver"
-       depends on X86 && PCI && NET
+       depends on X86 && PCI
        help
          This is VMware's Virtual Machine Communication Interface.  It enables
          high-speed communication between host and guest in a virtual
index d94245dbd7651edc70e9bb23d6d9dcacbfdbd5d7..8ff2e5ee8fb8b955f8da30c8b2a2eba94ed43710 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/pagemap.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/socket.h>
+#include <linux/uio.h>
 #include <linux/wait.h>
 #include <linux/vmalloc.h>
 
index e75774f72606487573cd3ee1817040b460bd11fb..aca59d93d5a9b8d496790935d89c5018de796d61 100644 (file)
@@ -2230,10 +2230,15 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
        mmc_free_host(slot->mmc);
 }
 
-static bool atmci_filter(struct dma_chan *chan, void *slave)
+static bool atmci_filter(struct dma_chan *chan, void *pdata)
 {
-       struct mci_dma_data     *sl = slave;
+       struct mci_platform_data *sl_pdata = pdata;
+       struct mci_dma_data *sl;
 
+       if (!sl_pdata)
+               return false;
+
+       sl = sl_pdata->dma_slave;
        if (sl && find_slave_dev(sl) == chan->device->dev) {
                chan->private = slave_data_ptr(sl);
                return true;
@@ -2245,24 +2250,18 @@ static bool atmci_filter(struct dma_chan *chan, void *slave)
 static bool atmci_configure_dma(struct atmel_mci *host)
 {
        struct mci_platform_data        *pdata;
+       dma_cap_mask_t mask;
 
        if (host == NULL)
                return false;
 
        pdata = host->pdev->dev.platform_data;
 
-       if (!pdata)
-               return false;
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
 
-       if (pdata->dma_slave && find_slave_dev(pdata->dma_slave)) {
-               dma_cap_mask_t mask;
-
-               /* Try to grab a DMA channel */
-               dma_cap_zero(mask);
-               dma_cap_set(DMA_SLAVE, mask);
-               host->dma.chan =
-                       dma_request_channel(mask, atmci_filter, pdata->dma_slave);
-       }
+       host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata,
+                                                         &host->pdev->dev, "rxtx");
        if (!host->dma.chan) {
                dev_warn(&host->pdev->dev, "no DMA channel available\n");
                return false;
index 375c109607ff2020e632834cb4ffe7eca95fc2b1..f4f3038c1df08e2d5934014875abe38782f67db6 100644 (file)
@@ -1130,6 +1130,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        struct variant_data *variant = host->variant;
        u32 pwr = 0;
        unsigned long flags;
+       int ret;
 
        pm_runtime_get_sync(mmc_dev(mmc));
 
@@ -1161,8 +1162,12 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                break;
        case MMC_POWER_ON:
                if (!IS_ERR(mmc->supply.vqmmc) &&
-                   !regulator_is_enabled(mmc->supply.vqmmc))
-                       regulator_enable(mmc->supply.vqmmc);
+                   !regulator_is_enabled(mmc->supply.vqmmc)) {
+                       ret = regulator_enable(mmc->supply.vqmmc);
+                       if (ret < 0)
+                               dev_err(mmc_dev(mmc),
+                                       "failed to enable vqmmc regulator\n");
+               }
 
                pwr |= MCI_PWR_ON;
                break;
index 6e44025acf01fcf9669567dff93b8c3d712b2519..eccedc7d06a4301169d24187edc1925fe4a49f87 100644 (file)
@@ -161,6 +161,7 @@ struct omap_hsmmc_host {
         */
        struct  regulator       *vcc;
        struct  regulator       *vcc_aux;
+       int                     pbias_disable;
        void    __iomem         *base;
        resource_size_t         mapbase;
        spinlock_t              irq_lock; /* Prevent races with irq handler */
@@ -255,11 +256,11 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
        if (!host->vcc)
                return 0;
        /*
-        * With DT, never turn OFF the regulator. This is because
+        * With DT, never turn OFF the regulator for MMC1. This is because
         * the pbias cell programming support is still missing when
         * booting with Device tree
         */
-       if (dev->of_node && !vdd)
+       if (host->pbias_disable && !vdd)
                return 0;
 
        if (mmc_slot(host).before_set_reg)
@@ -1520,10 +1521,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                        (ios->vdd == DUAL_VOLT_OCR_BIT) &&
                        /*
                         * With pbias cell programming missing, this
-                        * can't be allowed when booting with device
+                        * can't be allowed on MMC1 when booting with device
                         * tree.
                         */
-                       !host->dev->of_node) {
+                       !host->pbias_disable) {
                                /*
                                 * The mmc_select_voltage fn of the core does
                                 * not seem to set the power_mode to
@@ -1871,6 +1872,10 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 
        omap_hsmmc_context_save(host);
 
+       /* This can be removed once we support PBIAS with DT */
+       if (host->dev->of_node && host->mapbase == 0x4809c000)
+               host->pbias_disable = 1;
+
        host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
        /*
         * MMC can still work without debounce clock.
@@ -1906,33 +1911,41 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 
        omap_hsmmc_conf_bus_power(host);
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
-       if (!res) {
-               dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
-               ret = -ENXIO;
-               goto err_irq;
-       }
-       tx_req = res->start;
+       if (!pdev->dev.of_node) {
+               res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+               if (!res) {
+                       dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
+                       ret = -ENXIO;
+                       goto err_irq;
+               }
+               tx_req = res->start;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
-       if (!res) {
-               dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
-               ret = -ENXIO;
-               goto err_irq;
+               res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+               if (!res) {
+                       dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
+                       ret = -ENXIO;
+                       goto err_irq;
+               }
+               rx_req = res->start;
        }
-       rx_req = res->start;
 
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
 
-       host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
+       host->rx_chan =
+               dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+                                                &rx_req, &pdev->dev, "rx");
+
        if (!host->rx_chan) {
                dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
                ret = -ENXIO;
                goto err_irq;
        }
 
-       host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
+       host->tx_chan =
+               dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+                                                &tx_req, &pdev->dev, "tx");
+
        if (!host->tx_chan) {
                dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
                ret = -ENXIO;
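
The omap_hsmmc conversion above swaps dma_request_channel() for dma_request_slave_channel_compat(), a helper that first asks the DMA engine core for the channel described in the device tree (matched against the "rx"/"tx" dma-names) and only falls back to the legacy filter-function lookup when no such description exists. A sketch of a probe-time request under those assumptions, reusing the driver's omap_dma_filter_fn and legacy request line:

        dma_cap_mask_t mask;
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* DT boot: resolved via dma-names = "tx"; legacy boot: filter fn + tx_req */
        chan = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
                                                &tx_req, &pdev->dev, "tx");
        if (!chan)
                return -ENXIO;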
index 7bcf74b1a5cdd8d6d2fd113fb16f540365db0a45..706d9cb1a49e1ed794cded2fbbcc033a4a892b9a 100644 (file)
@@ -87,6 +87,12 @@ static const struct sdhci_ops sdhci_acpi_ops_dflt = {
        .enable_dma = sdhci_acpi_enable_dma,
 };
 
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
+       .caps    = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+       .caps2   = MMC_CAP2_HC_ERASE_SZ,
+       .flags   = SDHCI_ACPI_RUNTIME_PM,
+};
+
 static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
        .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
        .caps    = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD,
@@ -94,23 +100,67 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
        .pm_caps = MMC_PM_KEEP_POWER,
 };
 
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
+};
+
+struct sdhci_acpi_uid_slot {
+       const char *hid;
+       const char *uid;
+       const struct sdhci_acpi_slot *slot;
+};
+
+static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
+       { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
+       { "80860F14" , "3" , &sdhci_acpi_slot_int_sd   },
+       { "INT33BB"  , "2" , &sdhci_acpi_slot_int_sdio },
+       { "INT33C6"  , NULL, &sdhci_acpi_slot_int_sdio },
+       { "PNP0D40"  },
+       { },
+};
+
 static const struct acpi_device_id sdhci_acpi_ids[] = {
-       { "INT33C6", (kernel_ulong_t)&sdhci_acpi_slot_int_sdio },
-       { "PNP0D40" },
+       { "80860F14" },
+       { "INT33BB"  },
+       { "INT33C6"  },
+       { "PNP0D40"  },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
 
-static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid)
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot_by_ids(const char *hid,
+                                                               const char *uid)
 {
-       const struct acpi_device_id *id;
-
-       for (id = sdhci_acpi_ids; id->id[0]; id++)
-               if (!strcmp(id->id, hid))
-                       return (const struct sdhci_acpi_slot *)id->driver_data;
+       const struct sdhci_acpi_uid_slot *u;
+
+       for (u = sdhci_acpi_uids; u->hid; u++) {
+               if (strcmp(u->hid, hid))
+                       continue;
+               if (!u->uid)
+                       return u->slot;
+               if (uid && !strcmp(u->uid, uid))
+                       return u->slot;
+       }
        return NULL;
 }
 
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(acpi_handle handle,
+                                                        const char *hid)
+{
+       const struct sdhci_acpi_slot *slot;
+       struct acpi_device_info *info;
+       const char *uid = NULL;
+       acpi_status status;
+
+       status = acpi_get_object_info(handle, &info);
+       if (!ACPI_FAILURE(status) && (info->valid & ACPI_VALID_UID))
+               uid = info->unique_id.string;
+
+       slot = sdhci_acpi_get_slot_by_ids(hid, uid);
+
+       kfree(info);
+       return slot;
+}
+
 static int sdhci_acpi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -148,7 +198,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
 
        c = sdhci_priv(host);
        c->host = host;
-       c->slot = sdhci_acpi_get_slot(hid);
+       c->slot = sdhci_acpi_get_slot(handle, hid);
        c->pdev = pdev;
        c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
 
@@ -202,6 +252,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
                goto err_free;
 
        if (c->use_runtime_pm) {
+               pm_runtime_set_active(dev);
                pm_suspend_ignore_children(dev, 1);
                pm_runtime_set_autosuspend_delay(dev, 50);
                pm_runtime_use_autosuspend(dev);
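
The sdhci-acpi changes above key the slot configuration on the ACPI _UID as well as the _HID, since one _HID (here "80860F14") can cover several hosts that differ only in _UID. The match rule encoded in sdhci_acpi_uids[] reduces to roughly this sketch, where a NULL uid entry applies to every unit with that HID:

        static bool uid_slot_match(const struct sdhci_acpi_uid_slot *u,
                                   const char *hid, const char *uid)
        {
                if (strcmp(u->hid, hid))
                        return false;           /* different controller family */
                if (!u->uid)
                        return true;            /* entry matches any _UID */
                return uid && !strcmp(u->uid, uid);
        }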
index 67d6dde2ff1961015304b3d3fac3fcd7040bcfd9..d5f0d59e13104957b7539f28ec743b5713f6bb2e 100644 (file)
@@ -85,6 +85,12 @@ struct pltfm_imx_data {
        struct clk *clk_ipg;
        struct clk *clk_ahb;
        struct clk *clk_per;
+       enum {
+               NO_CMD_PENDING,      /* no multiblock command pending */
+               MULTIBLK_IN_PROCESS, /* multiblock command in progress */
+               WAIT_FOR_INT,        /* sent CMD12, waiting for response INT */
+       } multiblock_status;
+
 };
 
 static struct platform_device_id imx_esdhc_devtype[] = {
@@ -154,6 +160,8 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
 
 static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
 {
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct pltfm_imx_data *imx_data = pltfm_host->priv;
        u32 val = readl(host->ioaddr + reg);
 
        if (unlikely(reg == SDHCI_CAPABILITIES)) {
@@ -175,6 +183,18 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
                        val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
                        val |= SDHCI_INT_ADMA_ERROR;
                }
+
+               /*
+                * mask off the interrupt we get in response to the manually
+                * sent CMD12
+                */
+               if ((imx_data->multiblock_status == WAIT_FOR_INT) &&
+                   ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) {
+                       val &= ~SDHCI_INT_RESPONSE;
+                       writel(SDHCI_INT_RESPONSE, host->ioaddr +
+                                                  SDHCI_INT_STATUS);
+                       imx_data->multiblock_status = NO_CMD_PENDING;
+               }
        }
 
        return val;
@@ -211,6 +231,15 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
                        v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
                        v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK;
                        writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
+
+                       if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS)
+                       {
+                               /* send a manual CMD12 with RESPTYP=none */
+                               data = MMC_STOP_TRANSMISSION << 24 |
+                                      SDHCI_CMD_ABORTCMD << 16;
+                               writel(data, host->ioaddr + SDHCI_TRANSFER_MODE);
+                               imx_data->multiblock_status = WAIT_FOR_INT;
+                       }
        }
 
        if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
@@ -277,11 +306,13 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
                }
                return;
        case SDHCI_COMMAND:
-               if ((host->cmd->opcode == MMC_STOP_TRANSMISSION ||
-                    host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
-                   (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+               if (host->cmd->opcode == MMC_STOP_TRANSMISSION)
                        val |= SDHCI_CMD_ABORTCMD;
 
+               if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
+                   (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+                       imx_data->multiblock_status = MULTIBLK_IN_PROCESS;
+
                if (is_imx6q_usdhc(imx_data))
                        writel(val << 16,
                               host->ioaddr + SDHCI_TRANSFER_MODE);
@@ -324,8 +355,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
                /*
                 * Do not touch buswidth bits here. This is done in
                 * esdhc_pltfm_bus_width.
+                * Do not touch the D3CD bit either which is used for the
+                * SDIO interrupt errata workaround.
                 */
-               mask = 0xffff & ~ESDHC_CTRL_BUSWIDTH_MASK;
+               mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD);
 
                esdhc_clrset_le(host, mask, new_val, reg);
                return;
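
The sdhci-esdhc-imx hunks track a manually issued CMD12 with a three-state field so that its completion interrupt can be hidden from the SDHCI core. An illustrative reduction of the status-read filtering step (not the driver code), assuming the standard SDHCI_INT_RESPONSE status bit:

        enum cmd12_state { NO_CMD_PENDING, MULTIBLK_IN_PROCESS, WAIT_FOR_INT };

        /* Applied to values read from SDHCI_INT_STATUS: swallow the response
         * interrupt belonging to the manual CMD12, then return to idle. */
        static u32 filter_manual_cmd12(enum cmd12_state *state, u32 status)
        {
                if (*state == WAIT_FOR_INT && (status & SDHCI_INT_RESPONSE)) {
                        status &= ~SDHCI_INT_RESPONSE;
                        *state = NO_CMD_PENDING;
                }
                return status;
        }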
index 0012d3fdc999753da93389868d5f90957f5c7127..701d06d0e1fb2c63a174f0332da96a37850fd240 100644 (file)
@@ -33,6 +33,9 @@
  */
 #define PCI_DEVICE_ID_INTEL_PCH_SDIO0  0x8809
 #define PCI_DEVICE_ID_INTEL_PCH_SDIO1  0x880a
+#define PCI_DEVICE_ID_INTEL_BYT_EMMC   0x0f14
+#define PCI_DEVICE_ID_INTEL_BYT_SDIO   0x0f15
+#define PCI_DEVICE_ID_INTEL_BYT_SD     0x0f16
 
 /*
  * PCI registers
@@ -304,6 +307,33 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
        .probe_slot     = pch_hc_probe_slot,
 };
 
+static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+       slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
+       slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+       return 0;
+}
+
+static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+{
+       slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
+       return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
+       .allow_runtime_pm = true,
+       .probe_slot     = byt_emmc_probe_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
+       .quirks2        = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+       .allow_runtime_pm = true,
+       .probe_slot     = byt_sdio_probe_slot,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+};
+
 /* O2Micro extra registers */
 #define O2_SD_LOCK_WP          0xD3
 #define O2_SD_MULTI_VCC3V      0xEE
@@ -855,6 +885,30 @@ static const struct pci_device_id pci_ids[] = {
                .driver_data    = (kernel_ulong_t)&sdhci_intel_pch_sdio,
        },
 
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BYT_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BYT_SDIO,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BYT_SD,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
+       },
+
        {
                .vendor         = PCI_VENDOR_ID_O2,
                .device         = PCI_DEVICE_ID_O2_8120,
index a94facb46e5ca76bb239af03a32b160571ccd467..fd1df5e13ae44d77207d19fb492064166bf46525 100644 (file)
@@ -672,11 +672,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        }
 
        rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (rc == NULL) {
-               dev_err(&pdev->dev, "No memory resource found for device!\r\n");
-               return -ENXIO;
-       }
-
        host->io_base = devm_ioremap_resource(&pdev->dev, rc);
        if (IS_ERR(host->io_base))
                return PTR_ERR(host->io_base);
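
The LPC32xx NAND hunk can drop the explicit NULL check because devm_ioremap_resource() validates its resource argument itself (a NULL resource yields an ERR_PTR and a dev_err() message), so the canonical form is simply:

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* devm_ioremap_resource() reports and encodes any failure itself */
        host->io_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(host->io_base))
                return PTR_ERR(host->io_base);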
index fc58d118d844e3eefd684348d9ffeac48b7dfe74..390061d096934f83c04171a1a8a62bf9d8345ec5 100644 (file)
@@ -2360,14 +2360,15 @@ int bond_3ad_set_carrier(struct bonding *bond)
 }
 
 /**
- * bond_3ad_get_active_agg_info - get information of the active aggregator
+ * __bond_3ad_get_active_agg_info - get information of the active aggregator
  * @bond: bonding struct to work on
  * @ad_info: ad_info struct to fill with the bond's info
  *
  * Returns:   0 on success
  *          < 0 on error
  */
-int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
+int __bond_3ad_get_active_agg_info(struct bonding *bond,
+                                  struct ad_info *ad_info)
 {
        struct aggregator *aggregator = NULL;
        struct port *port;
@@ -2391,6 +2392,18 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
        return -1;
 }
 
+/* Wrapper used to hold bond->lock so no slave manipulation can occur */
+int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
+{
+       int ret;
+
+       read_lock(&bond->lock);
+       ret = __bond_3ad_get_active_agg_info(bond, ad_info);
+       read_unlock(&bond->lock);
+
+       return ret;
+}
+
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 {
        struct slave *slave, *start_at;
@@ -2402,8 +2415,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        struct ad_info ad_info;
        int res = 1;
 
-       if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
-               pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
+       if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
+               pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
                         dev->name);
                goto out;
        }
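
The bond_3ad change follows the usual kernel convention for lock wrappers: the double-underscore variant assumes the caller already holds bond->lock (the xmit path and, evidently, the /proc show path call it directly), while the plain-named wrapper takes the read lock for everyone else, e.g. the sysfs handlers. In sketch form, with hypothetical names:

        /* caller must hold bond->lock for read */
        int __get_agg_info(struct bonding *bond, struct ad_info *info);

        /* unlocked callers go through the wrapper */
        int get_agg_info(struct bonding *bond, struct ad_info *info)
        {
                int ret;

                read_lock(&bond->lock);
                ret = __get_agg_info(bond, info);
                read_unlock(&bond->lock);
                return ret;
        }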
index 0cfaa4afdecea333bd3d8831aa1e8c9e118e50b7..5d91ad0cc04142df9e73a52a62d06e5ab4d3898e 100644 (file)
@@ -273,6 +273,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave);
 void bond_3ad_adapter_duplex_changed(struct slave *slave);
 void bond_3ad_handle_link_change(struct slave *slave, char link);
 int  bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
+int  __bond_3ad_get_active_agg_info(struct bonding *bond,
+                                   struct ad_info *ad_info);
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
 int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
                         struct slave *slave);
index d0aade04e49aff739294de120d44defaba46fbf2..29b846cbfb48d3380fa8dae7e87782e641e2519b 100644 (file)
@@ -1362,6 +1362,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
                                                     slave->dev->features,
                                                     mask);
        }
+       features = netdev_add_tso_features(features, mask);
 
 out:
        read_unlock(&bond->lock);
@@ -2555,8 +2556,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
 {
        struct sk_buff *skb;
 
-       pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op,
-                slave_dev->name, dest_ip, src_ip, vlan_id);
+       pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
+                slave_dev->name, &dest_ip, &src_ip, vlan_id);
 
        skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
                         NULL, slave_dev->dev_addr, NULL);
@@ -2588,7 +2589,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                __be32 addr;
                if (!targets[i])
                        break;
-               pr_debug("basa: target %x\n", targets[i]);
+               pr_debug("basa: target %pI4\n", &targets[i]);
                if (!bond_vlan_used(bond)) {
                        pr_debug("basa: empty vlan: arp_send\n");
                        addr = bond_confirm_addr(bond->dev, targets[i], 0);
@@ -4470,7 +4471,7 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
 
 static int bond_check_params(struct bond_params *params)
 {
-       int arp_validate_value, fail_over_mac_value, primary_reselect_value;
+       int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
 
        /*
         * Convert string parameters.
@@ -4650,19 +4651,18 @@ static int bond_check_params(struct bond_params *params)
                arp_interval = BOND_LINK_ARP_INTERV;
        }
 
-       for (arp_ip_count = 0;
-            (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[arp_ip_count];
-            arp_ip_count++) {
+       for (arp_ip_count = 0, i = 0;
+            (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
                /* not complete check, but should be good enough to
                   catch mistakes */
-               __be32 ip = in_aton(arp_ip_target[arp_ip_count]);
-               if (!isdigit(arp_ip_target[arp_ip_count][0]) ||
-                   ip == 0 || ip == htonl(INADDR_BROADCAST)) {
+               __be32 ip = in_aton(arp_ip_target[i]);
+               if (!isdigit(arp_ip_target[i][0]) || ip == 0 ||
+                   ip == htonl(INADDR_BROADCAST)) {
                        pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
-                                  arp_ip_target[arp_ip_count]);
+                                  arp_ip_target[i]);
                        arp_interval = 0;
                } else {
-                       arp_target[arp_ip_count] = ip;
+                       arp_target[arp_ip_count++] = ip;
                }
        }
 
@@ -4696,8 +4696,6 @@ static int bond_check_params(struct bond_params *params)
        if (miimon) {
                pr_info("MII link monitoring set to %d ms\n", miimon);
        } else if (arp_interval) {
-               int i;
-
                pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
                        arp_interval,
                        arp_validate_tbl[arp_validate_value].modename,
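
Two of the bond_main.c hunks also switch the ARP debug messages from raw %x to the %pI4 printk extension, which takes a pointer to a big-endian 32-bit address and prints it in dotted-quad form regardless of host byte order. Minimal usage sketch:

        __be32 target = in_aton("192.0.2.1");   /* example address */

        pr_debug("probing arp target %pI4\n", &target);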
index 94d06f1307b850f927c85b4b7645f49859b4772b..4060d41f0ee7b15bc228b6bda9488a9e6cc9c821 100644 (file)
@@ -130,7 +130,7 @@ static void bond_info_show_master(struct seq_file *seq)
                seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
                           ad_select_tbl[bond->params.ad_select].modename);
 
-               if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+               if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
                        seq_printf(seq, "bond %s has no active aggregator\n",
                                   bond->dev->name);
                } else {
index ea7a388f484306710a33375f3a553fd1ecc5b621..d7434e0a610e925e6a8b6de206856b37b4fd0375 100644 (file)
@@ -316,6 +316,9 @@ static ssize_t bonding_store_mode(struct device *d,
        int new_value, ret = count;
        struct bonding *bond = to_bond(d);
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        if (bond->dev->flags & IFF_UP) {
                pr_err("unable to update mode of %s because interface is up.\n",
                       bond->dev->name);
@@ -352,6 +355,7 @@ static ssize_t bonding_store_mode(struct device *d,
                bond->dev->name, bond_mode_tbl[new_value].modename,
                new_value);
 out:
+       rtnl_unlock();
        return ret;
 }
 static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
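
bonding_store_mode() now uses the trylock-and-restart idiom common in networking sysfs handlers: if RTNL is contended the handler returns restart_syscall() (-ERESTARTNOINTR), so the write(2) is transparently retried instead of sleeping on the mutex, which helps avoid deadlocks against RTNL holders that are tearing down the same sysfs attribute. A sketch of the shape, with hypothetical names:

        static ssize_t example_store(struct device *d,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
        {
                ssize_t ret = count;

                if (!rtnl_trylock())
                        return restart_syscall();

                /* ... update state that must only change under RTNL ... */

                rtnl_unlock();
                return ret;
        }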
@@ -1315,7 +1319,6 @@ static ssize_t bonding_show_mii_status(struct device *d,
 }
 static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
 
-
 /*
  * Show current 802.3ad aggregator ID.
  */
@@ -1329,7 +1332,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
        if (bond->params.mode == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
-                               (bond_3ad_get_active_agg_info(bond, &ad_info))
+                               bond_3ad_get_active_agg_info(bond, &ad_info)
                                ?  0 : ad_info.aggregator_id);
        }
 
@@ -1351,7 +1354,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
        if (bond->params.mode == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
-                               (bond_3ad_get_active_agg_info(bond, &ad_info))
+                               bond_3ad_get_active_agg_info(bond, &ad_info)
                                ?  0 : ad_info.ports);
        }
 
@@ -1373,7 +1376,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
        if (bond->params.mode == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
-                               (bond_3ad_get_active_agg_info(bond, &ad_info))
+                               bond_3ad_get_active_agg_info(bond, &ad_info)
                                ?  0 : ad_info.actor_key);
        }
 
@@ -1395,7 +1398,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
        if (bond->params.mode == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
-                               (bond_3ad_get_active_agg_info(bond, &ad_info))
+                               bond_3ad_get_active_agg_info(bond, &ad_info)
                                ?  0 : ad_info.partner_key);
        }
 
index 7ffc756131a214a7b896f81d05e9c499c12cf752..547098086773cd06a936a4010bb8b66ffc933dfd 100644 (file)
@@ -43,7 +43,7 @@ config CAIF_HSI
 
 config CAIF_VIRTIO
        tristate "CAIF virtio transport driver"
-       depends on CAIF
+       depends on CAIF && HAS_DMA
        select VHOST_RING
        select VIRTIO
        select GENERIC_ALLOCATOR
index 9b74d1e3ad44a51c950257b6588bf082247a75d4..6aa7b3266c80904d8d2f2106869085a8b19d5248 100644 (file)
@@ -612,9 +612,15 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
 {
        struct esd_usb2 *dev = priv->usb2;
        struct net_device *netdev = priv->netdev;
-       struct esd_usb2_msg msg;
+       struct esd_usb2_msg *msg;
        int err, i;
 
+       msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+       if (!msg) {
+               err = -ENOMEM;
+               goto out;
+       }
+
        /*
         * Enable all IDs
         * The IDADD message takes up to 64 32 bit bitmasks (2048 bits).
@@ -628,33 +634,32 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
         * the number of the starting bitmask (0..64) to the filter.option
         * field followed by only some bitmasks.
         */
-       msg.msg.hdr.cmd = CMD_IDADD;
-       msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
-       msg.msg.filter.net = priv->index;
-       msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+       msg->msg.hdr.cmd = CMD_IDADD;
+       msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+       msg->msg.filter.net = priv->index;
+       msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
        for (i = 0; i < ESD_MAX_ID_SEGMENT; i++)
-               msg.msg.filter.mask[i] = cpu_to_le32(0xffffffff);
+               msg->msg.filter.mask[i] = cpu_to_le32(0xffffffff);
        /* enable 29bit extended IDs */
-       msg.msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
+       msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
 
-       err = esd_usb2_send_msg(dev, &msg);
+       err = esd_usb2_send_msg(dev, msg);
        if (err)
-               goto failed;
+               goto out;
 
        err = esd_usb2_setup_rx_urbs(dev);
        if (err)
-               goto failed;
+               goto out;
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
-       return 0;
-
-failed:
+out:
        if (err == -ENODEV)
                netif_device_detach(netdev);
+       if (err)
+               netdev_err(netdev, "couldn't start device: %d\n", err);
 
-       netdev_err(netdev, "couldn't start device: %d\n", err);
-
+       kfree(msg);
        return err;
 }
 
@@ -833,26 +838,30 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
 static int esd_usb2_close(struct net_device *netdev)
 {
        struct esd_usb2_net_priv *priv = netdev_priv(netdev);
-       struct esd_usb2_msg msg;
+       struct esd_usb2_msg *msg;
        int i;
 
+       msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
        /* Disable all IDs (see esd_usb2_start()) */
-       msg.msg.hdr.cmd = CMD_IDADD;
-       msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
-       msg.msg.filter.net = priv->index;
-       msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+       msg->msg.hdr.cmd = CMD_IDADD;
+       msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+       msg->msg.filter.net = priv->index;
+       msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
        for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
-               msg.msg.filter.mask[i] = 0;
-       if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
+               msg->msg.filter.mask[i] = 0;
+       if (esd_usb2_send_msg(priv->usb2, msg) < 0)
                netdev_err(netdev, "sending idadd message failed\n");
 
        /* set CAN controller to reset mode */
-       msg.msg.hdr.len = 2;
-       msg.msg.hdr.cmd = CMD_SETBAUD;
-       msg.msg.setbaud.net = priv->index;
-       msg.msg.setbaud.rsvd = 0;
-       msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
-       if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
+       msg->msg.hdr.len = 2;
+       msg->msg.hdr.cmd = CMD_SETBAUD;
+       msg->msg.setbaud.net = priv->index;
+       msg->msg.setbaud.rsvd = 0;
+       msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
+       if (esd_usb2_send_msg(priv->usb2, msg) < 0)
                netdev_err(netdev, "sending setbaud message failed\n");
 
        priv->can.state = CAN_STATE_STOPPED;
@@ -861,6 +870,8 @@ static int esd_usb2_close(struct net_device *netdev)
 
        close_candev(netdev);
 
+       kfree(msg);
+
        return 0;
 }
 
@@ -886,7 +897,8 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
 {
        struct esd_usb2_net_priv *priv = netdev_priv(netdev);
        struct can_bittiming *bt = &priv->can.bittiming;
-       struct esd_usb2_msg msg;
+       struct esd_usb2_msg *msg;
+       int err;
        u32 canbtr;
        int sjw_shift;
 
@@ -912,15 +924,22 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
        if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
                canbtr |= ESD_USB2_3_SAMPLES;
 
-       msg.msg.hdr.len = 2;
-       msg.msg.hdr.cmd = CMD_SETBAUD;
-       msg.msg.setbaud.net = priv->index;
-       msg.msg.setbaud.rsvd = 0;
-       msg.msg.setbaud.baud = cpu_to_le32(canbtr);
+       msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       msg->msg.hdr.len = 2;
+       msg->msg.hdr.cmd = CMD_SETBAUD;
+       msg->msg.setbaud.net = priv->index;
+       msg->msg.setbaud.rsvd = 0;
+       msg->msg.setbaud.baud = cpu_to_le32(canbtr);
 
        netdev_info(netdev, "setting BTR=%#x\n", canbtr);
 
-       return esd_usb2_send_msg(priv->usb2, &msg);
+       err = esd_usb2_send_msg(priv->usb2, msg);
+
+       kfree(msg);
+       return err;
 }
 
 static int esd_usb2_get_berr_counter(const struct net_device *netdev,
@@ -1022,7 +1041,7 @@ static int esd_usb2_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
 {
        struct esd_usb2 *dev;
-       struct esd_usb2_msg msg;
+       struct esd_usb2_msg *msg;
        int i, err;
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1037,27 +1056,33 @@ static int esd_usb2_probe(struct usb_interface *intf,
 
        usb_set_intfdata(intf, dev);
 
+       msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+       if (!msg) {
+               err = -ENOMEM;
+               goto free_msg;
+       }
+
        /* query number of CAN interfaces (nets) */
-       msg.msg.hdr.cmd = CMD_VERSION;
-       msg.msg.hdr.len = 2;
-       msg.msg.version.rsvd = 0;
-       msg.msg.version.flags = 0;
-       msg.msg.version.drv_version = 0;
+       msg->msg.hdr.cmd = CMD_VERSION;
+       msg->msg.hdr.len = 2;
+       msg->msg.version.rsvd = 0;
+       msg->msg.version.flags = 0;
+       msg->msg.version.drv_version = 0;
 
-       err = esd_usb2_send_msg(dev, &msg);
+       err = esd_usb2_send_msg(dev, msg);
        if (err < 0) {
                dev_err(&intf->dev, "sending version message failed\n");
-               goto free_dev;
+               goto free_msg;
        }
 
-       err = esd_usb2_wait_msg(dev, &msg);
+       err = esd_usb2_wait_msg(dev, msg);
        if (err < 0) {
                dev_err(&intf->dev, "no version message answer\n");
-               goto free_dev;
+               goto free_msg;
        }
 
-       dev->net_count = (int)msg.msg.version_reply.nets;
-       dev->version = le32_to_cpu(msg.msg.version_reply.version);
+       dev->net_count = (int)msg->msg.version_reply.nets;
+       dev->version = le32_to_cpu(msg->msg.version_reply.version);
 
        if (device_create_file(&intf->dev, &dev_attr_firmware))
                dev_err(&intf->dev,
@@ -1075,10 +1100,10 @@ static int esd_usb2_probe(struct usb_interface *intf,
        for (i = 0; i < dev->net_count; i++)
                esd_usb2_probe_one_net(intf, i);
 
-       return 0;
-
-free_dev:
-       kfree(dev);
+free_msg:
+       kfree(msg);
+       if (err)
+               kfree(dev);
 done:
        return err;
 }
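
The esd_usb2 changes move every command buffer from the stack to kmalloc(). USB transfer buffers may be DMA-mapped by the host controller driver and therefore must not live on the stack, so each path that builds an esd_usb2_msg now allocates and frees it, as in the hunks above:

        struct esd_usb2_msg *msg;
        int err;

        msg = kmalloc(sizeof(*msg), GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        /* fill in msg->msg.hdr and the command-specific payload ... */
        err = esd_usb2_send_msg(dev, msg);

        kfree(msg);
        return err;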
index 45cb9f3c1324817a912c4541358b34eecce818ea..3b95465882401fc556c647071b8c27f5724a8dbf 100644 (file)
 #define KVASER_CTRL_MODE_SELFRECEPTION 3
 #define KVASER_CTRL_MODE_OFF           4
 
+/* log message */
+#define KVASER_EXTENDED_FRAME          BIT(31)
+
 struct kvaser_msg_simple {
        u8 tid;
        u8 channel;
@@ -817,8 +820,13 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
        priv = dev->nets[channel];
        stats = &priv->netdev->stats;
 
-       if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR |
-                                 MSG_FLAG_OVERRUN)) {
+       if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) &&
+           (msg->id == CMD_LOG_MESSAGE)) {
+               kvaser_usb_rx_error(dev, msg);
+               return;
+       } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
+                                        MSG_FLAG_NERR |
+                                        MSG_FLAG_OVERRUN)) {
                kvaser_usb_rx_can_err(priv, msg);
                return;
        } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
@@ -834,22 +842,40 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
                return;
        }
 
-       cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
-                    (msg->u.rx_can.msg[1] & 0x3f);
-       cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+       if (msg->id == CMD_LOG_MESSAGE) {
+               cf->can_id = le32_to_cpu(msg->u.log_message.id);
+               if (cf->can_id & KVASER_EXTENDED_FRAME)
+                       cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+               else
+                       cf->can_id &= CAN_SFF_MASK;
 
-       if (msg->id == CMD_RX_EXT_MESSAGE) {
-               cf->can_id <<= 18;
-               cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
-                             ((msg->u.rx_can.msg[3] & 0xff) << 6) |
-                             (msg->u.rx_can.msg[4] & 0x3f);
-               cf->can_id |= CAN_EFF_FLAG;
-       }
+               cf->can_dlc = get_can_dlc(msg->u.log_message.dlc);
 
-       if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
-               cf->can_id |= CAN_RTR_FLAG;
-       else
-               memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc);
+               if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME)
+                       cf->can_id |= CAN_RTR_FLAG;
+               else
+                       memcpy(cf->data, &msg->u.log_message.data,
+                              cf->can_dlc);
+       } else {
+               cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
+                            (msg->u.rx_can.msg[1] & 0x3f);
+
+               if (msg->id == CMD_RX_EXT_MESSAGE) {
+                       cf->can_id <<= 18;
+                       cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
+                                     ((msg->u.rx_can.msg[3] & 0xff) << 6) |
+                                     (msg->u.rx_can.msg[4] & 0x3f);
+                       cf->can_id |= CAN_EFF_FLAG;
+               }
+
+               cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
+
+               if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
+                       cf->can_id |= CAN_RTR_FLAG;
+               else
+                       memcpy(cf->data, &msg->u.rx_can.msg[6],
+                              cf->can_dlc);
+       }
 
        netif_rx(skb);
 
@@ -911,6 +937,7 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
 
        case CMD_RX_STD_MESSAGE:
        case CMD_RX_EXT_MESSAGE:
+       case CMD_LOG_MESSAGE:
                kvaser_usb_rx_can_msg(dev, msg);
                break;
 
@@ -919,11 +946,6 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
                kvaser_usb_rx_error(dev, msg);
                break;
 
-       case CMD_LOG_MESSAGE:
-               if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME)
-                       kvaser_usb_rx_error(dev, msg);
-               break;
-
        case CMD_TX_ACKNOWLEDGE:
                kvaser_usb_tx_acknowledge(dev, msg);
                break;
index 30d79bfa5b109e5d6d212df46658a4cf1216d6e1..8ee9d1556e6e4eb3b8d32cfb988ba9870fccffaa 100644 (file)
@@ -504,15 +504,24 @@ static int pcan_usb_pro_restart_async(struct peak_usb_device *dev,
        return usb_submit_urb(urb, GFP_ATOMIC);
 }
 
-static void pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
+static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
 {
-       u8 buffer[16];
+       u8 *buffer;
+       int err;
+
+       buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
 
        buffer[0] = 0;
        buffer[1] = !!loaded;
 
-       pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT,
-                             PCAN_USBPRO_FCT_DRVLD, buffer, sizeof(buffer));
+       err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT,
+                                   PCAN_USBPRO_FCT_DRVLD, buffer,
+                                   PCAN_USBPRO_FCT_DRVLD_REQ_LEN);
+       kfree(buffer);
+
+       return err;
 }
 
 static inline
@@ -851,21 +860,24 @@ static int pcan_usb_pro_stop(struct peak_usb_device *dev)
  */
 static int pcan_usb_pro_init(struct peak_usb_device *dev)
 {
-       struct pcan_usb_pro_interface *usb_if;
        struct pcan_usb_pro_device *pdev =
                        container_of(dev, struct pcan_usb_pro_device, dev);
+       struct pcan_usb_pro_interface *usb_if = NULL;
+       struct pcan_usb_pro_fwinfo *fi = NULL;
+       struct pcan_usb_pro_blinfo *bi = NULL;
+       int err;
 
        /* do this for 1st channel only */
        if (!dev->prev_siblings) {
-               struct pcan_usb_pro_fwinfo fi;
-               struct pcan_usb_pro_blinfo bi;
-               int err;
-
                /* allocate netdevices common structure attached to first one */
                usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface),
                                 GFP_KERNEL);
-               if (!usb_if)
-                       return -ENOMEM;
+               fi = kmalloc(sizeof(struct pcan_usb_pro_fwinfo), GFP_KERNEL);
+               bi = kmalloc(sizeof(struct pcan_usb_pro_blinfo), GFP_KERNEL);
+               if (!usb_if || !fi || !bi) {
+                       err = -ENOMEM;
+                       goto err_out;
+               }
 
                /* number of ts msgs to ignore before taking one into account */
                usb_if->cm_ignore_count = 5;
@@ -877,34 +889,34 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
                 */
                err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
                                            PCAN_USBPRO_INFO_FW,
-                                           &fi, sizeof(fi));
+                                           fi, sizeof(*fi));
                if (err) {
-                       kfree(usb_if);
                        dev_err(dev->netdev->dev.parent,
                                "unable to read %s firmware info (err %d)\n",
                                pcan_usb_pro.name, err);
-                       return err;
+                       goto err_out;
                }
 
                err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
                                            PCAN_USBPRO_INFO_BL,
-                                           &bi, sizeof(bi));
+                                           bi, sizeof(*bi));
                if (err) {
-                       kfree(usb_if);
                        dev_err(dev->netdev->dev.parent,
                                "unable to read %s bootloader info (err %d)\n",
                                pcan_usb_pro.name, err);
-                       return err;
+                       goto err_out;
                }
 
+               /* tell the device the CAN driver is running */
+               err = pcan_usb_pro_drv_loaded(dev, 1);
+               if (err)
+                       goto err_out;
+
                dev_info(dev->netdev->dev.parent,
                     "PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n",
                     pcan_usb_pro.name,
-                    bi.hw_rev, bi.serial_num_hi, bi.serial_num_lo,
+                    bi->hw_rev, bi->serial_num_hi, bi->serial_num_lo,
                     pcan_usb_pro.ctrl_count);
-
-               /* tell the device the can driver is running */
-               pcan_usb_pro_drv_loaded(dev, 1);
        } else {
                usb_if = pcan_usb_pro_dev_if(dev->prev_siblings);
        }
@@ -916,6 +928,13 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
        pcan_usb_pro_set_led(dev, 0, 1);
 
        return 0;
+
+ err_out:
+       kfree(bi);
+       kfree(fi);
+       kfree(usb_if);
+
+       return err;
 }
 
 static void pcan_usb_pro_exit(struct peak_usb_device *dev)
index a869918c5620ea8977e9c12e47cd1fadb9b0cea1..32275af547e06becb9d4150030012e52c753bd16 100644 (file)
@@ -29,6 +29,7 @@
 
 /* Vendor Request value for XXX_FCT */
 #define PCAN_USBPRO_FCT_DRVLD          5 /* tell device driver is loaded */
+#define PCAN_USBPRO_FCT_DRVLD_REQ_LEN  16
 
 /* PCAN_USBPRO_INFO_BL vendor request record type */
 struct __packed pcan_usb_pro_blinfo {
index de570a8f896742f5e0c345579b736d8030326356..072c6f14e8fcea3c88f04d74d68afa73aef60554 100644 (file)
@@ -632,7 +632,6 @@ struct vortex_private {
                pm_state_valid:1,                               /* pci_dev->saved_config_space has sane contents */
                open:1,
                medialock:1,
-               must_free_region:1,                             /* Flag: if zero, Cardbus owns the I/O region */
                large_frames:1,                 /* accept large frames */
                handling_irq:1;                 /* private in_irq indicator */
        /* {get|set}_wol operations are already serialized by rtnl.
@@ -1012,6 +1011,12 @@ static int vortex_init_one(struct pci_dev *pdev,
        if (rc < 0)
                goto out;
 
+       rc = pci_request_regions(pdev, DRV_NAME);
+       if (rc < 0) {
+               pci_disable_device(pdev);
+               goto out;
+       }
+
        unit = vortex_cards_found;
 
        if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
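
The 3c59x patch replaces the driver's private request_region()/must_free_region bookkeeping with pci_request_regions(), claimed once in vortex_init_one() and released in vortex_remove_one(). The important property is symmetry: once the regions are held, every later failure path has to release them before disabling the device, roughly:

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc < 0) {
                pci_disable_device(pdev);
                return rc;
        }

        ioaddr = pci_iomap(pdev, bar, 0);       /* bar chosen earlier */
        if (!ioaddr) {
                pci_release_regions(pdev);      /* undo in reverse order */
                pci_disable_device(pdev);
                return -ENOMEM;
        }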
@@ -1027,6 +1032,7 @@ static int vortex_init_one(struct pci_dev *pdev,
        if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
                ioaddr = pci_iomap(pdev, 0, 0);
        if (!ioaddr) {
+               pci_release_regions(pdev);
                pci_disable_device(pdev);
                rc = -ENOMEM;
                goto out;
@@ -1036,6 +1042,7 @@ static int vortex_init_one(struct pci_dev *pdev,
                           ent->driver_data, unit);
        if (rc < 0) {
                pci_iounmap(pdev, ioaddr);
+               pci_release_regions(pdev);
                pci_disable_device(pdev);
                goto out;
        }
@@ -1178,11 +1185,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 
        /* PCI-only startup logic */
        if (pdev) {
-               /* EISA resources already marked, so only PCI needs to do this here */
-               /* Ignore return value, because Cardbus drivers already allocate for us */
-               if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
-                       vp->must_free_region = 1;
-
                /* enable bus-mastering if necessary */
                if (vci->flags & PCI_USES_MASTER)
                        pci_set_master(pdev);
@@ -1220,7 +1222,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
                                           &vp->rx_ring_dma);
        retval = -ENOMEM;
        if (!vp->rx_ring)
-               goto free_region;
+               goto free_device;
 
        vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
        vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
@@ -1484,9 +1486,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
                                                        + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
                                                vp->rx_ring,
                                                vp->rx_ring_dma);
-free_region:
-       if (vp->must_free_region)
-               release_region(dev->base_addr, vci->io_size);
+free_device:
        free_netdev(dev);
        pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
 out:
@@ -3254,8 +3254,9 @@ static void vortex_remove_one(struct pci_dev *pdev)
                                                        + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
                                                vp->rx_ring,
                                                vp->rx_ring_dma);
-       if (vp->must_free_region)
-               release_region(dev->base_addr, vp->io_size);
+
+       pci_release_regions(pdev);
+
        free_netdev(dev);
 }
 
index b8fbe266ab68f1619a18b7e49e9d6df19a5f0625..638e55435b04f474ff841698cc46cc09f6da1a6f 100644 (file)
@@ -3192,11 +3192,11 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
                rc |= XMIT_CSUM_TCP;
 
        if (skb_is_gso_v6(skb)) {
-               rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+               rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
                if (rc & XMIT_CSUM_ENC)
                        rc |= XMIT_GSO_ENC_V6;
        } else if (skb_is_gso(skb)) {
-               rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+               rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
                if (rc & XMIT_CSUM_ENC)
                        rc |= XMIT_GSO_ENC_V4;
        }
@@ -3313,6 +3313,7 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
  */
 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
                              struct eth_tx_parse_bd_e1x *pbd,
+                             struct eth_tx_start_bd *tx_start_bd,
                              u32 xmit_type)
 {
        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
@@ -3326,11 +3327,14 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb,
                                                   ip_hdr(skb)->daddr,
                                                   0, IPPROTO_TCP, 0));
 
-       } else
+               /* GSO on 57710/57711 needs FW to calculate IP checksum */
+               tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
+       } else {
                pbd->tcp_pseudo_csum =
                        bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                 &ipv6_hdr(skb)->daddr,
                                                 0, IPPROTO_TCP, 0));
+       }
 
        pbd->global_data |=
                cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
@@ -3479,19 +3483,18 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
 {
        u16 hlen_w = 0;
        u8 outerip_off, outerip_len = 0;
+
        /* from outer IP to transport */
        hlen_w = (skb_inner_transport_header(skb) -
                  skb_network_header(skb)) >> 1;
 
        /* transport len */
-       if (xmit_type & XMIT_CSUM_TCP)
-               hlen_w += inner_tcp_hdrlen(skb) >> 1;
-       else
-               hlen_w += sizeof(struct udphdr) >> 1;
+       hlen_w += inner_tcp_hdrlen(skb) >> 1;
 
        pbd2->fw_ip_hdr_to_payload_w = hlen_w;
 
-       if (xmit_type & XMIT_CSUM_ENC_V4) {
+       /* outer IP header info */
+       if (xmit_type & XMIT_CSUM_V4) {
                struct iphdr *iph = ip_hdr(skb);
                pbd2->fw_ip_csum_wo_len_flags_frag =
                        bswab16(csum_fold((~iph->check) -
@@ -3814,7 +3817,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
                                             xmit_type);
                else
-                       bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
+                       bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
        }
 
        /* Set the PBD's parsing_data field if not zero
index 728d42ab2a7636e4656a28174ac2b8c9159c60f0..0f493c8dc28b631039cf87ad266e7ffa19601e69 100644 (file)
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    131
+#define TG3_MIN_NUM                    132
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "April 09, 2013"
+#define DRV_MODULE_RELDATE     "May 21, 2013"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -2957,6 +2957,31 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
        return 0;
 }
 
+static bool tg3_phy_power_bug(struct tg3 *tp)
+{
+       switch (tg3_asic_rev(tp)) {
+       case ASIC_REV_5700:
+       case ASIC_REV_5704:
+               return true;
+       case ASIC_REV_5780:
+               if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+                       return true;
+               return false;
+       case ASIC_REV_5717:
+               if (!tp->pci_fn)
+                       return true;
+               return false;
+       case ASIC_REV_5719:
+       case ASIC_REV_5720:
+               if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+                   !tp->pci_fn)
+                       return true;
+               return false;
+       }
+
+       return false;
+}
+
 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 {
        u32 val;
@@ -3016,12 +3041,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
-       if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
-           tg3_asic_rev(tp) == ASIC_REV_5704 ||
-           (tg3_asic_rev(tp) == ASIC_REV_5780 &&
-            (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
-           (tg3_asic_rev(tp) == ASIC_REV_5717 &&
-            !tp->pci_fn))
+       if (tg3_phy_power_bug(tp))
                return;
 
        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
@@ -7428,6 +7448,20 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
        return (base > 0xffffdcc0) && (base + len + 8 < base);
 }
 
+/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
+ * of any 4GB boundaries: 4G, 8G, etc
+ */
+static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+                                          u32 len, u32 mss)
+{
+       if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
+               u32 base = (u32) mapping & 0xffffffff;
+
+               return ((base + len + (mss & 0x3fff)) < base);
+       }
+       return 0;
+}
+
 /* Test for DMA addresses > 40-bit */
 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
@@ -7464,6 +7498,9 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
        if (tg3_4g_overflow_test(map, len))
                hwbug = true;
 
+       if (tg3_4g_tso_overflow_test(tp, map, len, mss))
+               hwbug = true;
+
        if (tg3_40bit_overflow_test(tp, map, len))
                hwbug = true;
 
@@ -8874,6 +8911,10 @@ static int tg3_chip_reset(struct tg3 *tp)
                tg3_halt_cpu(tp, RX_CPU_BASE);
        }
 
+       err = tg3_poll_fw(tp);
+       if (err)
+               return err;
+
        tw32(GRC_MODE, tp->grc_mode);
 
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
@@ -8904,10 +8945,6 @@ static int tg3_chip_reset(struct tg3 *tp)
 
        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
 
-       err = tg3_poll_fw(tp);
-       if (err)
-               return err;
-
        tg3_mdio_start(tp);
 
        if (tg3_flag(tp, PCI_EXPRESS) &&
@@ -9431,6 +9468,14 @@ static void tg3_rss_write_indir_tbl(struct tg3 *tp)
        }
 }
 
+static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
+{
+       if (tg3_asic_rev(tp) == ASIC_REV_5719)
+               return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
+       else
+               return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
+}
+
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
 {
@@ -10116,16 +10161,17 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
        tw32_f(RDMAC_MODE, rdmac_mode);
        udelay(40);
 
-       if (tg3_asic_rev(tp) == ASIC_REV_5719) {
+       if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+           tg3_asic_rev(tp) == ASIC_REV_5720) {
                for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
                        if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
                                break;
                }
                if (i < TG3_NUM_RDMA_CHANNELS) {
                        val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
-                       val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
+                       val |= tg3_lso_rd_dma_workaround_bit(tp);
                        tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
-                       tg3_flag_set(tp, 5719_RDMA_BUG);
+                       tg3_flag_set(tp, 5719_5720_RDMA_BUG);
                }
        }
 
@@ -10489,15 +10535,15 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
-       if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
+       if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
                     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
                      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
                u32 val;
 
                val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
-               val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
+               val &= ~tg3_lso_rd_dma_workaround_bit(tp);
                tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
-               tg3_flag_clear(tp, 5719_RDMA_BUG);
+               tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
        }
 
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
index 9b2d3ac2474adda8e25398460f33d0abca38b05a..ff6e30eeae354e11895ae3e77bef4644050dd730 100644 (file)
 #define TG3_LSO_RD_DMA_CRPTEN_CTRL     0x00004910
 #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K   0x00030000
 #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K  0x000c0000
-#define TG3_LSO_RD_DMA_TX_LENGTH_WA     0x02000000
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719        0x02000000
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720        0x00200000
 /* 0x4914 --> 0x4be0 unused */
 
 #define TG3_NUM_RDMA_CHANNELS          4
@@ -3059,7 +3060,7 @@ enum TG3_FLAGS {
        TG3_FLAG_APE_HAS_NCSI,
        TG3_FLAG_TX_TSTAMP_EN,
        TG3_FLAG_4K_FIFO_LIMIT,
-       TG3_FLAG_5719_RDMA_BUG,
+       TG3_FLAG_5719_5720_RDMA_BUG,
        TG3_FLAG_RESET_TASK_PENDING,
        TG3_FLAG_PTP_CAPABLE,
        TG3_FLAG_5705_PLUS,
index ce4a030d3d0cab9705e8715b6169a9031b0fd840..07f7ef05c3f20b00b3b3f287b412deb1ff747b35 100644 (file)
@@ -3236,9 +3236,10 @@ bnad_init(struct bnad *bnad,
 
        sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
        bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
-
-       if (!bnad->work_q)
+       if (!bnad->work_q) {
+               iounmap(bnad->bar0);
                return -ENOMEM;
+       }
 
        return 0;
 }
index 1194446f859a0018f2be94d945b03a64a568d861..768285ec10f4d427362cc595be307e8e4d447eb0 100644 (file)
@@ -22,7 +22,7 @@ if NET_CADENCE
 
 config ARM_AT91_ETHER
        tristate "AT91RM9200 Ethernet support"
-       depends on GENERIC_HARDIRQS
+       depends on GENERIC_HARDIRQS && HAS_DMA
        select NET_CORE
        select MACB
        ---help---
@@ -31,6 +31,7 @@ config ARM_AT91_ETHER
 
 config MACB
        tristate "Cadence MACB/GEM support"
+       depends on HAS_DMA
        select PHYLIB
        ---help---
          The Cadence MACB ethernet interface is found on many Atmel AT32 and
index 6be513deb17f69b73be4b321821e99d3ec7a17cd..c89aa41dd44873f02fbea97c6af3f3cd6132dcbd 100644 (file)
@@ -485,7 +485,8 @@ static void macb_tx_interrupt(struct macb *bp)
        status = macb_readl(bp, TSR);
        macb_writel(bp, TSR, status);
 
-       macb_writel(bp, ISR, MACB_BIT(TCOMP));
+       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+               macb_writel(bp, ISR, MACB_BIT(TCOMP));
 
        netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
                (unsigned long)status);
@@ -738,7 +739,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                         * now.
                         */
                        macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
-                       macb_writel(bp, ISR, MACB_BIT(RCOMP));
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_BIT(RCOMP));
 
                        if (napi_schedule_prep(&bp->napi)) {
                                netdev_vdbg(bp->dev, "scheduling RX softirq\n");
@@ -1062,6 +1064,17 @@ static void macb_configure_dma(struct macb *bp)
        }
 }
 
+/*
+ * Configure peripheral capabilities according to the integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+       if (macb_is_gem(bp)) {
+               if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0)
+                       bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+       }
+}
+
 static void macb_init_hw(struct macb *bp)
 {
        u32 config;
@@ -1084,6 +1097,7 @@ static void macb_init_hw(struct macb *bp)
        bp->duplex = DUPLEX_HALF;
 
        macb_configure_dma(bp);
+       macb_configure_caps(bp);
 
        /* Initialize TX and RX buffers */
        macb_writel(bp, RBQP, bp->rx_ring_dma);
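
The macb patch adds a small capability word to struct macb: macb_configure_caps() reads the GEM DCFG1 register once at init time and, when the IRQCOR field is zero (the interrupt status register is apparently not clear-on-read in that configuration), sets MACB_CAPS_ISR_CLEAR_ON_WRITE. The interrupt paths then test the cached flag instead of re-reading hardware, as in the hunks above:

        /* in the TX completion handler, for example */
        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                macb_writel(bp, ISR, MACB_BIT(TCOMP));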
index 993d703806885d9f3ebedd4f5c035bfdd03306d8..548c0ecae8697e5ffb0006819f7670b9fba813f8 100644 (file)
 #define MACB_REV_SIZE                          16
 
 /* Bitfields in DCFG1. */
+#define GEM_IRQCOR_OFFSET                      23
+#define GEM_IRQCOR_SIZE                                1
 #define GEM_DBWDEF_OFFSET                      25
 #define GEM_DBWDEF_SIZE                                3
 
 #define MACB_MAN_READ                          2
 #define MACB_MAN_CODE                          2
 
+/* Capability mask bits */
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE           0x1
+
 /* Bit manipulation macros */
 #define MACB_BIT(name)                                 \
        (1 << MACB_##name##_OFFSET)
@@ -574,6 +579,8 @@ struct macb {
        unsigned int            speed;
        unsigned int            duplex;
 
+       u32                     caps;
+
        phy_interface_t         phy_interface;
 
        /* AT91RM9200 transmit */
index aba435c3d4ae9fa315cbb3781447cb0b1ac1470e..184a063bed5fa59bbdc705e29355134a2d31d731 100644 (file)
@@ -1,6 +1,6 @@
 config NET_CALXEDA_XGMAC
        tristate "Calxeda 1G/10G XGMAC Ethernet driver"
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && HAS_DMA
        select CRC32
        help
          This is the driver for the XGMAC Ethernet IP block found on Calxeda
index f544b297c9abef7b7ee73fa3a5fd4eb2b9921f42..0a510684e46857a66b8c969b63ea728b8ec446b9 100644 (file)
@@ -262,6 +262,7 @@ struct be_rx_compl_info {
        u8 ipv6;
        u8 vtm;
        u8 pkt_type;
+       u8 ip_frag;
 };
 
 struct be_rx_obj {
index fd7b547698abd89f829008e717e34f2fc9d9e2f9..1db2df61b8af65d5b0ca32534b8b6a96e9a2c794 100644 (file)
@@ -562,7 +562,7 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
 
        resource_error = lancer_provisioning_error(adapter);
        if (resource_error)
-               return -1;
+               return -EAGAIN;
 
        status = lancer_wait_ready(adapter);
        if (!status) {
@@ -590,8 +590,8 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
         * when PF provisions resources.
         */
        resource_error = lancer_provisioning_error(adapter);
-       if (status == -1 && !resource_error)
-               adapter->eeh_error = true;
+       if (resource_error)
+               status = -EAGAIN;
 
        return status;
 }
@@ -2976,22 +2976,17 @@ static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
        for (i = 0; i < desc_count; i++) {
                desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
                if (((void *)desc + desc->desc_len) >
-                   (void *)(buf + max_buf_size)) {
-                       desc = NULL;
-                       break;
-               }
+                   (void *)(buf + max_buf_size))
+                       return NULL;
 
                if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
                    desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
-                       break;
+                       return desc;
 
                desc = (void *)desc + desc->desc_len;
        }
 
-       if (!desc || i == MAX_RESOURCE_DESC)
-               return NULL;
-
-       return desc;
+       return NULL;
 }
 
 /* Uses Mbox */
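
The be_get_nic_desc() rework returns from the loop as soon as the outcome is known, which removes the desc = NULL sentinel and the post-loop check. The same refactor on a generic descriptor search, as a self-contained sketch (struct entry and the bounds rule are illustrative):

    #include <linux/types.h>

    struct entry {
            int type;
            int len;
    };

    /* Return the first entry of the wanted type, or NULL.  Returning
     * directly from the loop avoids a sentinel and a post-loop test. */
    static struct entry *find_entry(void *buf, size_t buf_len,
                                    int count, int wanted_type)
    {
            struct entry *e = buf;
            int i;

            for (i = 0; i < count; i++) {
                    if ((void *)e + e->len > buf + buf_len)
                            return NULL;            /* ran past the buffer */
                    if (e->type == wanted_type)
                            return e;               /* found it */
                    e = (void *)e + e->len;
            }

            return NULL;                            /* not in the list */
    }
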
index 3c1099b47f2a7013da331c9e5b00dca0f0b80946..8780183c6d1c3aa3168e55d59c51c3f63e545267 100644 (file)
@@ -356,7 +356,7 @@ struct amap_eth_rx_compl_v0 {
        u8 ip_version;          /* dword 1 */
        u8 macdst[6];           /* dword 1 */
        u8 vtp;                 /* dword 1 */
-       u8 rsvd0;               /* dword 1 */
+       u8 ip_frag;             /* dword 1 */
        u8 fragndx[10];         /* dword 1 */
        u8 ct[2];               /* dword 1 */
        u8 sw;                  /* dword 1 */
index a444110b060fd74361759be2f9f3a25a4f0fe095..8bc1b21b1c790b44cffc347d7fe365f22f3a7ee2 100644 (file)
@@ -780,26 +780,18 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
        if (unlikely(!skb))
                return skb;
 
-       if (vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-               skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-               if (skb)
-                       skb->vlan_tci = 0;
-       }
-
-       if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
-               if (!vlan_tag)
-                       vlan_tag = adapter->pvid;
-               if (skip_hw_vlan)
-                       *skip_hw_vlan = true;
-       }
+       else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
+               vlan_tag = adapter->pvid;
 
        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
-
                skb->vlan_tci = 0;
+               if (skip_hw_vlan)
+                       *skip_hw_vlan = true;
        }
 
        /* Insert the outer VLAN, if any */
@@ -1607,6 +1599,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
+       rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+                                     ip_frag, compl);
 }
 
 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1628,6 +1622,9 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
        else
                be_parse_rx_compl_v0(compl, rxcp);
 
+       if (rxcp->ip_frag)
+               rxcp->l4_csum = 0;
+
        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
@@ -2176,7 +2173,7 @@ static irqreturn_t be_msix(int irq, void *dev)
 
 static inline bool do_gro(struct be_rx_compl_info *rxcp)
 {
-       return (rxcp->tcpf && !rxcp->err) ? true : false;
+       return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
 }
 
 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
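
Marking IP fragments as having no validated L4 checksum, and then requiring a validated checksum in do_gro(), keeps fragmented or unverified packets out of GRO and on the regular receive path. A hedged sketch of the same gating, with hypothetical field names:

    #include <linux/types.h>

    struct rx_info {
            bool is_tcp;
            bool err;
            bool ip_frag;
            bool l4_csum_ok;
    };

    static void fixup_rx_info(struct rx_info *rx)
    {
            /* Hardware cannot validate L4 checksums on IP fragments. */
            if (rx->ip_frag)
                    rx->l4_csum_ok = false;
    }

    static bool want_gro(const struct rx_info *rx)
    {
            return rx->is_tcp && !rx->err && rx->l4_csum_ok;
    }
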
@@ -4101,6 +4098,7 @@ static int be_get_initial_config(struct be_adapter *adapter)
 
 static int lancer_recover_func(struct be_adapter *adapter)
 {
+       struct device *dev = &adapter->pdev->dev;
        int status;
 
        status = lancer_test_and_set_rdy_state(adapter);
@@ -4112,8 +4110,7 @@ static int lancer_recover_func(struct be_adapter *adapter)
 
        be_clear(adapter);
 
-       adapter->hw_error = false;
-       adapter->fw_timeout = false;
+       be_clear_all_error(adapter);
 
        status = be_setup(adapter);
        if (status)
@@ -4125,13 +4122,13 @@ static int lancer_recover_func(struct be_adapter *adapter)
                        goto err;
        }
 
-       dev_err(&adapter->pdev->dev,
-               "Adapter SLIPORT recovery succeeded\n");
+       dev_err(dev, "Error recovery successful\n");
        return 0;
 err:
-       if (adapter->eeh_error)
-               dev_err(&adapter->pdev->dev,
-                       "Adapter SLIPORT recovery failed\n");
+       if (status == -EAGAIN)
+               dev_err(dev, "Waiting for resource provisioning\n");
+       else
+               dev_err(dev, "Error recovery failed\n");
 
        return status;
 }
@@ -4140,28 +4137,27 @@ static void be_func_recovery_task(struct work_struct *work)
 {
        struct be_adapter *adapter =
                container_of(work, struct be_adapter,  func_recovery_work.work);
-       int status;
+       int status = 0;
 
        be_detect_error(adapter);
 
        if (adapter->hw_error && lancer_chip(adapter)) {
 
-               if (adapter->eeh_error)
-                       goto out;
-
                rtnl_lock();
                netif_device_detach(adapter->netdev);
                rtnl_unlock();
 
                status = lancer_recover_func(adapter);
-
                if (!status)
                        netif_device_attach(adapter->netdev);
        }
 
-out:
-       schedule_delayed_work(&adapter->func_recovery_work,
-                             msecs_to_jiffies(1000));
+       /* In Lancer, for all errors other than provisioning error (-EAGAIN),
+        * no need to attempt further recovery.
+        */
+       if (!status || status == -EAGAIN)
+               schedule_delayed_work(&adapter->func_recovery_work,
+                                     msecs_to_jiffies(1000));
 }
 
 static void be_worker(struct work_struct *work)
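
The recovery worker now re-arms itself only while recovery can still make progress: success, or a firmware request to wait (-EAGAIN). A generic sketch of that self-rescheduling delayed-work pattern; my_adapter and try_recover() are hypothetical:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct my_adapter {
            bool hw_error;
            struct delayed_work recovery_work;
    };

    static int try_recover(struct my_adapter *a);   /* hypothetical helper */

    static void recovery_fn(struct work_struct *work)
    {
            struct my_adapter *a = container_of(work, struct my_adapter,
                                                recovery_work.work);
            int status = 0;

            if (a->hw_error)
                    status = try_recover(a);

            /* Re-arm only while recovery succeeded or should be retried. */
            if (!status || status == -EAGAIN)
                    schedule_delayed_work(&a->recovery_work,
                                          msecs_to_jiffies(1000));
    }
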
@@ -4444,20 +4440,19 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
 
        dev_err(&adapter->pdev->dev, "EEH error detected\n");
 
-       adapter->eeh_error = true;
-
-       cancel_delayed_work_sync(&adapter->func_recovery_work);
+       if (!adapter->eeh_error) {
+               adapter->eeh_error = true;
 
-       rtnl_lock();
-       netif_device_detach(netdev);
-       rtnl_unlock();
+               cancel_delayed_work_sync(&adapter->func_recovery_work);
 
-       if (netif_running(netdev)) {
                rtnl_lock();
-               be_close(netdev);
+               netif_device_detach(netdev);
+               if (netif_running(netdev))
+                       be_close(netdev);
                rtnl_unlock();
+
+               be_clear(adapter);
        }
-       be_clear(adapter);
 
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
@@ -4482,7 +4477,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
        int status;
 
        dev_info(&adapter->pdev->dev, "EEH reset\n");
-       be_clear_all_error(adapter);
 
        status = pci_enable_device(pdev);
        if (status)
@@ -4500,6 +4494,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
                return PCI_ERS_RESULT_DISCONNECT;
 
        pci_cleanup_aer_uncorrect_error_status(pdev);
+       be_clear_all_error(adapter);
        return PCI_ERS_RESULT_RECOVERED;
 }
 
index aff0310a778bf7afb545c4b53f995770f7f03ec2..a667015be22ada3a3bb85628620584e4fb1982e3 100644 (file)
@@ -87,6 +87,8 @@
 #define FEC_QUIRK_HAS_GBIT             (1 << 3)
 /* Controller has extend desc buffer */
 #define FEC_QUIRK_HAS_BUFDESC_EX       (1 << 4)
+/* Controller has hardware checksum support */
+#define FEC_QUIRK_HAS_CSUM             (1 << 5)
 
 static struct platform_device_id fec_devtype[] = {
        {
@@ -105,9 +107,9 @@ static struct platform_device_id fec_devtype[] = {
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
-                               FEC_QUIRK_HAS_BUFDESC_EX,
+                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM,
        }, {
-               .name = "mvf-fec",
+               .name = "mvf600-fec",
                .driver_data = FEC_QUIRK_ENET_MAC,
        }, {
                /* sentinel */
@@ -120,7 +122,7 @@ enum imx_fec_type {
        IMX27_FEC,      /* runs on i.mx27/35/51 */
        IMX28_FEC,
        IMX6Q_FEC,
-       MVF_FEC,
+       MVF600_FEC,
 };
 
 static const struct of_device_id fec_dt_ids[] = {
@@ -128,7 +130,7 @@ static const struct of_device_id fec_dt_ids[] = {
        { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
        { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
        { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
-       { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], },
+       { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -449,7 +451,7 @@ fec_restart(struct net_device *ndev, int duplex)
                netif_device_detach(ndev);
                napi_disable(&fep->napi);
                netif_stop_queue(ndev);
-               netif_tx_lock(ndev);
+               netif_tx_lock_bh(ndev);
        }
 
        /* Whack a reset.  We should wait for this. */
@@ -614,10 +616,10 @@ fec_restart(struct net_device *ndev, int duplex)
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 
        if (netif_running(ndev)) {
-               netif_device_attach(ndev);
-               napi_enable(&fep->napi);
+               netif_tx_unlock_bh(ndev);
                netif_wake_queue(ndev);
-               netif_tx_unlock(ndev);
+               napi_enable(&fep->napi);
+               netif_device_attach(ndev);
        }
 }
 
@@ -1036,6 +1038,18 @@ static void fec_get_mac(struct net_device *ndev)
                iap = &tmpaddr[0];
        }
 
+       /*
+        * 5) random mac address
+        */
+       if (!is_valid_ether_addr(iap)) {
+               /* Report it and use a random ethernet address instead */
+               netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+               eth_hw_addr_random(ndev);
+               netdev_info(ndev, "Using random MAC address: %pM\n",
+                           ndev->dev_addr);
+               return;
+       }
+
        memcpy(ndev->dev_addr, iap, ETH_ALEN);
 
        /* Adjust MAC if using macaddr */
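
Falling back to a random address keeps the interface usable when no valid MAC turns up in fuses, platform data or the device tree; eth_hw_addr_random() also marks the address as randomly assigned so userspace can tell it apart from a burned-in one. A minimal sketch:

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/string.h>

    static void pick_mac(struct net_device *ndev, const u8 *candidate)
    {
            if (!is_valid_ether_addr(candidate)) {
                    netdev_err(ndev, "invalid MAC %pM, using a random one\n",
                               candidate);
                    eth_hw_addr_random(ndev);       /* fills dev_addr, marks it random */
                    return;
            }

            memcpy(ndev->dev_addr, candidate, ETH_ALEN);
    }
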
@@ -1744,6 +1758,8 @@ static const struct net_device_ops fec_netdev_ops = {
 static int fec_enet_init(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
+       const struct platform_device_id *id_entry =
+                               platform_get_device_id(fep->pdev);
        struct bufdesc *cbd_base;
 
        /* Allocate memory for buffer descriptors. */
@@ -1775,12 +1791,14 @@ static int fec_enet_init(struct net_device *ndev)
        writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
        netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
 
-       /* enable hw accelerator */
-       ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-                       | NETIF_F_RXCSUM);
-       ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-                       | NETIF_F_RXCSUM);
-       fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+       if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+               /* enable hw accelerator */
+               ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+                               | NETIF_F_RXCSUM);
+               ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+                               | NETIF_F_RXCSUM);
+               fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+       }
 
        fec_restart(ndev, 0);
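
Gating the checksum-offload feature bits on FEC_QUIRK_HAS_CSUM keeps older FEC variants from advertising offloads the silicon lacks. The usual recipe is to carry quirk bits in platform_device_id.driver_data and test them when setting up netdev features; the names below are illustrative, not the FEC definitions:

    #include <linux/platform_device.h>
    #include <linux/netdevice.h>
    #include <linux/bitops.h>

    #define MY_QUIRK_HAS_CSUM   BIT(0)

    static const struct platform_device_id my_ids[] = {
            { .name = "old-mac", .driver_data = 0 },
            { .name = "new-mac", .driver_data = MY_QUIRK_HAS_CSUM },
            { /* sentinel */ }
    };

    static void my_setup_features(struct net_device *ndev,
                                  struct platform_device *pdev)
    {
            const struct platform_device_id *id = platform_get_device_id(pdev);

            /* Advertise checksum offload only on hardware that has it. */
            if (id->driver_data & MY_QUIRK_HAS_CSUM) {
                    ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
                    ndev->features    |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
            }
    }
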
 
index 576e4b858fce09d7bd00f1a7492daf598bbe1741..083ea2b4d20a4106e5ae3c993da3855504493f0f 100644 (file)
@@ -524,6 +524,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
        return 0;
 
 no_clock:
+       iounmap(etsects->regs);
 no_ioremap:
        release_resource(etsects->rsrc);
 no_resource:
index 4989481c19f01b6dad9b560b535b6127da416eee..d300a0c0eafc0521a4e86688abd7e54afd0ce58e 100644 (file)
@@ -359,10 +359,26 @@ static int emac_reset(struct emac_instance *dev)
        }
 
 #ifdef CONFIG_PPC_DCR_NATIVE
-       /* Enable internal clock source */
-       if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
-               dcri_clrset(SDR0, SDR0_ETH_CFG,
-                           0, SDR0_ETH_CFG_ECS << dev->cell_index);
+       /*
+        * PPC460EX/GT Embedded Processor Advanced User's Manual
+        * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
+        * Note: The PHY must provide a TX Clk in order to perform a soft reset
+        * of the EMAC. If none is present, select the internal clock
+        * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
+        * After a soft reset, select the external clock.
+        */
+       if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+               if (dev->phy_address == 0xffffffff &&
+                   dev->phy_map == 0xffffffff) {
+                       /* No PHY: select internal loop clock before reset */
+                       dcri_clrset(SDR0, SDR0_ETH_CFG,
+                                   0, SDR0_ETH_CFG_ECS << dev->cell_index);
+               } else {
+                       /* PHY present: select external clock before reset */
+                       dcri_clrset(SDR0, SDR0_ETH_CFG,
+                                   SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+               }
+       }
 #endif
 
        out_be32(&p->mr0, EMAC_MR0_SRST);
@@ -370,10 +386,14 @@ static int emac_reset(struct emac_instance *dev)
                --n;
 
 #ifdef CONFIG_PPC_DCR_NATIVE
-        /* Enable external clock source */
-       if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
-               dcri_clrset(SDR0, SDR0_ETH_CFG,
-                           SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+       if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+               if (dev->phy_address == 0xffffffff &&
+                   dev->phy_map == 0xffffffff) {
+                       /* No PHY: restore external clock source after reset */
+                       dcri_clrset(SDR0, SDR0_ETH_CFG,
+                                   SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+               }
+       }
 #endif
 
        if (n) {
index 6ce027355fcf721932336aee3fce299f4fe7f716..abb300a3191293387d3250ad4c215a49164011ba 100644 (file)
@@ -195,57 +195,57 @@ enum ipg_regs {
 /* TFD data structure masks. */
 
 /* TFDList, TFC */
-#define        IPG_TFC_RSVD_MASK                       0x0000FFFF9FFFFFFF
-#define        IPG_TFC_FRAMEID                         0x000000000000FFFF
-#define        IPG_TFC_WORDALIGN                       0x0000000000030000
-#define        IPG_TFC_WORDALIGNTODWORD                0x0000000000000000
-#define        IPG_TFC_WORDALIGNTOWORD                 0x0000000000020000
-#define        IPG_TFC_WORDALIGNDISABLED               0x0000000000030000
-#define        IPG_TFC_TCPCHECKSUMENABLE               0x0000000000040000
-#define        IPG_TFC_UDPCHECKSUMENABLE               0x0000000000080000
-#define        IPG_TFC_IPCHECKSUMENABLE                0x0000000000100000
-#define        IPG_TFC_FCSAPPENDDISABLE                0x0000000000200000
-#define        IPG_TFC_TXINDICATE                      0x0000000000400000
-#define        IPG_TFC_TXDMAINDICATE                   0x0000000000800000
-#define        IPG_TFC_FRAGCOUNT                       0x000000000F000000
-#define        IPG_TFC_VLANTAGINSERT                   0x0000000010000000
-#define        IPG_TFC_TFDDONE                         0x0000000080000000
-#define        IPG_TFC_VID                             0x00000FFF00000000
-#define        IPG_TFC_CFI                             0x0000100000000000
-#define        IPG_TFC_USERPRIORITY                    0x0000E00000000000
+#define        IPG_TFC_RSVD_MASK                       0x0000FFFF9FFFFFFFULL
+#define        IPG_TFC_FRAMEID                         0x000000000000FFFFULL
+#define        IPG_TFC_WORDALIGN                       0x0000000000030000ULL
+#define        IPG_TFC_WORDALIGNTODWORD                0x0000000000000000ULL
+#define        IPG_TFC_WORDALIGNTOWORD                 0x0000000000020000ULL
+#define        IPG_TFC_WORDALIGNDISABLED               0x0000000000030000ULL
+#define        IPG_TFC_TCPCHECKSUMENABLE               0x0000000000040000ULL
+#define        IPG_TFC_UDPCHECKSUMENABLE               0x0000000000080000ULL
+#define        IPG_TFC_IPCHECKSUMENABLE                0x0000000000100000ULL
+#define        IPG_TFC_FCSAPPENDDISABLE                0x0000000000200000ULL
+#define        IPG_TFC_TXINDICATE                      0x0000000000400000ULL
+#define        IPG_TFC_TXDMAINDICATE                   0x0000000000800000ULL
+#define        IPG_TFC_FRAGCOUNT                       0x000000000F000000ULL
+#define        IPG_TFC_VLANTAGINSERT                   0x0000000010000000ULL
+#define        IPG_TFC_TFDDONE                         0x0000000080000000ULL
+#define        IPG_TFC_VID                             0x00000FFF00000000ULL
+#define        IPG_TFC_CFI                             0x0000100000000000ULL
+#define        IPG_TFC_USERPRIORITY                    0x0000E00000000000ULL
 
 /* TFDList, FragInfo */
-#define        IPG_TFI_RSVD_MASK                       0xFFFF00FFFFFFFFFF
-#define        IPG_TFI_FRAGADDR                        0x000000FFFFFFFFFF
-#define        IPG_TFI_FRAGLEN                         0xFFFF000000000000LL
+#define        IPG_TFI_RSVD_MASK                       0xFFFF00FFFFFFFFFFULL
+#define        IPG_TFI_FRAGADDR                        0x000000FFFFFFFFFFULL
+#define        IPG_TFI_FRAGLEN                         0xFFFF000000000000ULL
 
 /* RFD data structure masks. */
 
 /* RFDList, RFS */
-#define        IPG_RFS_RSVD_MASK                       0x0000FFFFFFFFFFFF
-#define        IPG_RFS_RXFRAMELEN                      0x000000000000FFFF
-#define        IPG_RFS_RXFIFOOVERRUN                   0x0000000000010000
-#define        IPG_RFS_RXRUNTFRAME                     0x0000000000020000
-#define        IPG_RFS_RXALIGNMENTERROR                0x0000000000040000
-#define        IPG_RFS_RXFCSERROR                      0x0000000000080000
-#define        IPG_RFS_RXOVERSIZEDFRAME                0x0000000000100000
-#define        IPG_RFS_RXLENGTHERROR                   0x0000000000200000
-#define        IPG_RFS_VLANDETECTED                    0x0000000000400000
-#define        IPG_RFS_TCPDETECTED                     0x0000000000800000
-#define        IPG_RFS_TCPERROR                        0x0000000001000000
-#define        IPG_RFS_UDPDETECTED                     0x0000000002000000
-#define        IPG_RFS_UDPERROR                        0x0000000004000000
-#define        IPG_RFS_IPDETECTED                      0x0000000008000000
-#define        IPG_RFS_IPERROR                         0x0000000010000000
-#define        IPG_RFS_FRAMESTART                      0x0000000020000000
-#define        IPG_RFS_FRAMEEND                        0x0000000040000000
-#define        IPG_RFS_RFDDONE                         0x0000000080000000
-#define        IPG_RFS_TCI                             0x0000FFFF00000000
+#define        IPG_RFS_RSVD_MASK                       0x0000FFFFFFFFFFFFULL
+#define        IPG_RFS_RXFRAMELEN                      0x000000000000FFFFULL
+#define        IPG_RFS_RXFIFOOVERRUN                   0x0000000000010000ULL
+#define        IPG_RFS_RXRUNTFRAME                     0x0000000000020000ULL
+#define        IPG_RFS_RXALIGNMENTERROR                0x0000000000040000ULL
+#define        IPG_RFS_RXFCSERROR                      0x0000000000080000ULL
+#define        IPG_RFS_RXOVERSIZEDFRAME                0x0000000000100000ULL
+#define        IPG_RFS_RXLENGTHERROR                   0x0000000000200000ULL
+#define        IPG_RFS_VLANDETECTED                    0x0000000000400000ULL
+#define        IPG_RFS_TCPDETECTED                     0x0000000000800000ULL
+#define        IPG_RFS_TCPERROR                        0x0000000001000000ULL
+#define        IPG_RFS_UDPDETECTED                     0x0000000002000000ULL
+#define        IPG_RFS_UDPERROR                        0x0000000004000000ULL
+#define        IPG_RFS_IPDETECTED                      0x0000000008000000ULL
+#define        IPG_RFS_IPERROR                         0x0000000010000000ULL
+#define        IPG_RFS_FRAMESTART                      0x0000000020000000ULL
+#define        IPG_RFS_FRAMEEND                        0x0000000040000000ULL
+#define        IPG_RFS_RFDDONE                         0x0000000080000000ULL
+#define        IPG_RFS_TCI                             0x0000FFFF00000000ULL
 
 /* RFDList, FragInfo */
-#define        IPG_RFI_RSVD_MASK                       0xFFFF00FFFFFFFFFF
-#define        IPG_RFI_FRAGADDR                        0x000000FFFFFFFFFF
-#define        IPG_RFI_FRAGLEN                         0xFFFF000000000000LL
+#define        IPG_RFI_RSVD_MASK                       0xFFFF00FFFFFFFFFFULL
+#define        IPG_RFI_FRAGADDR                        0x000000FFFFFFFFFFULL
+#define        IPG_RFI_FRAGLEN                         0xFFFF000000000000ULL
 
 /* I/O Register masks. */
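
The ULL suffixes make these 64-bit masks unsigned long long on every platform and silence the "integer constant is too large" warnings some 32-bit builds emit for unsuffixed constants. A standalone illustration (plain user-space C, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Explicit width: the constant is unsigned long long everywhere. */
    #define FRAG_LEN_MASK   0xFFFF000000000000ULL

    int main(void)
    {
            uint64_t desc = 0x1234000000000042ULL;

            printf("frag len field = 0x%llx\n",
                   (unsigned long long)((desc & FRAG_LEN_MASK) >> 48));
            return 0;
    }
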
 
index d0afeea181fb0fd35918941c6e19a0008ded1141..2ad1494efbb3021e796481da6c4f48f21e63ca30 100644 (file)
@@ -867,7 +867,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
        struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
        int reclaimed;
 
-       __netif_tx_lock(nq, smp_processor_id());
+       __netif_tx_lock_bh(nq);
 
        reclaimed = 0;
        while (reclaimed < budget && txq->tx_desc_count > 0) {
@@ -913,7 +913,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                dev_kfree_skb(skb);
        }
 
-       __netif_tx_unlock(nq);
+       __netif_tx_unlock_bh(nq);
 
        if (reclaimed < budget)
                mp->work_tx &= ~(1 << txq->index);
@@ -2745,7 +2745,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
 
-       netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
+       netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
 
        init_timer(&mp->rx_oom);
        mp->rx_oom.data = (unsigned long)mp;
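
Replacing the literal 128 with NAPI_POLL_WEIGHT uses the stock NAPI budget of 64 that the core expects; kernels of this era print a one-time error when a larger weight is registered. Registering a poller with the shared constant, as a sketch (my_poll is a placeholder handler):

    #include <linux/netdevice.h>

    static void my_register_napi(struct net_device *dev,
                                 struct napi_struct *napi,
                                 int (*my_poll)(struct napi_struct *, int))
    {
            /* Use the standard weight rather than a magic number. */
            netif_napi_add(dev, napi, my_poll, NAPI_POLL_WEIGHT);
    }
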
index 1df56cc50ee93702128859e7a204a84a78f5fcd6..0e572a527154acc13e29e01293bacf9688cfca66 100644 (file)
@@ -222,8 +222,6 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
                 * FLR process. The only non-zero result in the RESET command
                 * is MLX4_DELAY_RESET_SLAVE*/
                if ((MLX4_COMM_CMD_RESET == cmd)) {
-                       mlx4_warn(dev, "Got slave FLRed from Communication"
-                                 " channel (ret:0x%x)\n", ret_from_pending);
                        err = MLX4_DELAY_RESET_SLAVE;
                } else {
                        mlx4_warn(dev, "Communication channel timed out\n");
index b35f9470009363bad560f7142322436baa76d8a3..89c47ea84b503ea9e1be08606d6e4937b6d6b178 100644 (file)
@@ -1323,6 +1323,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
                        priv->last_moder_time[ring] = moder_time;
                        cq = &priv->rx_cq[ring];
                        cq->moder_time = moder_time;
+                       cq->moder_cnt = priv->rx_frames;
                        err = mlx4_en_set_cq_moder(priv, cq);
                        if (err)
                                en_err(priv, "Failed modifying moderation for cq:%d\n",
@@ -2118,6 +2119,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        struct mlx4_en_priv *priv;
        int i;
        int err;
+       u64 mac_u64;
 
        dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
                                 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2191,10 +2193,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->addr_len = ETH_ALEN;
        mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
        if (!is_valid_ether_addr(dev->dev_addr)) {
-               en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
-                      priv->port, dev->dev_addr);
-               err = -EINVAL;
-               goto out;
+               if (mlx4_is_slave(priv->mdev->dev)) {
+                       eth_hw_addr_random(dev);
+                       en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
+                       mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
+                       mdev->dev->caps.def_mac[priv->port] = mac_u64;
+               } else {
+                       en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+                              priv->port, dev->dev_addr);
+                       err = -EINVAL;
+                       goto out;
+               }
        }
 
        memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
index 91f2b2c43c12669f796ddf0327baa18c33025891..d3f508697a3dd664f65a687c81964f30fee04a93 100644 (file)
@@ -60,7 +60,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
        context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
        if (user_prio >= 0) {
                context->pri_path.sched_queue |= user_prio << 3;
-               context->pri_path.feup = 1 << 6;
+               context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
        }
        context->pri_path.counter_index = 0xff;
        context->cqn_send = cpu_to_be32(cqn);
index b147bdd40768802df3c11d935b1ed607b3e127c9..2c97901c6a6d2ff79231e294c5fd1d5622255263 100644 (file)
@@ -131,7 +131,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [2] = "RSS XOR Hash Function support",
                [3] = "Device manage flow steering support",
                [4] = "Automatic MAC reassignment support",
-               [5] = "Time stamping support"
+               [5] = "Time stamping support",
+               [6] = "VST (control vlan insertion/stripping) support",
+               [7] = "FSM (MAC anti-spoofing) support"
        };
        int i;
 
@@ -838,12 +840,16 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
                           MLX4_CMD_NATIVE);
 
        if (!err && dev->caps.function != slave) {
-               /* set slave default_mac address */
-               MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
-               def_mac += slave << 8;
                /* if config MAC in DB use it */
                if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
                        def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
+               else {
+                       /* set slave default_mac address */
+                       MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+                       def_mac += slave << 8;
+                       priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
+               }
+
                MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
 
                /* get port type - currently only eth is enabled */
index 0d32a82458bfb0d10f95ebd6f914f05b6d5e4682..2f4a26039e801270b5d634abe43bc6de5ab3309f 100644 (file)
@@ -1290,7 +1290,6 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        u64 dma = (u64) priv->mfunc.vhcr_dma;
-       int num_of_reset_retries = NUM_OF_RESET_RETRIES;
        int ret_from_reset = 0;
        u32 slave_read;
        u32 cmd_channel_ver;
@@ -1304,18 +1303,10 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
         * NUM_OF_RESET_RETRIES times before leaving.*/
        if (ret_from_reset) {
                if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
-                       msleep(SLEEP_TIME_IN_RESET);
-                       while (ret_from_reset && num_of_reset_retries) {
-                               mlx4_warn(dev, "slave is currently in the"
-                                         "middle of FLR. retrying..."
-                                         "(try num:%d)\n",
-                                         (NUM_OF_RESET_RETRIES -
-                                          num_of_reset_retries  + 1));
-                               ret_from_reset =
-                                       mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
-                                                     0, MLX4_COMM_TIME);
-                               num_of_reset_retries = num_of_reset_retries - 1;
-                       }
+                       mlx4_warn(dev, "slave is currently in the "
+                                 "middle of FLR. Deferring probe.\n");
+                       mutex_unlock(&priv->cmd.slave_cmd_mutex);
+                       return -EPROBE_DEFER;
                } else
                        goto err;
        }
@@ -1526,7 +1517,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        } else {
                err = mlx4_init_slave(dev);
                if (err) {
-                       mlx4_err(dev, "Failed to initialize slave\n");
+                       if (err != -EPROBE_DEFER)
+                               mlx4_err(dev, "Failed to initialize slave\n");
                        return err;
                }
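
Returning -EPROBE_DEFER while the slave is still mid-FLR hands the retry to the driver core instead of spinning in a local retry loop; the core will simply re-probe the device later. A hedged sketch of the pattern (try_hw_reset() is a hypothetical helper):

    #include <linux/device.h>
    #include <linux/errno.h>

    /* Hypothetical helper: returns -EBUSY while a function-level reset is
     * still in flight on the device. */
    static int try_hw_reset(struct device *dev);

    static int my_probe_hw(struct device *dev)
    {
            int ret = try_hw_reset(dev);

            if (ret == -EBUSY) {
                    dev_warn(dev, "reset in progress, deferring probe\n");
                    return -EPROBE_DEFER;   /* driver core will retry later */
            }

            return ret;
    }
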
 
index e12e0d2e0ee00c3fad03c1c012707c55d6f21a88..1157f028a90f341a50c300c40ba8e967d9fb2e01 100644 (file)
@@ -372,24 +372,29 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
                if (MLX4_QP_ST_RC == qp_type)
                        return -EINVAL;
 
+               /* force strip vlan by clear vsd */
+               qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+               if (0 != vp_oper->state.default_vlan) {
+                       qpc->pri_path.vlan_control =
+                               MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+                               MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+                               MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+               } else { /* priority tagged */
+                       qpc->pri_path.vlan_control =
+                               MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+                               MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
+               }
+
+               qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
-               qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/
-               qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
+               qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+               qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
-               mlx4_dbg(dev, "qp %d  port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
-                        be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
-                        (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
-                        vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
-                        (int)(qpc->pri_path.fl));
        }
        if (vp_oper->state.spoofchk) {
-               qpc->pri_path.feup |= 1 << 5; /* set fsm bit */;
+               qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
-               mlx4_dbg(dev, "spoof qp %d  port %d feup  0x%x, myLmc 0x%x mindx %d\n",
-                        be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
-                        (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
-                        vp_oper->mac_idx);
        }
        return 0;
 }
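
Replacing literals such as (1 << 6) with MLX4_FEUP_FORCE_ETH_UP, MLX4_FL_CV and friends documents the register layout at every point of use. The pattern is simply named single-bit constants; the names and bit positions below are illustrative, not the mlx4 definitions:

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* Illustrative bit assignments for a packed path-record field. */
    #define PATH_FL_BIT_A           BIT(6)
    #define PATH_FL_BIT_B           BIT(2)
    #define PATH_FEUP_BIT_C         BIT(6)

    static void set_vlan_enforcement(u8 *fl, u8 *feup)
    {
            *fl   |= PATH_FL_BIT_A | PATH_FL_BIT_B;
            *feup |= PATH_FEUP_BIT_C;
    }
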
index 90c253b145eff8b9ae622877933f101cdf70b633..c1b693cb3df38b180d1cece6d4acfe5582bb9d6a 100644 (file)
@@ -429,6 +429,7 @@ struct qlcnic_hardware_context {
 
        u16 port_type;
        u16 board_type;
+       u16 supported_type;
 
        u16 link_speed;
        u16 link_duplex;
@@ -906,8 +907,11 @@ struct qlcnic_ipaddr {
 #define QLCNIC_FW_HANG                 0x4000
 #define QLCNIC_FW_LRO_MSS_CAP          0x8000
 #define QLCNIC_TX_INTR_SHARED          0x10000
+#define QLCNIC_APP_CHANGED_FLAGS       0x20000
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
        ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+#define QLCNIC_IS_TSO_CAPABLE(adapter)  \
+       ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
 
 #define QLCNIC_DEF_NUM_STS_DESC_RINGS  4
 #define QLCNIC_MSIX_TBL_SPACE          8192
@@ -1033,6 +1037,7 @@ struct qlcnic_adapter {
        spinlock_t rx_mac_learn_lock;
        u32 file_prd_off;       /*File fw product offset*/
        u32 fw_version;
+       u32 offload_flags;
        const struct firmware *fw;
 };
 
@@ -1514,6 +1519,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
 void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 
 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
@@ -1540,6 +1546,8 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
 int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
 int qlcnic_read_mac_addr(struct qlcnic_adapter *);
 int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
+void qlcnic_set_netdev_features(struct qlcnic_adapter *,
+                               struct qlcnic_esw_func_cfg *);
 void qlcnic_sriov_vf_schedule_multi(struct net_device *);
 void qlcnic_vf_add_mc_list(struct net_device *, u16);
 
index ea790a93ee7c0e79acedd062a1d1795fb413e8c4..b4ff1e35a11de24863966e1b8cdae2898cda8c6a 100644 (file)
@@ -696,15 +696,14 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
        return 1;
 }
 
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
 {
        u32 data;
-       unsigned long wait_time = 0;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        /* wait for mailbox completion */
        do {
                data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
-               if (++wait_time > QLCNIC_MBX_TIMEOUT) {
+               if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
                        data = QLCNIC_RCODE_TIMEOUT;
                        break;
                }
@@ -720,8 +719,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
        u16 opcode;
        u8 mbx_err_code;
        unsigned long flags;
-       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
 
        opcode = LSW(cmd->req.arg[0]);
        if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
@@ -754,15 +753,13 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
        /* Signal FW about the impending command */
        QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 poll:
-       rsp = qlcnic_83xx_mbx_poll(adapter);
+       rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
        if (rsp != QLCNIC_RCODE_TIMEOUT) {
                /* Get the FW response data */
                fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
                if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
                        __qlcnic_83xx_process_aen(adapter);
-                       mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-                       if (mbx_val)
-                               goto poll;
+                       goto poll;
                }
                mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
                rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
@@ -1276,11 +1273,13 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
        return err;
 }
 
-static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
+                                     int num_sds_ring)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_host_rds_ring *rds_ring;
+       u16 adapter_state = adapter->is_up;
        u8 ring;
        int ret;
 
@@ -1304,6 +1303,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
        ret = qlcnic_fw_create_ctx(adapter);
        if (ret) {
                qlcnic_detach(adapter);
+               if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
+                       adapter->max_sds_rings = num_sds_ring;
+                       qlcnic_attach(adapter);
+               }
                netif_device_attach(netdev);
                return ret;
        }
@@ -1596,7 +1599,8 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
        if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EBUSY;
 
-       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
+                                        max_sds_rings);
        if (ret)
                goto fail_diag_alloc;
 
@@ -2830,6 +2834,23 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
                        break;
                }
                config = cmd.rsp.arg[3];
+               if (QLC_83XX_SFP_PRESENT(config)) {
+                       switch (ahw->module_type) {
+                       case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+                       case LINKEVENT_MODULE_OPTICAL_SRLR:
+                       case LINKEVENT_MODULE_OPTICAL_LRM:
+                       case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+                               ahw->supported_type = PORT_FIBRE;
+                               break;
+                       case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+                       case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+                       case LINKEVENT_MODULE_TWINAX:
+                               ahw->supported_type = PORT_TP;
+                               break;
+                       default:
+                               ahw->supported_type = PORT_OTHER;
+                       }
+               }
                if (config & 1)
                        err = 1;
        }
@@ -2838,7 +2859,8 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
        return config;
 }
 
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
+                            struct ethtool_cmd *ecmd)
 {
        u32 config = 0;
        int status = 0;
@@ -2851,6 +2873,54 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
        ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
        /* hard code until there is a way to get it from flash */
        ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+
+       if (netif_running(adapter->netdev) && ahw->has_link_events) {
+               ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+               ecmd->duplex = ahw->link_duplex;
+               ecmd->autoneg = ahw->link_autoneg;
+       } else {
+               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+               ecmd->duplex = DUPLEX_UNKNOWN;
+               ecmd->autoneg = AUTONEG_DISABLE;
+       }
+
+       if (ahw->port_type == QLCNIC_XGBE) {
+               ecmd->supported = SUPPORTED_1000baseT_Full;
+               ecmd->advertising = ADVERTISED_1000baseT_Full;
+       } else {
+               ecmd->supported = (SUPPORTED_10baseT_Half |
+                                  SUPPORTED_10baseT_Full |
+                                  SUPPORTED_100baseT_Half |
+                                  SUPPORTED_100baseT_Full |
+                                  SUPPORTED_1000baseT_Half |
+                                  SUPPORTED_1000baseT_Full);
+               ecmd->advertising = (ADVERTISED_100baseT_Half |
+                                    ADVERTISED_100baseT_Full |
+                                    ADVERTISED_1000baseT_Half |
+                                    ADVERTISED_1000baseT_Full);
+       }
+
+       switch (ahw->supported_type) {
+       case PORT_FIBRE:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_FIBRE;
+               ecmd->transceiver = XCVR_EXTERNAL;
+               break;
+       case PORT_TP:
+               ecmd->supported |= SUPPORTED_TP;
+               ecmd->advertising |= ADVERTISED_TP;
+               ecmd->port = PORT_TP;
+               ecmd->transceiver = XCVR_INTERNAL;
+               break;
+       default:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_OTHER;
+               ecmd->transceiver = XCVR_EXTERNAL;
+               break;
+       }
+       ecmd->phy_address = ahw->physical_port;
        return status;
 }
 
@@ -3046,7 +3116,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
        if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EIO;
 
-       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+       ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
+                                        max_sds_rings);
        if (ret)
                goto fail_diag_irq;
 
index 1f1d85e6f2afd1e4cb1581f03088333b034726a3..f5db67fc9f55a0dd319112462b0952af932f3ffe 100644 (file)
@@ -603,7 +603,7 @@ int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
                                struct ethtool_pauseparam *);
@@ -620,7 +620,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
 int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
 int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
 u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *);
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
 void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
 void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
 #endif
index ab1d8d99cbd530d59727a4e131789f3eb210a9bb..5e7fb1dfb97b54c63c46314eea8eeccd9dbbc643 100644 (file)
@@ -382,8 +382,6 @@ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
        dev_err(&adapter->pdev->dev, "%s:\n", __func__);
 
-       adapter->netdev->trans_start = jiffies;
-
        return 0;
 }
 
@@ -435,10 +433,6 @@ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
        }
 done:
        netif_device_attach(netdev);
-       if (netif_running(netdev)) {
-               netif_carrier_on(netdev);
-               netif_wake_queue(netdev);
-       }
 }
 
 static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
@@ -642,15 +636,21 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 
 static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
        qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
-       clear_bit(__QLCNIC_RESETTING, &adapter->state);
        set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
        qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
        set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
-       adapter->ahw->idc.quiesce_req = 0;
-       adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
-       adapter->ahw->idc.err_code = 0;
-       adapter->ahw->idc.collect_dump = 0;
+
+       ahw->idc.quiesce_req = 0;
+       ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+       ahw->idc.err_code = 0;
+       ahw->idc.collect_dump = 0;
+       ahw->reset_context = 0;
+       adapter->tx_timeo_cnt = 0;
+
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
 }
 
 /**
@@ -851,6 +851,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
        /* Check for soft reset request */
        if (ahw->reset_context &&
            !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+               adapter->ahw->reset_context = 0;
                qlcnic_83xx_idc_tx_soft_reset(adapter);
                return ret;
        }
@@ -914,6 +915,7 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
 static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
 {
        dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+       clear_bit(__QLCNIC_RESETTING, &adapter->state);
        adapter->ahw->idc.err_code = -EIO;
 
        return 0;
index 08efb463500723a268923617157b4864a4345878..f67652de5a63ca4736794d01c794ce44f1a6e382 100644 (file)
@@ -131,12 +131,13 @@ static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
        "ctx_lro_pkt_cnt",
        "ctx_ip_csum_error",
        "ctx_rx_pkts_wo_ctx",
-       "ctx_rx_pkts_dropped_wo_sts",
+       "ctx_rx_pkts_drop_wo_sds_on_card",
+       "ctx_rx_pkts_drop_wo_sds_on_host",
        "ctx_rx_osized_pkts",
        "ctx_rx_pkts_dropped_wo_rds",
        "ctx_rx_unexpected_mcast_pkts",
        "ctx_invalid_mac_address",
-       "ctx_rx_rds_ring_prim_attemoted",
+       "ctx_rx_rds_ring_prim_attempted",
        "ctx_rx_rds_ring_prim_success",
        "ctx_num_lro_flows_added",
        "ctx_num_lro_flows_removed",
@@ -251,6 +252,18 @@ static int
 qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+       if (qlcnic_82xx_check(adapter))
+               return qlcnic_82xx_get_settings(adapter, ecmd);
+       else if (qlcnic_83xx_check(adapter))
+               return qlcnic_83xx_get_settings(adapter, ecmd);
+
+       return -EIO;
+}
+
+int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+                            struct ethtool_cmd *ecmd)
+{
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        u32 speed, reg;
        int check_sfp_module = 0;
@@ -276,10 +289,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 
        } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
                u32 val = 0;
-               if (qlcnic_83xx_check(adapter))
-                       qlcnic_83xx_get_settings(adapter);
-               else
-                       val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+               val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
 
                if (val == QLCNIC_PORT_MODE_802_3_AP) {
                        ecmd->supported = SUPPORTED_1000baseT_Full;
@@ -289,16 +299,13 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                        ecmd->advertising = ADVERTISED_10000baseT_Full;
                }
 
-               if (netif_running(dev) && adapter->ahw->has_link_events) {
-                       if (qlcnic_82xx_check(adapter)) {
-                               reg = QLCRD32(adapter,
-                                             P3P_LINK_SPEED_REG(pcifn));
-                               speed = P3P_LINK_SPEED_VAL(pcifn, reg);
-                               ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
-                       }
-                       ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
-                       ecmd->autoneg = adapter->ahw->link_autoneg;
-                       ecmd->duplex = adapter->ahw->link_duplex;
+               if (netif_running(adapter->netdev) && ahw->has_link_events) {
+                       reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
+                       speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+                       ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+                       ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+                       ecmd->autoneg = ahw->link_autoneg;
+                       ecmd->duplex = ahw->link_duplex;
                        goto skip;
                }
 
@@ -340,8 +347,8 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
        case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
                ecmd->advertising |= ADVERTISED_TP;
                ecmd->supported |= SUPPORTED_TP;
-               check_sfp_module = netif_running(dev) &&
-                                  adapter->ahw->has_link_events;
+               check_sfp_module = netif_running(adapter->netdev) &&
+                                  ahw->has_link_events;
        case QLCNIC_BRDTYPE_P3P_10G_XFP:
                ecmd->supported |= SUPPORTED_FIBRE;
                ecmd->advertising |= ADVERTISED_FIBRE;
@@ -355,8 +362,8 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                        ecmd->advertising |=
                                (ADVERTISED_FIBRE | ADVERTISED_TP);
                        ecmd->port = PORT_FIBRE;
-                       check_sfp_module = netif_running(dev) &&
-                                          adapter->ahw->has_link_events;
+                       check_sfp_module = netif_running(adapter->netdev) &&
+                                          ahw->has_link_events;
                } else {
                        ecmd->autoneg = AUTONEG_ENABLE;
                        ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
@@ -365,13 +372,6 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                        ecmd->port = PORT_TP;
                }
                break;
-       case QLCNIC_BRDTYPE_83XX_10G:
-               ecmd->autoneg = AUTONEG_DISABLE;
-               ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
-               ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP);
-               ecmd->port = PORT_FIBRE;
-               check_sfp_module = netif_running(dev) && ahw->has_link_events;
-               break;
        default:
                dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
                        adapter->ahw->board_type);
index 6a6512ba9f38824f0d72a844aaa0d96a95415a8f..106a12f2a02f5173fea587ed8a09f0b2882c0335 100644 (file)
@@ -973,16 +973,57 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
        return rc;
 }
 
+static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter,
+                                             netdev_features_t features)
+{
+       u32 offload_flags = adapter->offload_flags;
+
+       if (offload_flags & BIT_0) {
+               features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+                           NETIF_F_IPV6_CSUM;
+               adapter->rx_csum = 1;
+               if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+                       if (!(offload_flags & BIT_1))
+                               features &= ~NETIF_F_TSO;
+                       else
+                               features |= NETIF_F_TSO;
+
+                       if (!(offload_flags & BIT_2))
+                               features &= ~NETIF_F_TSO6;
+                       else
+                               features |= NETIF_F_TSO6;
+               }
+       } else {
+               features &= ~(NETIF_F_RXCSUM |
+                             NETIF_F_IP_CSUM |
+                             NETIF_F_IPV6_CSUM);
+
+               if (QLCNIC_IS_TSO_CAPABLE(adapter))
+                       features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+               adapter->rx_csum = 0;
+       }
+
+       return features;
+}
 
 netdev_features_t qlcnic_fix_features(struct net_device *netdev,
        netdev_features_t features)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       netdev_features_t changed;
 
-       if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
-           qlcnic_82xx_check(adapter)) {
-               netdev_features_t changed = features ^ netdev->features;
-               features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+       if (qlcnic_82xx_check(adapter) &&
+           (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+               if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) {
+                       features = qlcnic_process_flags(adapter, features);
+               } else {
+                       changed = features ^ netdev->features;
+                       features ^= changed & (NETIF_F_RXCSUM |
+                                              NETIF_F_IP_CSUM |
+                                              NETIF_F_IPV6_CSUM |
+                                              NETIF_F_TSO |
+                                              NETIF_F_TSO6);
+               }
        }
 
        if (!(features & NETIF_F_RXCSUM))
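
qlcnic_process_flags() folds externally requested offload settings into the feature mask from the .ndo_fix_features path, which is the standard place to veto features the hardware cannot honour in its current mode. A minimal hedged sketch of such a callback (my_priv and tso_capable are assumptions):

    #include <linux/netdevice.h>

    struct my_priv {
            bool tso_capable;
    };

    static netdev_features_t my_fix_features(struct net_device *dev,
                                             netdev_features_t features)
    {
            struct my_priv *p = netdev_priv(dev);

            /* Mask out TSO before the core commits the feature set if the
             * device cannot do it in its current configuration. */
            if (!p->tso_capable)
                    features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

            return features;
    }
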
index 95b1b57328385dac93c0268fb6bae262ae20a793..b6818f4356b905de891ed09660a7be62b3262e56 100644 (file)
@@ -134,7 +134,7 @@ struct qlcnic_mailbox_metadata {
 
 #define QLCNIC_SET_OWNER        1
 #define QLCNIC_CLR_OWNER        0
-#define QLCNIC_MBX_TIMEOUT      10000
+#define QLCNIC_MBX_TIMEOUT      5000
 
 #define QLCNIC_MBX_RSP_OK      1
 #define QLCNIC_MBX_PORT_RSP_OK 0x1a
index 264d5a4f81538a23653c370a7d85df18617c09be..aeb26a8506799c6a9263e41fa5d82633deafb9d7 100644 (file)
@@ -37,24 +37,24 @@ MODULE_PARM_DESC(qlcnic_mac_learn,
                 "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
 
 int qlcnic_use_msi = 1;
-MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
 module_param_named(use_msi, qlcnic_use_msi, int, 0444);
 
 int qlcnic_use_msi_x = 1;
-MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
 module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
 
 int qlcnic_auto_fw_reset = 1;
-MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
 module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
 
 int qlcnic_load_fw_file;
-MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
 module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
 
 int qlcnic_config_npars;
 module_param(qlcnic_config_npars, int, 0444);
-MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
+MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
 
 static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void qlcnic_remove(struct pci_dev *pdev);
@@ -84,14 +84,9 @@ static int qlcnic_start_firmware(struct qlcnic_adapter *);
 static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
 static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
-static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
-                               struct qlcnic_esw_func_cfg *);
 static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
 static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
 
-#define QLCNIC_IS_TSO_CAPABLE(adapter) \
-       ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
-
 static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -308,6 +303,23 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
        return 0;
 }
 
+static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mac_list_s *cur;
+       struct list_head *head;
+
+       list_for_each(head, &adapter->mac_list) {
+               cur = list_entry(head, struct qlcnic_mac_list_s, list);
+               if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+                       qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+                                                 0, QLCNIC_MAC_DEL);
+                       list_del(&cur->list);
+                       kfree(cur);
+                       return;
+               }
+       }
+}
+
 static int qlcnic_set_mac(struct net_device *netdev, void *p)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -322,11 +334,15 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;
 
+       if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+               return 0;
+
        if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                netif_device_detach(netdev);
                qlcnic_napi_disable(adapter);
        }
 
+       qlcnic_delete_adapter_mac(adapter);
        memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        qlcnic_set_multi(adapter->netdev);
@@ -1053,8 +1069,6 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
 
        if (!esw_cfg->promisc_mode)
                adapter->flags |= QLCNIC_PROMISC_DISABLED;
-
-       qlcnic_set_netdev_features(adapter, esw_cfg);
 }
 
 int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
@@ -1069,51 +1083,23 @@ int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
                        return -EIO;
        qlcnic_set_vlan_config(adapter, &esw_cfg);
        qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
+       qlcnic_set_netdev_features(adapter, &esw_cfg);
 
        return 0;
 }
 
-static void
-qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
-               struct qlcnic_esw_func_cfg *esw_cfg)
+void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
+                               struct qlcnic_esw_func_cfg *esw_cfg)
 {
        struct net_device *netdev = adapter->netdev;
-       unsigned long features, vlan_features;
 
        if (qlcnic_83xx_check(adapter))
                return;
 
-       features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                   NETIF_F_IPV6_CSUM | NETIF_F_GRO);
-       vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
-                       NETIF_F_IPV6_CSUM);
-
-       if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
-               features |= (NETIF_F_TSO | NETIF_F_TSO6);
-               vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
-       }
-
-       if (netdev->features & NETIF_F_LRO)
-               features |= NETIF_F_LRO;
-
-       if (esw_cfg->offload_flags & BIT_0) {
-               netdev->features |= features;
-               adapter->rx_csum = 1;
-               if (!(esw_cfg->offload_flags & BIT_1)) {
-                       netdev->features &= ~NETIF_F_TSO;
-                       features &= ~NETIF_F_TSO;
-               }
-               if (!(esw_cfg->offload_flags & BIT_2)) {
-                       netdev->features &= ~NETIF_F_TSO6;
-                       features &= ~NETIF_F_TSO6;
-               }
-       } else {
-               netdev->features &= ~features;
-               features &= ~features;
-               adapter->rx_csum = 0;
-       }
-
-       netdev->vlan_features = (features & vlan_features);
+       adapter->offload_flags = esw_cfg->offload_flags;
+       adapter->flags |= QLCNIC_APP_CHANGED_FLAGS;
+       netdev_update_features(netdev);
+       adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS;
 }
 
 static int
@@ -1995,8 +1981,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_enable_pcie_error_reporting(pdev);
 
        ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
-       if (!ahw)
+       if (!ahw) {
+               err = -ENOMEM;
                goto err_out_free_res;
+       }
 
        switch (ent->device) {
        case PCI_DEVICE_ID_QLOGIC_QLE824X:
@@ -2032,6 +2020,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
        if (adapter->qlcnic_wq == NULL) {
+               err = -ENOMEM;
                dev_err(&pdev->dev, "Failed to create workqueue\n");
                goto err_out_free_netdev;
        }
@@ -2112,6 +2101,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        goto err_out_disable_msi;
        }
 
+       err = qlcnic_get_act_pci_func(adapter);
+       if (err)
+               goto err_out_disable_mbx_intr;
+
        err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
        if (err)
                goto err_out_disable_mbx_intr;
@@ -2141,9 +2134,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                break;
        }
 
-       if (qlcnic_get_act_pci_func(adapter))
-               goto err_out_disable_mbx_intr;
-
        if (adapter->drv_mac_learn)
                qlcnic_alloc_lb_filters_mem(adapter);
 
@@ -2481,12 +2471,17 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return;
 
-       dev_err(&netdev->dev, "transmit timeout, resetting.\n");
-
-       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
-               adapter->need_fw_reset = 1;
-       else
+       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) {
+               netdev_info(netdev, "Tx timeout, reset the adapter.\n");
+               if (qlcnic_82xx_check(adapter))
+                       adapter->need_fw_reset = 1;
+               else if (qlcnic_83xx_check(adapter))
+                       qlcnic_83xx_idc_request_reset(adapter,
+                                                     QLCNIC_FORCE_FW_DUMP_KEY);
+       } else {
+               netdev_info(netdev, "Tx timeout, reset adapter context.\n");
                adapter->ahw->reset_context = 1;
+       }
 }
 
 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -3123,10 +3118,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
                if (adapter->need_fw_reset)
                        goto detach;
 
-               if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) {
+               if (adapter->ahw->reset_context && qlcnic_auto_fw_reset)
                        qlcnic_reset_hw_context(adapter);
-                       adapter->netdev->trans_start = jiffies;
-               }
 
                return 0;
        }
index 44d547d78b84927db40201cfa03d6aea49bf37cd..196b2d100407bf1de67d284c85f88e9f48d310c1 100644 (file)
@@ -280,9 +280,9 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
                                    u32 *pay, u8 pci_func, u8 size)
 {
+       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        unsigned long flags;
-       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val;
        u16 opcode;
        u8 mbx_err_code;
        int i, j;
@@ -330,15 +330,13 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
         * assume something is wrong.
         */
 poll:
-       rsp = qlcnic_83xx_mbx_poll(adapter);
+       rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
        if (rsp != QLCNIC_RCODE_TIMEOUT) {
                /* Get the FW response data */
                fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
                if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
                        __qlcnic_83xx_process_aen(adapter);
-                       mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-                       if (mbx_val)
-                               goto poll;
+                       goto poll;
                }
                mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
                rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
@@ -1736,7 +1734,6 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
 
        if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
                qlcnic_sriov_vf_attach(adapter);
-               adapter->netdev->trans_start = jiffies;
                adapter->tx_timeo_cnt = 0;
                adapter->reset_ctx_cnt = 0;
                adapter->fw_fail_cnt = 0;
index c81be2da119bdc4b9aef1d3f770f3d76d4074b56..1a66ccded235e202616be03ae9bdb08c6bad9155 100644 (file)
@@ -1133,9 +1133,6 @@ static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
        if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
                return -EINVAL;
 
-       if (!(cmd->req.arg[1] & BIT_8))
-               return -EINVAL;
-
        return 0;
 }
 
index 4e22e794a1863ea1abec985d91abe154afcf8b49..e7a2fe21b64911939b5279e3f459f7108054fed6 100644 (file)
@@ -544,6 +544,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
                switch (esw_cfg[i].op_mode) {
                case QLCNIC_PORT_DEFAULTS:
                        qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+                       rtnl_lock();
+                       qlcnic_set_netdev_features(adapter, &esw_cfg[i]);
+                       rtnl_unlock();
                        break;
                case QLCNIC_ADD_VLAN:
                        qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
index 87463bc701a653781371feb05599aad2b45d224a..f87cc216045b5feb5e20dbc2f4df3fe2190eba14 100644 (file)
@@ -1106,6 +1106,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
                if (pci_dma_mapping_error(qdev->pdev, map)) {
                        __free_pages(rx_ring->pg_chunk.page,
                                        qdev->lbq_buf_order);
+                       rx_ring->pg_chunk.page = NULL;
                        netif_err(qdev, drv, qdev->ndev,
                                  "PCI mapping failed.\n");
                        return -ENOMEM;
@@ -2777,6 +2778,12 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
                        curr_idx = 0;
 
        }
+       if (rx_ring->pg_chunk.page) {
+               pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
+                       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+               put_page(rx_ring->pg_chunk.page);
+               rx_ring->pg_chunk.page = NULL;
+       }
 }
 
 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
@@ -4710,6 +4717,7 @@ static int qlge_probe(struct pci_dev *pdev,
                dev_err(&pdev->dev, "net device registration failed.\n");
                ql_release_all(pdev);
                pci_disable_device(pdev);
+               free_netdev(ndev);
                return err;
        }
        /* Start up the timer to trigger EEH if
index 7d1fb9ad1296e5825da2d845129ef38b51728cd7..03523459c4061627d1fad51f751082298eec2147 100644 (file)
@@ -1136,6 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp)
                        cp->dev->stats.tx_dropped++;
                }
        }
+       netdev_reset_queue(cp->dev);
 
        memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
index 79c520b64fddd00ab36436215abbfa5847aa8336..393f961a013cec14daf9c624021fdd60be23939d 100644 (file)
@@ -5856,7 +5856,20 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
        return -EIO;
 }
 
-static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
+static bool rtl_skb_pad(struct sk_buff *skb)
+{
+       if (skb_padto(skb, ETH_ZLEN))
+               return false;
+       skb_put(skb, ETH_ZLEN - skb->len);
+       return true;
+}
+
+static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
+{
+       return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
+}
+
+static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
                                    struct sk_buff *skb, u32 *opts)
 {
        const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
@@ -5869,13 +5882,20 @@ static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const struct iphdr *ip = ip_hdr(skb);
 
+               if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+                       return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
+
                if (ip->protocol == IPPROTO_TCP)
                        opts[offset] |= info->checksum.tcp;
                else if (ip->protocol == IPPROTO_UDP)
                        opts[offset] |= info->checksum.udp;
                else
                        WARN_ON_ONCE(1);
+       } else {
+               if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
+                       return rtl_skb_pad(skb);
        }
+       return true;
 }
 
 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -5896,17 +5916,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
                goto err_stop_0;
        }
 
-       /* 8168evl does not automatically pad to minimum length. */
-       if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
-                    skb->len < ETH_ZLEN)) {
-               if (skb_padto(skb, ETH_ZLEN))
-                       goto err_update_stats;
-               skb_put(skb, ETH_ZLEN - skb->len);
-       }
-
        if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
                goto err_stop_0;
 
+       opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
+       opts[0] = DescOwn;
+
+       if (!rtl8169_tso_csum(tp, skb, opts))
+               goto err_update_stats;
+
        len = skb_headlen(skb);
        mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(d, mapping))) {
@@ -5918,11 +5936,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
        tp->tx_skb[entry].len = len;
        txd->addr = cpu_to_le64(mapping);
 
-       opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
-       opts[0] = DescOwn;
-
-       rtl8169_tso_csum(tp, skb, opts);
-
        frags = rtl8169_xmit_frags(tp, skb, opts);
        if (frags < 0)
                goto err_dma_1;
index 33dc6f2418f2968e23a383e2c0395ed41d832132..b4479b5aaee43ba49bb254f9fdeaf7cea7638b1c 100644 (file)
@@ -897,8 +897,8 @@ static int sh_eth_check_reset(struct net_device *ndev)
                mdelay(1);
                cnt--;
        }
-       if (cnt < 0) {
-               pr_err("Device reset fail\n");
+       if (cnt <= 0) {
+               pr_err("Device reset failed\n");
                ret = -ETIMEDOUT;
        }
        return ret;
@@ -2745,11 +2745,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        if (mdp->cd->tsu) {
                struct resource *rtsu;
                rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               if (!rtsu) {
-                       dev_err(&pdev->dev, "Not found TSU resource\n");
-                       ret = -ENODEV;
-                       goto out_release;
-               }
                mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
                if (IS_ERR(mdp->tsu_addr)) {
                        ret = PTR_ERR(mdp->tsu_addr);
index 01b99206139a0acefeb9452ad62b55346c6278f6..39e4cb39de29493d30079f3ddcfc81e46cfbf5b4 100644 (file)
@@ -638,14 +638,16 @@ static void efx_start_datapath(struct efx_nic *efx)
                           EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
                           efx->type->rx_buffer_padding);
        rx_buf_len = (sizeof(struct efx_rx_page_state) +
-                     EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
+                     NET_IP_ALIGN + efx->rx_dma_len);
        if (rx_buf_len <= PAGE_SIZE) {
                efx->rx_scatter = false;
                efx->rx_buffer_order = 0;
        } else if (efx->type->can_rx_scatter) {
+               BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
                BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
-                            EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
-                            PAGE_SIZE / 2);
+                            2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+                                      EFX_RX_BUF_ALIGNMENT) >
+                            PAGE_SIZE);
                efx->rx_scatter = true;
                efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
                efx->rx_buffer_order = 0;
index 9bd433a095c57b8a127b24a7ec5e55c60a50b24b..39d6bd77f0157523fcafc9b4ac8d0affded56d76 100644 (file)
 /* Maximum possible MTU the driver supports */
 #define EFX_MAX_MTU (9 * 1024)
 
-/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page. */
-#define EFX_RX_USR_BUF_SIZE 1824
+/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page,
+ * and should be a multiple of the cache line size.
+ */
+#define EFX_RX_USR_BUF_SIZE    (2048 - 256)
+
+/* If possible, we should ensure cache line alignment at start and end
+ * of every buffer.  Otherwise, we just need to ensure 4-byte
+ * alignment of the network header.
+ */
+#if NET_IP_ALIGN == 0
+#define EFX_RX_BUF_ALIGNMENT   L1_CACHE_BYTES
+#else
+#define EFX_RX_BUF_ALIGNMENT   4
+#endif
 
 /* Forward declare Precision Time Protocol (PTP) support structure. */
 struct efx_ptp_data;
@@ -467,25 +479,12 @@ enum nic_state {
        STATE_RECOVERY = 3,     /* device recovering from PCI error */
 };
 
-/*
- * Alignment of page-allocated RX buffers
- *
- * Controls the number of bytes inserted at the start of an RX buffer.
- * This is the equivalent of NET_IP_ALIGN [which controls the alignment
- * of the skb->head for hardware DMA].
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define EFX_PAGE_IP_ALIGN 0
-#else
-#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
-#endif
-
 /*
  * Alignment of the skb->head which wraps a page-allocated RX buffer
  *
  * The skb allocated to wrap an rx_buffer can have this alignment. Since
  * the data is memcpy'd from the rx_buf, it does not need to be equal to
- * EFX_PAGE_IP_ALIGN.
+ * NET_IP_ALIGN.
  */
 #define EFX_PAGE_SKB_ALIGN 2
 
index e73e30bac10e268a6f031d717a3551cda1ddeeae..a7dfe36cabf4b7855291237c0ed5ec5caec28519 100644 (file)
@@ -93,8 +93,8 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
 
 void efx_rx_config_page_split(struct efx_nic *efx)
 {
-       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
-                                     L1_CACHE_BYTES);
+       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
+                                     EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
                 efx->rx_page_buf_step);
@@ -188,9 +188,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
-                       rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+                       rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
                        rx_buf->page = page;
-                       rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+                       rx_buf->page_offset = page_offset + NET_IP_ALIGN;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
index f695a50bac47d99cdc141a3cf059dff340e54dc3..43c1f32233220e17a27936572fa27de9d31219ee 100644 (file)
@@ -1,6 +1,6 @@
 config STMMAC_ETH
        tristate "STMicroelectronics 10/100/1000 Ethernet driver"
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && HAS_DMA
        select NET_CORE
        select MII
        select PHYLIB
index 12aec173564ca6fdc6b6e15ab8a8dcc0c293c9a4..b2275d1b19b3af0d918c1de2e1756f8ee99bff7a 100644 (file)
@@ -449,10 +449,9 @@ static int davinci_mdio_suspend(struct device *dev)
        __raw_writel(ctrl, &data->regs->control);
        wait_for_idle(data);
 
-       pm_runtime_put_sync(data->dev);
-
        data->suspended = true;
        spin_unlock(&data->lock);
+       pm_runtime_put_sync(data->dev);
 
        return 0;
 }
@@ -462,9 +461,9 @@ static int davinci_mdio_resume(struct device *dev)
        struct davinci_mdio_data *data = dev_get_drvdata(dev);
        u32 ctrl;
 
-       spin_lock(&data->lock);
        pm_runtime_get_sync(data->dev);
 
+       spin_lock(&data->lock);
        /* restart the scan state machine */
        ctrl = __raw_readl(&data->regs->control);
        ctrl |= CONTROL_ENABLE;
index 919b983114e907242bc49ce64ba95eb00c3e2008..b7268b3dae777a1fc29b5aed36f19ec96df462aa 100644 (file)
@@ -946,7 +946,8 @@ static int xemaclite_open(struct net_device *dev)
                phy_write(lp->phy_dev, MII_CTRL1000, 0);
 
                /* Advertise only 10 and 100mbps full/half duplex speeds */
-               phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL);
+               phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
+                         ADVERTISE_CSMA);
 
                /* Restart auto negotiation */
                bmcr = phy_read(lp->phy_dev, MII_BMCR);
index 088c554961918beeec45dd9c6d22d2508409e92c..ab2307b5d9a7c881cff59aabdc35b20823d14cd9 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/inetdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+#include <linux/if_vlan.h>
 #include <linux/in.h>
 #include <linux/slab.h>
 #include <net/arp.h>
@@ -284,7 +285,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 
        skb->protocol = eth_type_trans(skb, net);
        skb->ip_summed = CHECKSUM_NONE;
-       skb->vlan_tci = packet->vlan_tci;
+       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci);
 
        net->stats.rx_packets++;
        net->stats.rx_bytes += packet->total_data_buflen;
index d5a141c7c4e786b06746c73668407c110e74611e..1c502bb0c916ff549ab10400af5d24b7f61c4866 100644 (file)
@@ -229,7 +229,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
        }
 
        if (port->passthru)
-               vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
+               vlan = list_first_or_null_rcu(&port->vlans,
+                                             struct macvlan_dev, list);
        else
                vlan = macvlan_hash_lookup(port, eth->h_dest);
        if (vlan == NULL)
@@ -814,7 +815,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        if (err < 0)
                goto upper_dev_unlink;
 
-       list_add_tail(&vlan->list, &port->vlans);
+       list_add_tail_rcu(&vlan->list, &port->vlans);
        netif_stacked_transfer_operstate(lowerdev, dev);
 
        return 0;
@@ -842,7 +843,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
 
-       list_del(&vlan->list);
+       list_del_rcu(&vlan->list);
        unregister_netdevice_queue(dev, head);
        netdev_upper_dev_unlink(vlan->lowerdev, dev);
 }
index ed947dd76fbd1ded245f3e649dcf920cbe86320f..f3cdf64997d60dd6937ba8885dab3c72525b1f2b 100644 (file)
@@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
        if (dev == NULL)
                return;
 
+       list_del(&dev->list);
+
        ndev = dev->ndev;
 
        unregister_netdev(ndev);
index c14f14741b3f7a8c4a6634d2f8b4b0b9c254d5df..38f0b312ff85e339bc742c8472ef244bb9c50a40 100644 (file)
@@ -1044,7 +1044,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
                lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
                idx = phy_find_setting(phydev->speed, phydev->duplex);
-               if ((lp & adv & settings[idx].setting))
+               if (!(lp & adv & settings[idx].setting))
                        goto eee_exit;
 
                if (clk_stop_enable) {
index 7c43261975bd1d57e93a4b35382e42c92891cfb1..b3051052f3ad5ee1bcd4c53a5c3bdf5370c057f2 100644 (file)
@@ -1092,8 +1092,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
        }
 
        port->index = -1;
-       team_port_enable(team, port);
        list_add_tail_rcu(&port->list, &team->port_list);
+       team_port_enable(team, port);
        __team_compute_features(team);
        __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
        __team_options_change_check(team);
@@ -2374,7 +2374,8 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
        bool incomplete;
        int i;
 
-       port = list_first_entry(&team->port_list, struct team_port, list);
+       port = list_first_entry_or_null(&team->port_list,
+                                       struct team_port, list);
 
 start_again:
        err = __send_and_alloc_skb(&skb, team, portid, send_func);
@@ -2402,8 +2403,8 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
                err = team_nl_fill_one_port_get(skb, one_port);
                if (err)
                        goto errout;
-       } else {
-               list_for_each_entry(port, &team->port_list, list) {
+       } else if (port) {
+               list_for_each_entry_from(port, &team->port_list, list) {
                        err = team_nl_fill_one_port_get(skb, port);
                        if (err) {
                                if (err == -EMSGSIZE) {
index 5ca14d463ba7d897931b41b8c8f515a67ff9829c..7f032e2113437f1ca6b2952ba4ccc28e56dd20ba 100644 (file)
@@ -28,6 +28,8 @@ static bool rnd_transmit(struct team *team, struct sk_buff *skb)
 
        port_index = random_N(team->en_port_count);
        port = team_get_port_by_index_rcu(team, port_index);
+       if (unlikely(!port))
+               goto drop;
        port = team_get_first_port_txable_rcu(team, port);
        if (unlikely(!port))
                goto drop;
index d268e4de781b46b3cf2e3364770098f43f48d760..472623f8ce3d1c3cb20efe4403e50341b6cbc86e 100644 (file)
@@ -32,6 +32,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
 
        port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
        port = team_get_port_by_index_rcu(team, port_index);
+       if (unlikely(!port))
+               goto drop;
        port = team_get_first_port_txable_rcu(team, port);
        if (unlikely(!port))
                goto drop;
index f042b0373e5ddec6a8a85703843b89a43cbeb1f7..bfa9bb48e42d5cfc06e299e64809e29ea3097b5b 100644 (file)
@@ -352,7 +352,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
        u32 numqueues = 0;
 
        rcu_read_lock();
-       numqueues = tun->numqueues;
+       numqueues = ACCESS_ONCE(tun->numqueues);
 
        txq = skb_get_rxhash(skb);
        if (txq) {
@@ -1585,6 +1585,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                else
                        return -EINVAL;
 
+               if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
+                   !!(tun->flags & TUN_TAP_MQ))
+                       return -EINVAL;
+
                if (tun_not_capable(tun))
                        return -EPERM;
                err = security_tun_dev_open(tun->security);
@@ -2155,6 +2159,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
        set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
        INIT_LIST_HEAD(&tfile->next);
 
+       sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
+
        return 0;
 }
 
index 078795fe6e312f22348d381e03b07c4274e07f87..04ee044dde511badbe8117f45faed74ee92b93be 100644 (file)
@@ -627,6 +627,12 @@ static const struct usb_device_id  products [] = {
        .driver_info = 0,
 },
 
+/* Huawei E1820 - handled by qmi_wwan */
+{
+       USB_DEVICE_INTERFACE_NUMBER(HUAWEI_VENDOR_ID, 0x14ac, 1),
+       .driver_info = 0,
+},
+
 /* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
 #if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
 {
index cf887c2384e95004547bf56ef7dc8ea0a67e66e5..d095d0d3056b82e05df3daddae6fe0d10bb10564 100644 (file)
@@ -519,6 +519,7 @@ static const struct usb_device_id products[] = {
        /* 3. Combined interface devices matching on interface number */
        {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
+       {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
        {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -582,6 +583,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
+       {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)},    /* Cinterion PLxx */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index a491d3a95393e4c533918c572ab7ba9256fa48ec..6cbdac67f3a0d3f899b0a3ca783d427b2e1c9704 100644 (file)
@@ -130,19 +130,23 @@ struct rtl8150 {
        struct usb_device *udev;
        struct tasklet_struct tl;
        struct net_device *netdev;
-       struct urb *rx_urb, *tx_urb, *intr_urb, *ctrl_urb;
+       struct urb *rx_urb, *tx_urb, *intr_urb;
        struct sk_buff *tx_skb, *rx_skb;
        struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE];
        spinlock_t rx_pool_lock;
        struct usb_ctrlrequest dr;
        int intr_interval;
-       __le16 rx_creg;
        u8 *intr_buff;
        u8 phy;
 };
 
 typedef struct rtl8150 rtl8150_t;
 
+struct async_req {
+       struct usb_ctrlrequest dr;
+       u16 rx_creg;
+};
+
 static const char driver_name [] = "rtl8150";
 
 /*
@@ -164,51 +168,47 @@ static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
                               indx, 0, data, size, 500);
 }
 
-static void ctrl_callback(struct urb *urb)
+static void async_set_reg_cb(struct urb *urb)
 {
-       rtl8150_t *dev;
+       struct async_req *req = (struct async_req *)urb->context;
        int status = urb->status;
 
-       switch (status) {
-       case 0:
-               break;
-       case -EINPROGRESS:
-               break;
-       case -ENOENT:
-               break;
-       default:
-               if (printk_ratelimit())
-                       dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status);
-       }
-       dev = urb->context;
-       clear_bit(RX_REG_SET, &dev->flags);
+       if (status < 0)
+               dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status);
+       kfree(req);
+       usb_free_urb(urb);
 }
 
-static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size)
+static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg)
 {
-       int ret;
-
-       if (test_bit(RX_REG_SET, &dev->flags))
-               return -EAGAIN;
+       int res = -ENOMEM;
+       struct urb *async_urb;
+       struct async_req *req;
 
-       dev->dr.bRequestType = RTL8150_REQT_WRITE;
-       dev->dr.bRequest = RTL8150_REQ_SET_REGS;
-       dev->dr.wValue = cpu_to_le16(indx);
-       dev->dr.wIndex = 0;
-       dev->dr.wLength = cpu_to_le16(size);
-       dev->ctrl_urb->transfer_buffer_length = size;
-       usb_fill_control_urb(dev->ctrl_urb, dev->udev,
-                        usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr,
-                        &dev->rx_creg, size, ctrl_callback, dev);
-       if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) {
-               if (ret == -ENODEV)
+       req = kmalloc(sizeof(struct async_req), GFP_ATOMIC);
+       if (req == NULL)
+               return res;
+       async_urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (async_urb == NULL) {
+               kfree(req);
+               return res;
+       }
+       req->rx_creg = cpu_to_le16(reg);
+       req->dr.bRequestType = RTL8150_REQT_WRITE;
+       req->dr.bRequest = RTL8150_REQ_SET_REGS;
+       req->dr.wIndex = 0;
+       req->dr.wValue = cpu_to_le16(indx);
+       req->dr.wLength = cpu_to_le16(size);
+       usb_fill_control_urb(async_urb, dev->udev,
+                            usb_sndctrlpipe(dev->udev, 0), (void *)&req->dr,
+                            &req->rx_creg, size, async_set_reg_cb, req);
+       res = usb_submit_urb(async_urb, GFP_ATOMIC);
+       if (res) {
+               if (res == -ENODEV)
                        netif_device_detach(dev->netdev);
-               dev_err(&dev->udev->dev,
-                       "control request submission failed: %d\n", ret);
-       } else
-               set_bit(RX_REG_SET, &dev->flags);
-
-       return ret;
+               dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res);
+       }
+       return res;
 }
 
 static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg)
@@ -330,13 +330,6 @@ static int alloc_all_urbs(rtl8150_t * dev)
                usb_free_urb(dev->tx_urb);
                return 0;
        }
-       dev->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
-       if (!dev->ctrl_urb) {
-               usb_free_urb(dev->rx_urb);
-               usb_free_urb(dev->tx_urb);
-               usb_free_urb(dev->intr_urb);
-               return 0;
-       }
 
        return 1;
 }
@@ -346,7 +339,6 @@ static void free_all_urbs(rtl8150_t * dev)
        usb_free_urb(dev->rx_urb);
        usb_free_urb(dev->tx_urb);
        usb_free_urb(dev->intr_urb);
-       usb_free_urb(dev->ctrl_urb);
 }
 
 static void unlink_all_urbs(rtl8150_t * dev)
@@ -354,7 +346,6 @@ static void unlink_all_urbs(rtl8150_t * dev)
        usb_kill_urb(dev->rx_urb);
        usb_kill_urb(dev->tx_urb);
        usb_kill_urb(dev->intr_urb);
-       usb_kill_urb(dev->ctrl_urb);
 }
 
 static inline struct sk_buff *pull_skb(rtl8150_t *dev)
@@ -629,7 +620,6 @@ static int enable_net_traffic(rtl8150_t * dev)
        }
        /* RCR bit7=1 attach Rx info at the end;  =0 HW CRC (which is broken) */
        rcr = 0x9e;
-       dev->rx_creg = cpu_to_le16(rcr);
        tcr = 0xd8;
        cr = 0x0c;
        if (!(rcr & 0x80))
@@ -662,20 +652,22 @@ static void rtl8150_tx_timeout(struct net_device *netdev)
 static void rtl8150_set_multicast(struct net_device *netdev)
 {
        rtl8150_t *dev = netdev_priv(netdev);
+       u16 rx_creg = 0x9e;
+
        netif_stop_queue(netdev);
        if (netdev->flags & IFF_PROMISC) {
-               dev->rx_creg |= cpu_to_le16(0x0001);
+               rx_creg |= 0x0001;
                dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
        } else if (!netdev_mc_empty(netdev) ||
                   (netdev->flags & IFF_ALLMULTI)) {
-               dev->rx_creg &= cpu_to_le16(0xfffe);
-               dev->rx_creg |= cpu_to_le16(0x0002);
+               rx_creg &= 0xfffe;
+               rx_creg |= 0x0002;
                dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
        } else {
                /* ~RX_MULTICAST, ~RX_PROMISCUOUS */
-               dev->rx_creg &= cpu_to_le16(0x00fc);
+               rx_creg &= 0x00fc;
        }
-       async_set_registers(dev, RCR, 2);
+       async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
        netif_wake_queue(netdev);
 }
 
index f95cb032394bb03f05b7ae2605b7b9585e8e3c67..06ee82f557d45ba31b4847c187f57771ae2c73d2 100644 (file)
@@ -1477,7 +1477,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 
        /* usbnet already took usb runtime pm, so have to enable the feature
         * for usb interface, otherwise usb_autopm_get_interface may return
-        * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
+        * failure if RUNTIME_PM is enabled.
         */
        if (!driver->supports_autosuspend) {
                driver->supports_autosuspend = 1;
index 3c23fdc27bf0798085361a64d6262cdaed34c66b..c9e00387d9996e3c5b0660a57bf296e4ff15f3d5 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 
-static int napi_weight = 128;
+static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
 
 static bool csum = true, gso = true;
@@ -636,10 +636,11 @@ static int virtnet_open(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i;
 
-       for (i = 0; i < vi->curr_queue_pairs; i++) {
-               /* Make sure we have some buffers: if oom use wq. */
-               if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
-                       schedule_delayed_work(&vi->refill, 0);
+       for (i = 0; i < vi->max_queue_pairs; i++) {
+               if (i < vi->curr_queue_pairs)
+                       /* Make sure we have some buffers: if oom use wq. */
+                       if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+                               schedule_delayed_work(&vi->refill, 0);
                virtnet_napi_enable(&vi->rq[i]);
        }
 
index ba81f3c39a837af31af507dd1b2c7f4a599fe479..3b1d2ee7156b00195376c674f605f5749a2daf64 100644 (file)
@@ -301,7 +301,7 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
 }
 
 /* Look up Ethernet address in forwarding table */
-static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
                                        const u8 *mac)
 
 {
@@ -316,6 +316,18 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
        return NULL;
 }
 
+static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+                                       const u8 *mac)
+{
+       struct vxlan_fdb *f;
+
+       f = __vxlan_find_mac(vxlan, mac);
+       if (f)
+               f->used = jiffies;
+
+       return f;
+}
+
 /* Add/update destinations for multicast */
 static int vxlan_fdb_append(struct vxlan_fdb *f,
                            __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
@@ -353,7 +365,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
        struct vxlan_fdb *f;
        int notify = 0;
 
-       f = vxlan_find_mac(vxlan, mac);
+       f = __vxlan_find_mac(vxlan, mac);
        if (f) {
                if (flags & NLM_F_EXCL) {
                        netdev_dbg(vxlan->dev,
@@ -563,7 +575,6 @@ static void vxlan_snoop(struct net_device *dev,
 
        f = vxlan_find_mac(vxlan, src_mac);
        if (likely(f)) {
-               f->used = jiffies;
                if (likely(f->remote.remote_ip == src_ip))
                        return;
 
index f802e7c923561d2bd331402b192c99aac6da9ff1..2dacd19e1b8a15c5a7c8d66db2eb7cb2793a58ed 100644 (file)
@@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
  */
 void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
 {
-       if (mw > NTB_NUM_MW)
+       if (mw >= NTB_NUM_MW)
                return NULL;
 
        return ndev->mw[mw].vbase;
@@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
  */
 resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
 {
-       if (mw > NTB_NUM_MW)
+       if (mw >= NTB_NUM_MW)
                return 0;
 
        return ndev->mw[mw].bar_sz;
@@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
  */
 void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
 {
-       if (mw > NTB_NUM_MW)
+       if (mw >= NTB_NUM_MW)
                return;
 
        dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
@@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                ndev->mw[i].vbase =
                    ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
                               ndev->mw[i].bar_sz);
-               dev_info(&pdev->dev, "MW %d size %d\n", i,
-                        (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
+               dev_info(&pdev->dev, "MW %d size %llu\n", i,
+                        pci_resource_len(pdev, MW_TO_BAR(i)));
                if (!ndev->mw[i].vbase) {
                        dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
                                 MW_TO_BAR(i));
index e0bdfd7f9930aab4b42984b468ff87fac2868f44..f8d7081ee3014322e90c767fd766456f5410d311 100644 (file)
@@ -58,7 +58,7 @@
 #include <linux/ntb.h>
 #include "ntb_hw.h"
 
-#define NTB_TRANSPORT_VERSION  2
+#define NTB_TRANSPORT_VERSION  3
 
 static unsigned int transport_mtu = 0x401E;
 module_param(transport_mtu, uint, 0644);
@@ -173,10 +173,13 @@ struct ntb_payload_header {
 
 enum {
        VERSION = 0,
-       MW0_SZ,
-       MW1_SZ,
-       NUM_QPS,
        QP_LINKS,
+       NUM_QPS,
+       NUM_MWS,
+       MW0_SZ_HIGH,
+       MW0_SZ_LOW,
+       MW1_SZ_HIGH,
+       MW1_SZ_LOW,
        MAX_SPAD,
 };
 
@@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name)
 {
        struct ntb_transport_client_dev *client_dev;
        struct ntb_transport *nt;
-       int rc;
+       int rc, i = 0;
 
        if (list_empty(&ntb_transport_list))
                return -ENODEV;
@@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name)
                dev = &client_dev->dev;
 
                /* setup and register client devices */
-               dev_set_name(dev, "%s", device_name);
+               dev_set_name(dev, "%s%d", device_name, i);
                dev->bus = &ntb_bus_type;
                dev->release = ntb_client_release;
                dev->parent = &ntb_query_pdev(nt->ndev)->dev;
@@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name)
                }
 
                list_add_tail(&client_dev->entry, &nt->client_devs);
+               i++;
        }
 
        return 0;
@@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
                             (qp_num / NTB_NUM_MW * rx_size);
        rx_size -= sizeof(struct ntb_rx_info);
 
-       qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
-       qp->rx_max_frame = min(transport_mtu, rx_size);
+       qp->rx_buff = qp->remote_rx_info + 1;
+       /* Due to housekeeping, there must be at least 2 buffs */
+       qp->rx_max_frame = min(transport_mtu, rx_size / 2);
        qp->rx_max_entry = rx_size / qp->rx_max_frame;
        qp->rx_index = 0;
 
-       qp->remote_rx_info->entry = qp->rx_max_entry;
+       qp->remote_rx_info->entry = qp->rx_max_entry - 1;
 
        /* setup the hdr offsets with 0's */
        for (i = 0; i < qp->rx_max_entry; i++) {
@@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 
        qp->rx_pkts = 0;
        qp->tx_pkts = 0;
+       qp->tx_index = 0;
+}
+
+static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
+{
+       struct ntb_transport_mw *mw = &nt->mw[num_mw];
+       struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+       if (!mw->virt_addr)
+               return;
+
+       dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
+       mw->virt_addr = NULL;
 }
 
 static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
@@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
        struct ntb_transport_mw *mw = &nt->mw[num_mw];
        struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
 
+       /* No need to re-setup */
+       if (mw->size == ALIGN(size, 4096))
+               return 0;
+
+       if (mw->size != 0)
+               ntb_free_mw(nt, num_mw);
+
        /* Alloc memory for receiving data.  Must be 4k aligned */
        mw->size = ALIGN(size, 4096);
 
        mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
                                           GFP_KERNEL);
        if (!mw->virt_addr) {
+               mw->size = 0;
                dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
                       (int) mw->size);
                return -ENOMEM;
@@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work)
        u32 val;
        int rc, i;
 
-       /* send the local info */
-       rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
-       if (rc) {
-               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       0, VERSION);
-               goto out;
-       }
+       /* send the local info, in the opposite order from how we read it */
+       for (i = 0; i < NTB_NUM_MW; i++) {
+               rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
+                                          ntb_get_mw_size(ndev, i) >> 32);
+               if (rc) {
+                       dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+                               (u32)(ntb_get_mw_size(ndev, i) >> 32),
+                               MW0_SZ_HIGH + (i * 2));
+                       goto out;
+               }
 
-       rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
-       if (rc) {
-               dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       (u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
-               goto out;
+               rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
+                                          (u32) ntb_get_mw_size(ndev, i));
+               if (rc) {
+                       dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+                               (u32) ntb_get_mw_size(ndev, i),
+                               MW0_SZ_LOW + (i * 2));
+                       goto out;
+               }
        }
 
-       rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
+       rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
        if (rc) {
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       (u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
+                       NTB_NUM_MW, NUM_MWS);
                goto out;
        }
 
@@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work)
                goto out;
        }
 
-       rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
-       if (rc) {
-               dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
-               goto out;
-       }
-
-       rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
+       rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
        if (rc) {
                dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-                       val, QP_LINKS);
+                       NTB_TRANSPORT_VERSION, VERSION);
                goto out;
        }
 
@@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work)
                goto out;
        dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
 
-       rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
+       rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
        if (rc) {
-               dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
+               dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
                goto out;
        }
 
-       if (!val)
+       if (val != NTB_NUM_MW)
                goto out;
-       dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
+       dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
 
-       rc = ntb_set_mw(nt, 0, val);
-       if (rc)
-               goto out;
+       for (i = 0; i < NTB_NUM_MW; i++) {
+               u64 val64;
 
-       rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
-       if (rc) {
-               dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
-               goto out;
-       }
+               rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
+               if (rc) {
+                       dev_err(&pdev->dev, "Error reading remote spad %d\n",
+                               MW0_SZ_HIGH + (i * 2));
+                       goto out1;
+               }
 
-       if (!val)
-               goto out;
-       dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
+               val64 = (u64) val << 32;
 
-       rc = ntb_set_mw(nt, 1, val);
-       if (rc)
-               goto out;
+               rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
+               if (rc) {
+                       dev_err(&pdev->dev, "Error reading remote spad %d\n",
+                               MW0_SZ_LOW + (i * 2));
+                       goto out1;
+               }
+
+               val64 |= val;
+
+               dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
+
+               rc = ntb_set_mw(nt, i, val64);
+               if (rc)
+                       goto out1;
+       }
 
        nt->transport_link = NTB_LINK_UP;
 
@@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work)
 
        return;
 
+out1:
+       for (i = 0; i < NTB_NUM_MW; i++)
+               ntb_free_mw(nt, i);
 out:
        if (ntb_hw_link_status(ndev))
                schedule_delayed_work(&nt->link_work,
@@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
                      (qp_num / NTB_NUM_MW * tx_size);
        tx_size -= sizeof(struct ntb_rx_info);
 
-       qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
-       qp->tx_max_frame = min(transport_mtu, tx_size);
+       qp->tx_mw = qp->rx_info + 1;
+       /* Due to housekeeping, there must be at least 2 buffs */
+       qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;
-       qp->tx_index = 0;
 
        if (nt->debugfs_dir) {
                char debugfs_name[4];
@@ -897,10 +936,7 @@ void ntb_transport_free(void *transport)
        pdev = ntb_query_pdev(nt->ndev);
 
        for (i = 0; i < NTB_NUM_MW; i++)
-               if (nt->mw[i].virt_addr)
-                       dma_free_coherent(&pdev->dev, nt->mw[i].size,
-                                         nt->mw[i].virt_addr,
-                                         nt->mw[i].dma_addr);
+               ntb_free_mw(nt, i);
 
        kfree(nt->qps);
        ntb_unregister_transport(nt->ndev);
@@ -999,11 +1035,16 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 static void ntb_transport_rx(unsigned long data)
 {
        struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
-       int rc;
+       int rc, i;
 
-       do {
+       /* Limit the number of packets processed in a single interrupt to
+        * provide fairness to others
+        */
+       for (i = 0; i < qp->rx_max_entry; i++) {
                rc = ntb_process_rxc(qp);
-       } while (!rc);
+               if (rc)
+                       break;
+       }
 }
 
 static void ntb_transport_rxc_db(void *data, int db_num)
@@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-       struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+       struct pci_dev *pdev;
        struct ntb_queue_entry *entry;
 
        if (!qp)
                return;
 
+       pdev = ntb_query_pdev(qp->ndev);
+
        cancel_delayed_work_sync(&qp->link_work);
 
        ntb_unregister_db_callback(qp->ndev, qp->qp_num);
@@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
  */
 void ntb_transport_link_down(struct ntb_transport_qp *qp)
 {
-       struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+       struct pci_dev *pdev;
        int rc, val;
 
        if (!qp)
                return;
 
+       pdev = ntb_query_pdev(qp->ndev);
        qp->client_ready = NTB_LINK_DOWN;
 
        rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
@@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down);
  */
 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
 {
+       if (!qp)
+               return false;
+
        return qp->qp_link == NTB_LINK_UP;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
@@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query);
  */
 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
 {
+       if (!qp)
+               return 0;
+
        return qp->qp_num;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
@@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
  */
 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
 {
+       if (!qp)
+               return 0;
+
        return qp->tx_max_frame - sizeof(struct ntb_payload_header);
 }
 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
index c76d16c972cc6cfaf6ce08fd56ebe89020508032..f53b992f060a1eaa50fb9de6afda8edb6bf49e69 100644 (file)
@@ -1208,11 +1208,11 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
                                out_args->args_count = count;
                                for (i = 0; i < count; i++)
                                        out_args->args[i] = be32_to_cpup(list++);
+                       } else {
+                               of_node_put(node);
                        }
 
                        /* Found it! return success */
-                       if (node)
-                               of_node_put(node);
                        return 0;
                }
 
index 2ef7103270bb7eb250ae15e16650c17a46a643d7..1f05913ae677e2724c78729a324a01ac1d2afbbd 100644 (file)
@@ -668,7 +668,7 @@ lba_fixup_bus(struct pci_bus *bus)
                        BUG();
                }
 
-               if (ldev->hba.elmmio_space.start) {
+               if (ldev->hba.elmmio_space.flags) {
                        err = request_resource(&iomem_resource,
                                        &(ldev->hba.elmmio_space));
                        if (err < 0) {
@@ -993,7 +993,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
 
                case PAT_LMMIO:
                        /* used to fix up pre-initialized MEM BARs */
-                       if (!lba_dev->hba.lmmio_space.start) {
+                       if (!lba_dev->hba.lmmio_space.flags) {
                                sprintf(lba_dev->hba.lmmio_name,
                                                "PCI%02x LMMIO",
                                                (int)lba_dev->hba.bus_num.start);
@@ -1001,7 +1001,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
                                        io->start;
                                r = &lba_dev->hba.lmmio_space;
                                r->name = lba_dev->hba.lmmio_name;
-                       } else if (!lba_dev->hba.elmmio_space.start) {
+                       } else if (!lba_dev->hba.elmmio_space.flags) {
                                sprintf(lba_dev->hba.elmmio_name,
                                                "PCI%02x ELMMIO",
                                                (int)lba_dev->hba.bus_num.start);
@@ -1096,6 +1096,7 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
        r->name = "LBA PCI Busses";
        r->start = lba_num & 0xff;
        r->end = (lba_num>>8) & 0xff;
+       r->flags = IORESOURCE_BUS;
 
        /* Set up local PCI Bus resources - we don't need them for
        ** Legacy boxes but it's nice to see in /proc/iomem.
@@ -1494,7 +1495,7 @@ lba_driver_probe(struct parisc_device *dev)
 
        pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
                                HBA_PORT_BASE(lba_dev->hba.hba_num));
-       if (lba_dev->hba.elmmio_space.start)
+       if (lba_dev->hba.elmmio_space.flags)
                pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
                                        lba_dev->hba.lmmio_space_offset);
        if (lba_dev->hba.lmmio_space.flags)
index ac6e8e7a02df079222725fc35074045f249719b2..a042d065a0c757bdee271718808c349c6a07898e 100644 (file)
@@ -494,15 +494,4 @@ static struct pci_driver superio_driver = {
        .probe =        superio_probe,
 };
 
-static int __init superio_modinit(void)
-{
-       return pci_register_driver(&superio_driver);
-}
-
-static void __exit superio_exit(void)
-{
-       pci_unregister_driver(&superio_driver);
-}
-
-module_init(superio_modinit);
-module_exit(superio_exit);
+module_pci_driver(superio_driver);
index 24e12d4d17699ab41601676ccb1f342b9cfdbc87..a50576081b34dd998fbbd8aca4fd0c154df5420e 100644 (file)
@@ -71,7 +71,7 @@ config PARPORT_PC_FIFO
 
 config PARPORT_PC_SUPERIO
        bool "SuperIO chipset support"
-       depends on PARPORT_PC
+       depends on PARPORT_PC && !PARISC
        help
          Saying Y here enables some probes for Super-IO chipsets in order to
          find out things like base addresses, IRQ lines and DMA channels.  It
index a5251cb5fb0c616f4254c025bbd77d5925014a2f..6e3a60c788736a1e01c6d56377a4222d5e880e4a 100644 (file)
@@ -234,7 +234,7 @@ static int parport_PS2_supported(struct parport *pb)
 
 struct parport *parport_gsc_probe_port(unsigned long base,
                                       unsigned long base_hi, int irq,
-                                      int dma, struct pci_dev *dev)
+                                      int dma, struct parisc_device *padev)
 {
        struct parport_gsc_private *priv;
        struct parport_operations *ops;
@@ -258,7 +258,6 @@ struct parport *parport_gsc_probe_port(unsigned long base,
        priv->ctr_writable = 0xff;
        priv->dma_buf = 0;
        priv->dma_handle = 0;
-       priv->dev = dev;
        p->base = base;
        p->base_hi = base_hi;
        p->irq = irq;
@@ -282,6 +281,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
                return NULL;
        }
 
+       p->dev = &padev->dev;
        p->base_hi = base_hi;
        p->modes = tmp.modes;
        p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
@@ -373,7 +373,7 @@ static int parport_init_chip(struct parisc_device *dev)
        }
        
        p = parport_gsc_probe_port(port, 0, dev->irq,
-                       /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL);
+                       /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, dev);
        if (p)
                parport_count++;
        dev_set_drvdata(&dev->dev, p);
index fc9c37c5402222db388e294f50092387f6de5bb6..812214768d27e511b42e18c24d7048a8df57c4c2 100644 (file)
@@ -217,6 +217,6 @@ extern void parport_gsc_dec_use_count(void);
 extern struct parport *parport_gsc_probe_port(unsigned long base,
                                                unsigned long base_hi,
                                                int irq, int dma,
-                                               struct pci_dev *dev);
+                                               struct parisc_device *padev);
 
 #endif /* __DRIVERS_PARPORT_PARPORT_GSC_H */
index 96fed19c6d90358833e37d1e9f55b1899b2d11b8..716aa93fff76437ab0038de385d7c4fa7ec04834 100644 (file)
@@ -950,6 +950,20 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
        return AE_OK ;
 }
 
+void acpiphp_check_host_bridge(acpi_handle handle)
+{
+       struct acpiphp_bridge *bridge;
+
+       bridge = acpiphp_handle_to_bridge(handle);
+       if (bridge) {
+               acpiphp_check_bridge(bridge);
+               put_bridge(bridge);
+       }
+
+       acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+               ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
+}
+
 static void _handle_hotplug_event_bridge(struct work_struct *work)
 {
        struct acpiphp_bridge *bridge;
index 8ec8b4f485604e384de9bf7eddfe2c97c4cae5d5..0f4554e48cc5f52850d766078dff76f8c1cdc4d0 100644 (file)
@@ -580,6 +580,7 @@ struct aer_recover_entry
        u8      devfn;
        u16     domain;
        int     severity;
+       struct aer_capability_regs *regs;
 };
 
 static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
@@ -593,7 +594,7 @@ static DEFINE_SPINLOCK(aer_recover_ring_lock);
 static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
 
 void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
-                      int severity)
+                      int severity, struct aer_capability_regs *aer_regs)
 {
        unsigned long flags;
        struct aer_recover_entry entry = {
@@ -601,6 +602,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
                .devfn          = devfn,
                .domain         = domain,
                .severity       = severity,
+               .regs           = aer_regs,
        };
 
        spin_lock_irqsave(&aer_recover_ring_lock, flags);
@@ -627,6 +629,7 @@ static void aer_recover_work_func(struct work_struct *work)
                               PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
                        continue;
                }
+               cper_print_aer(pdev, entry.severity, entry.regs);
                do_recovery(pdev, entry.severity);
                pci_dev_put(pdev);
        }
index 5ab14251839d0f06c395ac0dfc064c166cf3b75a..2c7c9f5f592caa4253b862775bfbb2ee3854af7d 100644 (file)
@@ -220,7 +220,7 @@ int cper_severity_to_aer(int cper_severity)
 }
 EXPORT_SYMBOL_GPL(cper_severity_to_aer);
 
-void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity,
+void cper_print_aer(struct pci_dev *dev, int cper_severity,
                    struct aer_capability_regs *aer)
 {
        int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
@@ -244,7 +244,7 @@ void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity,
        agent = AER_GET_AGENT(aer_severity, status);
        dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
               status, mask);
-       cper_print_bits(prefix, status, status_strs, status_strs_size);
+       cper_print_bits("", status, status_strs, status_strs_size);
        dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
               aer_error_layer[layer], aer_agent_string[agent]);
        if (aer_severity != AER_CORRECTABLE)
index a3a851e49321542ac84659f4f3315219163430ae..18c0d8d1ddf779dbba8f09d7b898b7b687bd0a74 100644 (file)
@@ -68,12 +68,6 @@ MODULE_LICENSE("Dual MPL/GPL");
 
 #if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B)
 
-/* The RPX series use SLOT_B */
-#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
-#define CONFIG_PCMCIA_SLOT_B
-#define CONFIG_BD_IS_MHZ
-#endif
-
 /* The ADS board use SLOT_A */
 #ifdef CONFIG_ADS
 #define CONFIG_PCMCIA_SLOT_A
@@ -253,81 +247,6 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev);
 
 #define PCMCIA_BMT_LIMIT (15*4)        /* Bus Monitor Timeout value */
 
-/* ------------------------------------------------------------------------- */
-/* board specific stuff:                                                     */
-/* voltage_set(), hardware_enable() and hardware_disable()                   */
-/* ------------------------------------------------------------------------- */
-/* RPX Boards from Embedded Planet                                           */
-
-#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
-
-/* The RPX boards seems to have it's bus monitor timeout set to 6*8 clocks.
- * SYPCR is write once only, therefore must the slowest memory be faster
- * than the bus monitor or we will get a machine check due to the bus timeout.
- */
-
-#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE"
-
-#undef PCMCIA_BMT_LIMIT
-#define PCMCIA_BMT_LIMIT (6*8)
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
-       u32 reg = 0;
-
-       switch (vcc) {
-       case 0:
-               break;
-       case 33:
-               reg |= BCSR1_PCVCTL4;
-               break;
-       case 50:
-               reg |= BCSR1_PCVCTL5;
-               break;
-       default:
-               return 1;
-       }
-
-       switch (vpp) {
-       case 0:
-               break;
-       case 33:
-       case 50:
-               if (vcc == vpp)
-                       reg |= BCSR1_PCVCTL6;
-               else
-                       return 1;
-               break;
-       case 120:
-               reg |= BCSR1_PCVCTL7;
-       default:
-               return 1;
-       }
-
-       if (!((vcc == 50) || (vcc == 0)))
-               return 1;
-
-       /* first, turn off all power */
-
-       out_be32(((u32 *) RPX_CSR_ADDR),
-                in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
-                                                    BCSR1_PCVCTL5 |
-                                                    BCSR1_PCVCTL6 |
-                                                    BCSR1_PCVCTL7));
-
-       /* enable new powersettings */
-
-       out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
-
-       return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_)        /* No hardware to enable */
-#define hardware_disable(_slot_)       /* No hardware to disable */
-
-#endif                         /* CONFIG_RPXCLASSIC */
-
 /* FADS Boards from Motorola                                               */
 
 #if defined(CONFIG_FADS)
@@ -419,65 +338,6 @@ static inline int voltage_set(int slot, int vcc, int vpp)
 
 #endif
 
-/* ------------------------------------------------------------------------- */
-/* Motorola MBX860                                                           */
-
-#if defined(CONFIG_MBX)
-
-#define PCMCIA_BOARD_MSG "MBX"
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
-       u8 reg = 0;
-
-       switch (vcc) {
-       case 0:
-               break;
-       case 33:
-               reg |= CSR2_VCC_33;
-               break;
-       case 50:
-               reg |= CSR2_VCC_50;
-               break;
-       default:
-               return 1;
-       }
-
-       switch (vpp) {
-       case 0:
-               break;
-       case 33:
-       case 50:
-               if (vcc == vpp)
-                       reg |= CSR2_VPP_VCC;
-               else
-                       return 1;
-               break;
-       case 120:
-               if ((vcc == 33) || (vcc == 50))
-                       reg |= CSR2_VPP_12;
-               else
-                       return 1;
-       default:
-               return 1;
-       }
-
-       /* first, turn off all power */
-       out_8((u8 *) MBX_CSR2_ADDR,
-             in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
-
-       /* enable new powersettings */
-       out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
-
-       return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_)        /* No hardware to enable */
-#define hardware_disable(_slot_)       /* No hardware to disable */
-
-#endif                         /* CONFIG_MBX */
-
 #if defined(CONFIG_PRxK)
 #include <asm/cpld.h>
 extern volatile fpga_pc_regs *fpga_pc;
index c67c37e23dd77846d2fd3bbd18bf102349064b57..694c3ace45204c11eeed4518f82b901c92489b59 100644 (file)
@@ -610,7 +610,7 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
        bool found = false;
        unsigned long config;
 
-       mutex_lock(&pctldev->mutex);
+       mutex_lock(&pinctrl_maps_mutex);
 
        /* Parse the pinctrl map and look for the elected pin/state */
        for_each_maps(maps_node, i, map) {
@@ -659,7 +659,7 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
                confops->pin_config_config_dbg_show(pctldev, s, config);
 
 exit:
-       mutex_unlock(&pctldev->mutex);
+       mutex_unlock(&pinctrl_maps_mutex);
 
        return 0;
 }
index aa17f7580f617b05d7469c372656922f229a9803..6d4532702f809b5cc52629fbdc7ad238412153f3 100644 (file)
@@ -851,23 +851,12 @@ static int abx500_gpio_probe(struct platform_device *pdev)
 
        if (abx500_pdata)
                pdata = abx500_pdata->gpio;
-       if (!pdata) {
-               if (np) {
-                       const struct of_device_id *match;
 
-                       match = of_match_device(abx500_gpio_match, &pdev->dev);
-                       if (!match)
-                               return -ENODEV;
-                       id = (unsigned long)match->data;
-               } else {
-                       dev_err(&pdev->dev, "gpio dt and platform data missing\n");
-                       return -ENODEV;
-               }
+       if (!(pdata || np)) {
+               dev_err(&pdev->dev, "gpio dt and platform data missing\n");
+               return -ENODEV;
        }
 
-       if (platid)
-               id = platid->driver_data;
-
        pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl),
                                   GFP_KERNEL);
        if (pct == NULL) {
@@ -882,6 +871,16 @@ static int abx500_gpio_probe(struct platform_device *pdev)
        pct->chip.dev = &pdev->dev;
        pct->chip.base = (np) ? -1 : pdata->gpio_base;
 
+       if (platid)
+               id = platid->driver_data;
+       else if (np) {
+               const struct of_device_id *match;
+
+               match = of_match_device(abx500_gpio_match, &pdev->dev);
+               if (match)
+                       id = (unsigned long)match->data;
+       }
+
        /* initialize the lock */
        mutex_init(&pct->lock);
 
@@ -900,8 +899,7 @@ static int abx500_gpio_probe(struct platform_device *pdev)
                abx500_pinctrl_ab8505_init(&pct->soc);
                break;
        default:
-               dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n",
-                               (int) platid->driver_data);
+               dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", id);
                mutex_destroy(&pct->lock);
                return -EINVAL;
        }
index edde3acc41864b5d9770be7696e553558d428902..d6b41747d687e13c188340ed08a3027c9646245d 100644 (file)
@@ -713,11 +713,6 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
        gpio->dev = &pdev->dev;
 
        memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!memres) {
-               dev_err(gpio->dev, "could not get GPIO memory resource\n");
-               return -ENODEV;
-       }
-
        gpio->base = devm_ioremap_resource(&pdev->dev, memres);
        if (IS_ERR(gpio->base))
                return PTR_ERR(gpio->base);
@@ -835,7 +830,8 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
        return 0;
 
 err_no_range:
-       err = gpiochip_remove(&gpio->chip);
+       if (gpiochip_remove(&gpio->chip))
+               dev_err(&pdev->dev, "failed to remove gpio chip\n");
 err_no_chip:
 err_no_domain:
 err_no_port:
index ac742817ebceeebf1797a27b6f1995931a9df363..2d76f66a2e0b90c3a3ac1cfe9036a42840c15a43 100644 (file)
@@ -196,6 +196,12 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+struct exynos_eint_gpio_save {
+       u32 eint_con;
+       u32 eint_fltcon0;
+       u32 eint_fltcon1;
+};
+
 /*
  * exynos_eint_gpio_init() - setup handling of external gpio interrupts.
  * @d: driver data of samsung pinctrl driver.
@@ -204,8 +210,8 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
 {
        struct samsung_pin_bank *bank;
        struct device *dev = d->dev;
-       unsigned int ret;
-       unsigned int i;
+       int ret;
+       int i;
 
        if (!d->irq) {
                dev_err(dev, "irq number not available\n");
@@ -227,11 +233,29 @@ static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
                                bank->nr_pins, &exynos_gpio_irqd_ops, bank);
                if (!bank->irq_domain) {
                        dev_err(dev, "gpio irq domain add failed\n");
-                       return -ENXIO;
+                       ret = -ENXIO;
+                       goto err_domains;
+               }
+
+               bank->soc_priv = devm_kzalloc(d->dev,
+                       sizeof(struct exynos_eint_gpio_save), GFP_KERNEL);
+               if (!bank->soc_priv) {
+                       irq_domain_remove(bank->irq_domain);
+                       ret = -ENOMEM;
+                       goto err_domains;
                }
        }
 
        return 0;
+
+err_domains:
+       for (--i, --bank; i >= 0; --i, --bank) {
+               if (bank->eint_type != EINT_TYPE_GPIO)
+                       continue;
+               irq_domain_remove(bank->irq_domain);
+       }
+
+       return ret;
 }
 
 static void exynos_wkup_irq_unmask(struct irq_data *irqd)
@@ -326,6 +350,28 @@ static int exynos_wkup_irq_set_type(struct irq_data *irqd, unsigned int type)
        return 0;
 }
 
+static u32 exynos_eint_wake_mask = 0xffffffff;
+
+u32 exynos_get_eint_wake_mask(void)
+{
+       return exynos_eint_wake_mask;
+}
+
+static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
+{
+       struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+       unsigned long bit = 1UL << (2 * bank->eint_offset + irqd->hwirq);
+
+       pr_info("wake %s for irq %d\n", on ? "enabled" : "disabled", irqd->irq);
+
+       if (!on)
+               exynos_eint_wake_mask |= bit;
+       else
+               exynos_eint_wake_mask &= ~bit;
+
+       return 0;
+}
+
 /*
  * irq_chip for wakeup interrupts
  */
@@ -335,6 +381,7 @@ static struct irq_chip exynos_wkup_irq_chip = {
        .irq_mask       = exynos_wkup_irq_mask,
        .irq_ack        = exynos_wkup_irq_ack,
        .irq_set_type   = exynos_wkup_irq_set_type,
+       .irq_set_wake   = exynos_wkup_irq_set_wake,
 };
 
 /* interrupt handler for wakeup interrupts 0..15 */
@@ -505,6 +552,72 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
        return 0;
 }
 
+static void exynos_pinctrl_suspend_bank(
+                               struct samsung_pinctrl_drv_data *drvdata,
+                               struct samsung_pin_bank *bank)
+{
+       struct exynos_eint_gpio_save *save = bank->soc_priv;
+       void __iomem *regs = drvdata->virt_base;
+
+       save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+                                               + bank->eint_offset);
+       save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                                               + 2 * bank->eint_offset);
+       save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                                               + 2 * bank->eint_offset + 4);
+
+       pr_debug("%s: save     con %#010x\n", bank->name, save->eint_con);
+       pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
+       pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
+}
+
+static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
+{
+       struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+       struct samsung_pin_bank *bank = ctrl->pin_banks;
+       int i;
+
+       for (i = 0; i < ctrl->nr_banks; ++i, ++bank)
+               if (bank->eint_type == EINT_TYPE_GPIO)
+                       exynos_pinctrl_suspend_bank(drvdata, bank);
+}
+
+static void exynos_pinctrl_resume_bank(
+                               struct samsung_pinctrl_drv_data *drvdata,
+                               struct samsung_pin_bank *bank)
+{
+       struct exynos_eint_gpio_save *save = bank->soc_priv;
+       void __iomem *regs = drvdata->virt_base;
+
+       pr_debug("%s:     con %#010x => %#010x\n", bank->name,
+                       readl(regs + EXYNOS_GPIO_ECON_OFFSET
+                       + bank->eint_offset), save->eint_con);
+       pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
+                       readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                       + 2 * bank->eint_offset), save->eint_fltcon0);
+       pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
+                       readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                       + 2 * bank->eint_offset + 4), save->eint_fltcon1);
+
+       writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+                                               + bank->eint_offset);
+       writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                                               + 2 * bank->eint_offset);
+       writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+                                               + 2 * bank->eint_offset + 4);
+}
+
+static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
+{
+       struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+       struct samsung_pin_bank *bank = ctrl->pin_banks;
+       int i;
+
+       for (i = 0; i < ctrl->nr_banks; ++i, ++bank)
+               if (bank->eint_type == EINT_TYPE_GPIO)
+                       exynos_pinctrl_resume_bank(drvdata, bank);
+}
+
 /* pin banks of exynos4210 pin-controller 0 */
 static struct samsung_pin_bank exynos4210_pin_banks0[] = {
        EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -568,6 +681,8 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4210-gpio-ctrl0",
        }, {
                /* pin-controller instance 1 data */
@@ -582,6 +697,8 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
                .eint_wkup_init = exynos_eint_wkup_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4210-gpio-ctrl1",
        }, {
                /* pin-controller instance 2 data */
@@ -663,6 +780,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4x12-gpio-ctrl0",
        }, {
                /* pin-controller instance 1 data */
@@ -677,6 +796,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
                .eint_wkup_init = exynos_eint_wkup_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4x12-gpio-ctrl1",
        }, {
                /* pin-controller instance 2 data */
@@ -687,6 +808,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4x12-gpio-ctrl2",
        }, {
                /* pin-controller instance 3 data */
@@ -697,6 +820,8 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos4x12-gpio-ctrl3",
        },
 };
@@ -775,6 +900,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
                .eint_wkup_init = exynos_eint_wkup_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos5250-gpio-ctrl0",
        }, {
                /* pin-controller instance 1 data */
@@ -785,6 +912,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos5250-gpio-ctrl1",
        }, {
                /* pin-controller instance 2 data */
@@ -795,6 +924,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos5250-gpio-ctrl2",
        }, {
                /* pin-controller instance 3 data */
@@ -805,6 +936,8 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
                .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
                .svc            = EXYNOS_SVC_OFFSET,
                .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
                .label          = "exynos5250-gpio-ctrl3",
        },
 };
index 9b1f77a5bf0fd2e071aab24a238a3f0dc5092695..3c91c357792ff0f5f5b7ad9739f934f4337e0a07 100644 (file)
@@ -19,6 +19,7 @@
 
 /* External GPIO and wakeup interrupt related definitions */
 #define EXYNOS_GPIO_ECON_OFFSET                0x700
+#define EXYNOS_GPIO_EFLTCON_OFFSET     0x800
 #define EXYNOS_GPIO_EMASK_OFFSET       0x900
 #define EXYNOS_GPIO_EPEND_OFFSET       0xA00
 #define EXYNOS_WKUP_ECON_OFFSET                0xE00
index 6038503ed929cc5c9332e7f4903412cf2d1d3611..32a48f44f574264f5c1b320e00db8065f1ba0616 100644 (file)
@@ -1000,11 +1000,6 @@ static int exynos5440_pinctrl_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "cannot find IO resource\n");
-               return -ENOENT;
-       }
-
        priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->reg_base))
                return PTR_ERR(priv->reg_base);
index 615c5002b757e1515187a1a87278a69d9a980d8f..d22ca252b80d41b6b1d10b59f83d9e33e9f9b1ef 100644 (file)
@@ -52,7 +52,8 @@ static void ltq_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
        int i;
 
        for (i = 0; i < num_maps; i++)
-               if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+               if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN ||
+                   map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
                        kfree(map[i].data.configs.configs);
        kfree(map);
 }
index 976366899f68831f6765f1422ce996a1c8dec3b8..63ac22e89678c8bd9833be95b44b7c525afab620 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/gpio.h>
 #include <linux/irqdomain.h>
 #include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
 
 #include "core.h"
 #include "pinctrl-samsung.h"
@@ -48,6 +49,9 @@ static struct pin_config {
        { "samsung,pin-pud-pdn", PINCFG_TYPE_PUD_PDN },
 };
 
+/* Global list of devices (struct samsung_pinctrl_drv_data) */
+LIST_HEAD(drvdata_list);
+
 static unsigned int pin_base;
 
 static inline struct samsung_pin_bank *gc_to_pin_bank(struct gpio_chip *gc)
@@ -932,11 +936,6 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
        drvdata->dev = dev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "cannot find IO resource\n");
-               return -ENOENT;
-       }
-
        drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(drvdata->virt_base))
                return PTR_ERR(drvdata->virt_base);
@@ -961,9 +960,151 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
                ctrl->eint_wkup_init(drvdata);
 
        platform_set_drvdata(pdev, drvdata);
+
+       /* Add to the global list */
+       list_add_tail(&drvdata->node, &drvdata_list);
+
        return 0;
 }
 
+#ifdef CONFIG_PM
+
+/**
+ * samsung_pinctrl_suspend_dev - save a device's pinctrl state for suspend
+ *
+ * Save data for all banks handled by this device.
+ */
+static void samsung_pinctrl_suspend_dev(
+       struct samsung_pinctrl_drv_data *drvdata)
+{
+       struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+       void __iomem *virt_base = drvdata->virt_base;
+       int i;
+
+       for (i = 0; i < ctrl->nr_banks; i++) {
+               struct samsung_pin_bank *bank = &ctrl->pin_banks[i];
+               void __iomem *reg = virt_base + bank->pctl_offset;
+
+               u8 *offs = bank->type->reg_offset;
+               u8 *widths = bank->type->fld_width;
+               enum pincfg_type type;
+
+               /* Registers without a powerdown config aren't lost */
+               if (!widths[PINCFG_TYPE_CON_PDN])
+                       continue;
+
+               for (type = 0; type < PINCFG_TYPE_NUM; type++)
+                       if (widths[type])
+                               bank->pm_save[type] = readl(reg + offs[type]);
+
+               if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) {
+                       /* Some banks have two config registers */
+                       bank->pm_save[PINCFG_TYPE_NUM] =
+                               readl(reg + offs[PINCFG_TYPE_FUNC] + 4);
+                       pr_debug("Save %s @ %p (con %#010x %08x)\n",
+                                bank->name, reg,
+                                bank->pm_save[PINCFG_TYPE_FUNC],
+                                bank->pm_save[PINCFG_TYPE_NUM]);
+               } else {
+                       pr_debug("Save %s @ %p (con %#010x)\n", bank->name,
+                                reg, bank->pm_save[PINCFG_TYPE_FUNC]);
+               }
+       }
+
+       if (ctrl->suspend)
+               ctrl->suspend(drvdata);
+}
+
+/**
+ * samsung_pinctrl_resume_dev - restore a device's pinctrl state after suspend
+ *
+ * Restore the state saved during suspend for all banks handled by this device.
+ *
+ * We don't bother doing anything complicated to avoid glitching lines since
+ * we're called before pad retention is turned off.
+ */
+static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
+{
+       struct samsung_pin_ctrl *ctrl = drvdata->ctrl;
+       void __iomem *virt_base = drvdata->virt_base;
+       int i;
+
+       if (ctrl->resume)
+               ctrl->resume(drvdata);
+
+       for (i = 0; i < ctrl->nr_banks; i++) {
+               struct samsung_pin_bank *bank = &ctrl->pin_banks[i];
+               void __iomem *reg = virt_base + bank->pctl_offset;
+
+               u8 *offs = bank->type->reg_offset;
+               u8 *widths = bank->type->fld_width;
+               enum pincfg_type type;
+
+               /* Registers without a powerdown config aren't lost */
+               if (!widths[PINCFG_TYPE_CON_PDN])
+                       continue;
+
+               if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) {
+                       /* Some banks have two config registers */
+                       pr_debug("%s @ %p (con %#010x %08x => %#010x %08x)\n",
+                                bank->name, reg,
+                                readl(reg + offs[PINCFG_TYPE_FUNC]),
+                                readl(reg + offs[PINCFG_TYPE_FUNC] + 4),
+                                bank->pm_save[PINCFG_TYPE_FUNC],
+                                bank->pm_save[PINCFG_TYPE_NUM]);
+                       writel(bank->pm_save[PINCFG_TYPE_NUM],
+                              reg + offs[PINCFG_TYPE_FUNC] + 4);
+               } else {
+                       pr_debug("%s @ %p (con %#010x => %#010x)\n", bank->name,
+                                reg, readl(reg + offs[PINCFG_TYPE_FUNC]),
+                                bank->pm_save[PINCFG_TYPE_FUNC]);
+               }
+               for (type = 0; type < PINCFG_TYPE_NUM; type++)
+                       if (widths[type])
+                               writel(bank->pm_save[type], reg + offs[type]);
+       }
+}
+
+/**
+ * samsung_pinctrl_suspend - save pinctrl state for suspend
+ *
+ * Save data for all banks across all devices.
+ */
+static int samsung_pinctrl_suspend(void)
+{
+       struct samsung_pinctrl_drv_data *drvdata;
+
+       list_for_each_entry(drvdata, &drvdata_list, node) {
+               samsung_pinctrl_suspend_dev(drvdata);
+       }
+
+       return 0;
+}
+
+/**
+ * samsung_pinctrl_resume - restore pinctrl state after suspend
+ *
+ * Restore data for all banks across all devices.
+ */
+static void samsung_pinctrl_resume(void)
+{
+       struct samsung_pinctrl_drv_data *drvdata;
+
+       list_for_each_entry_reverse(drvdata, &drvdata_list, node) {
+               samsung_pinctrl_resume_dev(drvdata);
+       }
+}
+
+#else
+#define samsung_pinctrl_suspend                NULL
+#define samsung_pinctrl_resume         NULL
+#endif
+
+static struct syscore_ops samsung_pinctrl_syscore_ops = {
+       .suspend        = samsung_pinctrl_suspend,
+       .resume         = samsung_pinctrl_resume,
+};
+
 static const struct of_device_id samsung_pinctrl_dt_match[] = {
 #ifdef CONFIG_PINCTRL_EXYNOS
        { .compatible = "samsung,exynos4210-pinctrl",
@@ -992,6 +1133,14 @@ static struct platform_driver samsung_pinctrl_driver = {
 
 static int __init samsung_pinctrl_drv_register(void)
 {
+       /*
+        * Register syscore ops for save/restore of registers across suspend.
+        * It's important to ensure that this driver is running at an earlier
+        * initcall level than any arch-specific init calls that install syscore
+        * ops that turn off pad retention (like exynos_pm_resume).
+        */
+       register_syscore_ops(&samsung_pinctrl_syscore_ops);
+
        return platform_driver_register(&samsung_pinctrl_driver);
 }
 postcore_initcall(samsung_pinctrl_drv_register);
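For context on the initcall-ordering comment above: the standard boot-time initcall levels run in a fixed sequence, so a postcore_initcall() registration precedes the arch/device initcalls that install the pad-retention syscore ops (a reference sketch, not part of the patch):

	/*
	 * Initcall levels, earliest first:
	 *   core_initcall -> postcore_initcall -> arch_initcall ->
	 *   subsys_initcall -> fs_initcall -> device_initcall -> late_initcall
	 * Registering samsung_pinctrl_syscore_ops from postcore_initcall()
	 * therefore places it on the syscore list before ops registered by any
	 * later arch_initcall()/device_initcall(), such as the exynos PM code
	 * mentioned in the comment above.
	 */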
index 7c7f9ebcd05b13d183889f6bb360f080ccc7d8ca..26d3519240c9c7f93bcb3628abf7feb6f6707535 100644 (file)
@@ -127,6 +127,7 @@ struct samsung_pin_bank_type {
  * @gpio_chip: GPIO chip of the bank.
  * @grange: linux gpio pin range supported by this bank.
  * @slock: spinlock protecting bank registers
+ * @pm_save: saved register values during suspend
  */
 struct samsung_pin_bank {
        struct samsung_pin_bank_type *type;
@@ -138,12 +139,15 @@ struct samsung_pin_bank {
        u32             eint_mask;
        u32             eint_offset;
        char            *name;
+       void            *soc_priv;
        struct device_node *of_node;
        struct samsung_pinctrl_drv_data *drvdata;
        struct irq_domain *irq_domain;
        struct gpio_chip gpio_chip;
        struct pinctrl_gpio_range grange;
        spinlock_t slock;
+
+       u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers */
 };
 
 /**
@@ -184,11 +188,15 @@ struct samsung_pin_ctrl {
 
        int             (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
        int             (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
+       void            (*suspend)(struct samsung_pinctrl_drv_data *);
+       void            (*resume)(struct samsung_pinctrl_drv_data *);
+
        char            *label;
 };
 
 /**
  * struct samsung_pinctrl_drv_data: wrapper for holding driver data together.
+ * @node: global list node
  * @virt_base: register base address of the controller.
  * @dev: device instance representing the controller.
  * @irq: interrupt number used by the controller to notify gpio interrupts.
@@ -201,6 +209,7 @@ struct samsung_pin_ctrl {
  * @nr_function: number of such pin functions.
  */
 struct samsung_pinctrl_drv_data {
+       struct list_head                node;
        void __iomem                    *virt_base;
        struct device                   *dev;
        int                             irq;
index 5f2d2bfd356e92c6bfac0ff54dde45fb472f7fbe..b9fa046186011bb0e3c781bbc6b0b7787b272c97 100644 (file)
@@ -1166,7 +1166,8 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
        (*map)->data.mux.function = np->name;
 
        if (pcs->is_pinconf) {
-               if (pcs_parse_pinconf(pcs, np, function, map))
+               res = pcs_parse_pinconf(pcs, np, function, map);
+               if (res)
                        goto free_pingroups;
                *num_maps = 2;
        } else {
index c52fc2c087327c6239c867bec7553a4eadd99d0c..b7d8c890514c7429c7533ce134f09cbbd440d333 100644 (file)
@@ -1990,8 +1990,10 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev)
        }
 
        clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
                goto gpiochip_error;
+       }
 
        clk_prepare_enable(clk);
 
@@ -2000,7 +2002,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev)
        return 0;
 
 gpiochip_error:
-       ret = gpiochip_remove(pctl->chip);
+       if (gpiochip_remove(pctl->chip))
+               dev_err(&pdev->dev, "failed to remove gpio chip\n");
 pinctrl_error:
        pinctrl_unregister(pctl->pctl_dev);
        return ret;
index f2977cff8366f4b89f88987f1080039a39f2f110..e92132c76a6b56cf02415f18b890c259f377c404 100644 (file)
@@ -716,10 +716,6 @@ static int pinmux_xway_probe(struct platform_device *pdev)
 
        /* get and remap our register range */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get resource\n");
-               return -ENOENT;
-       }
        xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(xway_info.membase[0]))
                return PTR_ERR(xway_info.membase[0]);
index b964cc5505681c9a590de10852e6187a2b558d79..de43262398db483cfd9b21cc3cabb22bc4903011 100644 (file)
@@ -53,7 +53,7 @@ static const struct wmt_pinctrl_bank_registers wm8750_banks[] = {
 #define WMT_PIN_EXTGPIO6       WMT_PIN(0, 6)
 #define WMT_PIN_EXTGPIO7       WMT_PIN(0, 7)
 #define WMT_PIN_WAKEUP0                WMT_PIN(0, 16)
-#define WMT_PIN_WAKEUP1                WMT_PIN(0, 16)
+#define WMT_PIN_WAKEUP1                WMT_PIN(0, 17)
 #define WMT_PIN_SD0CD          WMT_PIN(0, 28)
 #define WMT_PIN_VDOUT0         WMT_PIN(1, 0)
 #define WMT_PIN_VDOUT1         WMT_PIN(1, 1)
index ab63104e8dc98eb148378b859b6217c93170c304..70d986e04afb205d6b908fb95d25b87b0d0c6804 100644 (file)
@@ -609,8 +609,7 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
        return 0;
 
 fail_range:
-       err = gpiochip_remove(&data->gpio_chip);
-       if (err)
+       if (gpiochip_remove(&data->gpio_chip))
                dev_err(&pdev->dev, "failed to remove gpio chip\n");
 fail_gpio:
        pinctrl_unregister(data->pctl_dev);
index 3338437b559b7e696e305b08e54ab1d874e67b4d..85772616efbf02b82c61af7e0846baed894c273e 100644 (file)
@@ -781,4 +781,12 @@ config APPLE_GMUX
          graphics as well as the backlight. Currently only backlight
          control is supported by the driver.
 
+config PVPANIC
+       tristate "pvpanic device support"
+       depends on ACPI
+       ---help---
+         This driver provides support for the pvpanic device.  pvpanic is
+         a paravirtualized device provided by QEMU; it lets a virtual machine
+         (guest) communicate panic events to the host.
+
 endif # X86_PLATFORM_DEVICES
index ace2b38942fe27f9cc02697f0f9543ca5eeff595..ef0ec746f78c49e5f6bf01fe28099ebbbee08318 100644 (file)
@@ -51,3 +51,5 @@ obj-$(CONFIG_INTEL_OAKTRAIL)  += intel_oaktrail.o
 obj-$(CONFIG_SAMSUNG_Q10)      += samsung-q10.o
 obj-$(CONFIG_APPLE_GMUX)       += apple-gmux.o
 obj-$(CONFIG_CHROMEOS_LAPTOP)  += chromeos_laptop.o
+
+obj-$(CONFIG_PVPANIC)           += pvpanic.o
index 210b5b872125e8aef1d751e56968f953686c4904..8fcb41e18b9cd6ad508001d33d86665f4955816c 100644 (file)
@@ -171,6 +171,15 @@ static struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_x401u,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUSTeK COMPUTER INC. X75A",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X75A"),
+               },
+               .driver_data = &quirk_asus_x401u,
+       },
        {},
 };
 
index fa3ee6209572976b71d623585ecfb026842d5645..1134119521ac2e4da9708703d8b4252ac807ce15 100644 (file)
@@ -284,6 +284,7 @@ static void __init parse_da_table(const struct dmi_header *dm)
 {
        /* Final token is a terminator, so we don't want to copy it */
        int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
+       struct calling_interface_token *new_da_tokens;
        struct calling_interface_structure *table =
                container_of(dm, struct calling_interface_structure, header);
 
@@ -296,12 +297,13 @@ static void __init parse_da_table(const struct dmi_header *dm)
        da_command_address = table->cmdIOAddress;
        da_command_code = table->cmdIOCode;
 
-       da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
-                            sizeof(struct calling_interface_token),
-                            GFP_KERNEL);
+       new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
+                                sizeof(struct calling_interface_token),
+                                GFP_KERNEL);
 
-       if (!da_tokens)
+       if (!new_da_tokens)
                return;
+       da_tokens = new_da_tokens;
 
        memcpy(da_tokens+da_num_tokens, table->tokens,
               sizeof(struct calling_interface_token) * tokens);
index 3f945457f71c218116e7f8eb9d4a7400fcdc8b62..bcf8cc6b5537cca1a1c3ad016d700ca3f30104a4 100644 (file)
@@ -34,6 +34,14 @@ MODULE_LICENSE("GPL");
 #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4"
 #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8"
 
+struct dell_wmi_event {
+       u16     length;
+       /* 0x000: A hot key was pressed or an event occurred
+        * 0x00F: A sequence of hot keys was pressed */
+       u16     type;
+       u16     event[];
+};
+
 static const char *dell_wmi_aio_guids[] = {
        EVENT_GUID1,
        EVENT_GUID2,
@@ -46,15 +54,41 @@ MODULE_ALIAS("wmi:"EVENT_GUID2);
 static const struct key_entry dell_wmi_aio_keymap[] = {
        { KE_KEY, 0xc0, { KEY_VOLUMEUP } },
        { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } },
+       { KE_KEY, 0xe030, { KEY_VOLUMEUP } },
+       { KE_KEY, 0xe02e, { KEY_VOLUMEDOWN } },
+       { KE_KEY, 0xe020, { KEY_MUTE } },
+       { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },
+       { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
+       { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
+       { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
        { KE_END, 0 }
 };
 
 static struct input_dev *dell_wmi_aio_input_dev;
 
+/*
+ * The new WMI event data format follows the dell_wmi_event structure,
+ * so we check whether the buffer matches that format.
+ */
+static bool dell_wmi_aio_event_check(u8 *buffer, int length)
+{
+       struct dell_wmi_event *event = (struct dell_wmi_event *)buffer;
+
+       if (event == NULL || length < 6)
+               return false;
+
+       if ((event->type == 0 || event->type == 0xf) &&
+                       event->length >= 2)
+               return true;
+
+       return false;
+}
+
 static void dell_wmi_aio_notify(u32 value, void *context)
 {
        struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
+       struct dell_wmi_event *event;
        acpi_status status;
 
        status = wmi_get_event_data(value, &response);
@@ -65,7 +99,7 @@ static void dell_wmi_aio_notify(u32 value, void *context)
 
        obj = (union acpi_object *)response.pointer;
        if (obj) {
-               unsigned int scancode;
+               unsigned int scancode = 0;
 
                switch (obj->type) {
                case ACPI_TYPE_INTEGER:
@@ -75,13 +109,22 @@ static void dell_wmi_aio_notify(u32 value, void *context)
                                scancode, 1, true);
                        break;
                case ACPI_TYPE_BUFFER:
-                       /* Broken machines return the scancode in a buffer */
-                       if (obj->buffer.pointer && obj->buffer.length > 0) {
-                               scancode = obj->buffer.pointer[0];
+                       if (dell_wmi_aio_event_check(obj->buffer.pointer,
+                                               obj->buffer.length)) {
+                               event = (struct dell_wmi_event *)
+                                       obj->buffer.pointer;
+                               scancode = event->event[0];
+                       } else {
+                               /* Broken machines return the scancode in a
+                                  buffer */
+                               if (obj->buffer.pointer &&
+                                               obj->buffer.length > 0)
+                                       scancode = obj->buffer.pointer[0];
+                       }
+                       if (scancode)
                                sparse_keymap_report_event(
                                        dell_wmi_aio_input_dev,
                                        scancode, 1, true);
-                       }
                        break;
                }
        }
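To illustrate the layout accepted by dell_wmi_aio_event_check() above, a hypothetical new-style buffer for the volume-up hot key (sample bytes assumed little-endian, as on x86; not taken from the patch) could look like:

	/* Hypothetical sample event buffer:
	 *   length   = 2      (satisfies the 'event->length >= 2' test)
	 *   type     = 0x000  (hot key pressed)
	 *   event[0] = 0xe030 (KEY_VOLUMEUP in the keymap above)
	 * Total size is 6 bytes, the minimum dell_wmi_aio_event_check() accepts,
	 * so dell_wmi_aio_event_check(sample, sizeof(sample)) returns true. */
	static u8 sample[] = {
		0x02, 0x00,	/* length */
		0x00, 0x00,	/* type   */
		0x30, 0xe0,	/* event[0] = 0xe030 */
	};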
index 1a779bbfb87d2b8de61e964734981f4f9a6c891e..8df0c5a21be27d7a2044b06b8ee21bc8fab90359 100644 (file)
@@ -71,6 +71,14 @@ enum hp_wmi_event_ids {
        HPWMI_WIRELESS = 5,
        HPWMI_CPU_BATTERY_THROTTLE = 6,
        HPWMI_LOCK_SWITCH = 7,
+       HPWMI_LID_SWITCH = 8,
+       HPWMI_SCREEN_ROTATION = 9,
+       HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A,
+       HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B,
+       HPWMI_PROXIMITY_SENSOR = 0x0C,
+       HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
+       HPWMI_PEAKSHIFT_PERIOD = 0x0F,
+       HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
 };
 
 struct bios_args {
@@ -536,6 +544,22 @@ static void hp_wmi_notify(u32 value, void *context)
                break;
        case HPWMI_LOCK_SWITCH:
                break;
+       case HPWMI_LID_SWITCH:
+               break;
+       case HPWMI_SCREEN_ROTATION:
+               break;
+       case HPWMI_COOLSENSE_SYSTEM_MOBILE:
+               break;
+       case HPWMI_COOLSENSE_SYSTEM_HOT:
+               break;
+       case HPWMI_PROXIMITY_SENSOR:
+               break;
+       case HPWMI_BACKLIT_KB_BRIGHTNESS:
+               break;
+       case HPWMI_PEAKSHIFT_PERIOD:
+               break;
+       case HPWMI_BATTERY_CHARGE_PERIOD:
+               break;
        default:
                pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
                break;
index e64a7a870d42146c910ffb39b955c29a11a88080..a8e43cf70fac96309d7b689265b7c2b6ba2a19db 100644 (file)
@@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct device *dev)
 
 static int lis3lv02d_resume(struct device *dev)
 {
-       return lis3lv02d_poweron(&lis3_dev);
+       lis3lv02d_poweron(&lis3_dev);
+       return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
index 17f00b8dc5cbcb841540f07c7330775bcb5393df..89c4519d48ac80d2a54f42ceabf7a48b792e871c 100644 (file)
@@ -640,7 +640,8 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
        for (bit = 0; bit < 16; bit++) {
                if (test_bit(bit, &value)) {
                        switch (bit) {
-                       case 6:
+                       case 0: /* Z580 */
+                       case 6: /* Z570 */
                                /* Thermal Management button */
                                ideapad_input_report(priv, 65);
                                break;
@@ -648,6 +649,9 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
                                /* OneKey Theater button */
                                ideapad_input_report(priv, 64);
                                break;
+                       default:
+                               pr_info("Unknown special button: %lu\n", bit);
+                               break;
                        }
                }
        }
diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c
new file mode 100644 (file)
index 0000000..47ae0c4
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ *  pvpanic.c - pvpanic Device Support
+ *
+ *  Copyright (C) 2013 Fujitsu.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
+MODULE_DESCRIPTION("pvpanic device driver");
+MODULE_LICENSE("GPL");
+
+static int pvpanic_add(struct acpi_device *device);
+static int pvpanic_remove(struct acpi_device *device);
+
+static const struct acpi_device_id pvpanic_device_ids[] = {
+       { "QEMU0001", 0 },
+       { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
+
+#define PVPANIC_PANICKED       (1 << 0)
+
+static u16 port;
+
+static struct acpi_driver pvpanic_driver = {
+       .name =         "pvpanic",
+       .class =        "QEMU",
+       .ids =          pvpanic_device_ids,
+       .ops =          {
+                               .add =          pvpanic_add,
+                               .remove =       pvpanic_remove,
+                       },
+       .owner =        THIS_MODULE,
+};
+
+static void
+pvpanic_send_event(unsigned int event)
+{
+       outb(event, port);
+}
+
+static int
+pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
+                    void *unused)
+{
+       pvpanic_send_event(PVPANIC_PANICKED);
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block pvpanic_panic_nb = {
+       .notifier_call = pvpanic_panic_notify,
+};
+
+
+static acpi_status
+pvpanic_walk_resources(struct acpi_resource *res, void *context)
+{
+       switch (res->type) {
+       case ACPI_RESOURCE_TYPE_END_TAG:
+               return AE_OK;
+
+       case ACPI_RESOURCE_TYPE_IO:
+               port = res->data.io.minimum;
+               return AE_OK;
+
+       default:
+               return AE_ERROR;
+       }
+}
+
+static int pvpanic_add(struct acpi_device *device)
+{
+       acpi_status status;
+       u64 ret;
+
+       status = acpi_evaluate_integer(device->handle, "_STA", NULL,
+                                      &ret);
+
+       if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B)
+               return -ENODEV;
+
+       acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+                           pvpanic_walk_resources, NULL);
+
+       if (!port)
+               return -ENODEV;
+
+       atomic_notifier_chain_register(&panic_notifier_list,
+                                      &pvpanic_panic_nb);
+
+       return 0;
+}
+
+static int pvpanic_remove(struct acpi_device *device)
+{
+
+       atomic_notifier_chain_unregister(&panic_notifier_list,
+                                        &pvpanic_panic_nb);
+       return 0;
+}
+
+module_acpi_driver(pvpanic_driver);
index 5f770059fd4d3aa3115140fd5afcc7a7832eca53..1a90b62a71c66400268cad23c7d6690b1eec30d0 100644 (file)
@@ -176,10 +176,7 @@ static int __init samsungq10_init(void)
                                                   samsungq10_probe,
                                                   NULL, 0, NULL, 0);
 
-       if (IS_ERR(samsungq10_device))
-               return PTR_ERR(samsungq10_device);
-
-       return 0;
+       return PTR_RET(samsungq10_device);
 }
 
 static void __exit samsungq10_exit(void)
index d544e3aaf76141754b622aada806b04050fe2886..2ac045f27f10112aa467b867a9b97a9a1790c189 100644 (file)
@@ -1255,6 +1255,11 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
                        real_ev = __sony_nc_gfx_switch_status_get();
                        break;
 
+               case 0x015B:
+                       /* Hybrid GFX switching SVS151290S */
+                       ev_type = GFX_SWITCH;
+                       real_ev = __sony_nc_gfx_switch_status_get();
+                       break;
                default:
                        dprintk("Unknown event 0x%x for handle 0x%x\n",
                                        event, handle);
@@ -1353,6 +1358,7 @@ static void sony_nc_function_setup(struct acpi_device *device,
                        break;
                case 0x0128:
                case 0x0146:
+               case 0x015B:
                        result = sony_nc_gfx_switch_setup(pf_device, handle);
                        if (result)
                                pr_err("couldn't set up GFX Switch status (%d)\n",
@@ -1375,6 +1381,7 @@ static void sony_nc_function_setup(struct acpi_device *device,
                case 0x0143:
                case 0x014b:
                case 0x014c:
+               case 0x0163:
                        result = sony_nc_kbd_backlight_setup(pf_device, handle);
                        if (result)
                                pr_err("couldn't set up keyboard backlight function (%d)\n",
@@ -1426,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
                        break;
                case 0x0128:
                case 0x0146:
+               case 0x015B:
                        sony_nc_gfx_switch_cleanup(pd);
                        break;
                case 0x0131:
@@ -1439,6 +1447,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
                case 0x0143:
                case 0x014b:
                case 0x014c:
+               case 0x0163:
                        sony_nc_kbd_backlight_cleanup(pd);
                        break;
                default:
@@ -1485,6 +1494,7 @@ static void sony_nc_function_resume(void)
                case 0x0143:
                case 0x014b:
                case 0x014c:
+               case 0x0163:
                        sony_nc_kbd_backlight_resume();
                        break;
                default:
@@ -2390,7 +2400,9 @@ static int __sony_nc_gfx_switch_status_get(void)
 {
        unsigned int result;
 
-       if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result))
+       if (sony_call_snc_handle(gfxs_ctl->handle,
+                               gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100,
+                               &result))
                return -EIO;
 
        switch (gfxs_ctl->handle) {
@@ -2400,6 +2412,12 @@ static int __sony_nc_gfx_switch_status_get(void)
                 */
                return result & 0x1 ? SPEED : STAMINA;
                break;
+       case 0x015B:
+               /* 0: discrete GFX (speed)
+                * 1: integrated GFX (stamina)
+                */
+               return result & 0x1 ? STAMINA : SPEED;
+               break;
        case 0x0128:
                /* it's a more elaborated bitmask, for now:
                 * 2: integrated GFX (stamina)
index 0d0b5d7d19d02f9e2c0a245de4da43f084e3340e..7b8979c63f4882e6c3e5375b9f9c35c40e5a829c 100644 (file)
@@ -152,6 +152,7 @@ config BATTERY_SBS
 
 config BATTERY_BQ27x00
        tristate "BQ27x00 battery driver"
+       depends on I2C || I2C=n
        help
          Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips.
 
@@ -284,6 +285,7 @@ config CHARGER_LP8788
        tristate "TI LP8788 charger driver"
        depends on MFD_LP8788
        depends on LP8788_ADC
+       depends on IIO
        help
          Say Y to enable support for the LP8788 linear charger.
 
index a44175139bbf3a619d65fe611cc9fa961388cd76..fef56e2041b325cf9e086d434f1755fbca0a8857 100644 (file)
@@ -1269,5 +1269,5 @@ module_exit(pm2xxx_charger_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay");
-MODULE_ALIAS("platform:pm2xxx-charger");
+MODULE_ALIAS("i2c:pm2xxx-charger");
 MODULE_DESCRIPTION("PM2xxx charger management driver");
index 58cbb009b74f144564b069bf9344e801a5f02743..56fb509f4be00834f964fa2636b35c97eebf6849 100644 (file)
@@ -207,7 +207,6 @@ static int wm831x_backup_remove(struct platform_device *pdev)
        struct wm831x_backup *devdata = platform_get_drvdata(pdev);
 
        power_supply_unregister(&devdata->backup);
-       kfree(devdata->backup.name);
 
        return 0;
 }
index bea94510ad2d4c0d3faf7cc703c9b8883ec9cf97..71a2559278d7a0d41bf773edcf5219f35462ddb1 100644 (file)
@@ -628,9 +628,10 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        chip->caps = ptp_pch_caps;
        chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
-
-       if (IS_ERR(chip->ptp_clock))
-               return PTR_ERR(chip->ptp_clock);
+       if (IS_ERR(chip->ptp_clock)) {
+               ret = PTR_ERR(chip->ptp_clock);
+               goto err_ptp_clock_reg;
+       }
 
        spin_lock_init(&chip->register_lock);
 
@@ -669,6 +670,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 err_req_irq:
        ptp_clock_unregister(chip->ptp_clock);
+err_ptp_clock_reg:
        iounmap(chip->regs);
        chip->regs = NULL;
 
index ec287989eafc56b89d509e48b3f8ad8999fdc0a7..c938bae18812ea5768e77b48ee2b0f1a6e39b24f 100644 (file)
@@ -265,11 +265,6 @@ static int imx_pwm_probe(struct platform_device *pdev)
        imx->chip.npwm = 1;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(imx->mmio_base))
                return PTR_ERR(imx->mmio_base);
index d1eb499fb15d2a3bbf3fec0198a3de7c9451e43b..ed6007b27585a9512d5c2470248343b3e80882d4 100644 (file)
@@ -117,11 +117,6 @@ static int pwm_probe(struct platform_device *pdev)
                return PTR_ERR(puv3->clk);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        puv3->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(puv3->base))
                return PTR_ERR(puv3->base);
index dee6ab552a0a9439c968c695574e14e001a67339..dc9717551d395be96800008bdee1f2379b04759a 100644 (file)
@@ -147,11 +147,6 @@ static int pwm_probe(struct platform_device *pdev)
        pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(pwm->mmio_base))
                return PTR_ERR(pwm->mmio_base);
index 3d75f4a88f982395a148aca6906dd1fbf27c87cf..a5402933001f9cf446192d79b1ddf204e748e2a1 100644 (file)
@@ -181,11 +181,6 @@ static int tegra_pwm_probe(struct platform_device *pdev)
        pwm->dev = &pdev->dev;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no memory resources defined\n");
-               return -ENODEV;
-       }
-
        pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(pwm->mmio_base))
                return PTR_ERR(pwm->mmio_base);
index 0d65fb2e02c7897348862bebe2ac27be8449c432..72ca42dfa733312e408f155db136d81eb1822ace 100644 (file)
@@ -240,11 +240,6 @@ static int ecap_pwm_probe(struct platform_device *pdev)
        pc->chip.npwm = 1;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(pc->mmio_base))
                return PTR_ERR(pc->mmio_base);
index 6a217596942f0d6badeeff620b50d182459554a5..48a485c2e4224a334b49dda3f1a8848aaecb2bfa 100644 (file)
@@ -471,11 +471,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
        pc->chip.npwm = NUM_PWM_CHANNEL;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(pc->mmio_base))
                return PTR_ERR(pc->mmio_base);
index c9c3d3a1e0eb2693ac9b1fad89f620c077323bca..3b119bc2c3c606f3c9ca697d9fa1dc5e66144e59 100644 (file)
@@ -70,11 +70,6 @@ static int pwmss_probe(struct platform_device *pdev)
        mutex_init(&info->pwmss_lock);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(info->mmio_base))
                return PTR_ERR(info->mmio_base);
index 69effd19afc700144ad35ab4fce44ccf991f8dae..323125abf3f4ddae2d6f2c48abe956c8b497b40e 100644 (file)
@@ -230,11 +230,6 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
        }
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
        chip->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(chip->base))
                return PTR_ERR(chip->base);
index 6194d35ebb9740c0af7f999dcb7b73aec75d91ca..5ab056494bbefc63d3e3a0a3daffcedd7c0a9ef4 100644 (file)
@@ -47,4 +47,24 @@ config RAPIDIO_DEBUG
 
          If you are unsure about this, say N here.
 
+choice
+       prompt "Enumeration method"
+       depends on RAPIDIO
+       default RAPIDIO_ENUM_BASIC
+       help
+         There are different enumeration and discovery mechanisms offered
+         for the RapidIO subsystem. You may select a single built-in method
+         or any number of methods to be built as modules.
+         Selecting a built-in method disables use of loadable methods.
+
+         If unsure, select Basic built-in.
+
+config RAPIDIO_ENUM_BASIC
+       tristate "Basic"
+       help
+         This option includes a basic RapidIO fabric enumeration and discovery
+         mechanism similar to the one described in RapidIO specification Annex 1.
+
+endchoice
+
 source "drivers/rapidio/switches/Kconfig"
index ec3fb81210041e532206faac578cc9c897edaf82..3036702ffe8b5950e83c9979c6e596fb9aa57db3 100644 (file)
@@ -1,7 +1,8 @@
 #
 # Makefile for RapidIO interconnect services
 #
-obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
+obj-y += rio.o rio-access.o rio-driver.o rio-sysfs.o
+obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o
 
 obj-$(CONFIG_RAPIDIO)          += switches/
 obj-$(CONFIG_RAPIDIO)          += devices/
index 6faba406b6e9f705bb047232b796d8476e89e1db..a8b2c23a7ef4b7acbe4a771fbf72a2ddffded52f 100644 (file)
@@ -471,6 +471,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
        u32 intval;
        u32 ch_inte;
 
+       /* For MSI mode disable all device-level interrupts */
+       if (priv->flags & TSI721_USING_MSI)
+               iowrite32(0, priv->regs + TSI721_DEV_INTE);
+
        dev_int = ioread32(priv->regs + TSI721_DEV_INT);
        if (!dev_int)
                return IRQ_NONE;
@@ -560,6 +564,14 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
                }
        }
 #endif
+
+       /* For MSI mode re-enable device-level interrupts */
+       if (priv->flags & TSI721_USING_MSI) {
+               dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
+                       TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
+               iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
+       }
+
        return IRQ_HANDLED;
 }
 
index 0f4a53bdaa3cf9b8e60c4c8dfbf5d6f80b3cf882..a0c875563d7669fbf69916c95bd0b0219417d985 100644 (file)
@@ -164,6 +164,13 @@ void rio_unregister_driver(struct rio_driver *rdrv)
        driver_unregister(&rdrv->driver);
 }
 
+void rio_attach_device(struct rio_dev *rdev)
+{
+       rdev->dev.bus = &rio_bus_type;
+       rdev->dev.parent = &rio_bus;
+}
+EXPORT_SYMBOL_GPL(rio_attach_device);
+
 /**
  *  rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure
  *  @dev: the standard device structure to match against
@@ -200,6 +207,7 @@ struct bus_type rio_bus_type = {
        .name = "rapidio",
        .match = rio_match_bus,
        .dev_attrs = rio_dev_attrs,
+       .bus_attrs = rio_bus_attrs,
        .probe = rio_device_probe,
        .remove = rio_device_remove,
 };
index a965acd3c0e4e9f6903f95c4fdb7d8f6331669d3..4c15dbf810871e04f55bb28d30d0f7e2d1cc826d 100644 (file)
 
 #include "rio.h"
 
-LIST_HEAD(rio_devices);
-
 static void rio_init_em(struct rio_dev *rdev);
 
-DEFINE_SPINLOCK(rio_global_list_lock);
-
 static int next_destid = 0;
 static int next_comptag = 1;
 
@@ -326,127 +322,6 @@ static int rio_is_switch(struct rio_dev *rdev)
        return 0;
 }
 
-/**
- * rio_switch_init - Sets switch operations for a particular vendor switch
- * @rdev: RIO device
- * @do_enum: Enumeration/Discovery mode flag
- *
- * Searches the RIO switch ops table for known switch types. If the vid
- * and did match a switch table entry, then call switch initialization
- * routine to setup switch-specific routines.
- */
-static void rio_switch_init(struct rio_dev *rdev, int do_enum)
-{
-       struct rio_switch_ops *cur = __start_rio_switch_ops;
-       struct rio_switch_ops *end = __end_rio_switch_ops;
-
-       while (cur < end) {
-               if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
-                       pr_debug("RIO: calling init routine for %s\n",
-                                rio_name(rdev));
-                       cur->init_hook(rdev, do_enum);
-                       break;
-               }
-               cur++;
-       }
-
-       if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
-               pr_debug("RIO: adding STD routing ops for %s\n",
-                       rio_name(rdev));
-               rdev->rswitch->add_entry = rio_std_route_add_entry;
-               rdev->rswitch->get_entry = rio_std_route_get_entry;
-               rdev->rswitch->clr_table = rio_std_route_clr_table;
-       }
-
-       if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
-               printk(KERN_ERR "RIO: missing routing ops for %s\n",
-                      rio_name(rdev));
-}
-
-/**
- * rio_add_device- Adds a RIO device to the device model
- * @rdev: RIO device
- *
- * Adds the RIO device to the global device list and adds the RIO
- * device to the RIO device list.  Creates the generic sysfs nodes
- * for an RIO device.
- */
-static int rio_add_device(struct rio_dev *rdev)
-{
-       int err;
-
-       err = device_add(&rdev->dev);
-       if (err)
-               return err;
-
-       spin_lock(&rio_global_list_lock);
-       list_add_tail(&rdev->global_list, &rio_devices);
-       spin_unlock(&rio_global_list_lock);
-
-       rio_create_sysfs_dev_files(rdev);
-
-       return 0;
-}
-
-/**
- * rio_enable_rx_tx_port - enable input receiver and output transmitter of
- * given port
- * @port: Master port associated with the RIO network
- * @local: local=1 select local port otherwise a far device is reached
- * @destid: Destination ID of the device to check host bit
- * @hopcount: Number of hops to reach the target
- * @port_num: Port (-number on switch) to enable on a far end device
- *
- * Returns 0 or 1 from on General Control Command and Status Register
- * (EXT_PTR+0x3C)
- */
-inline int rio_enable_rx_tx_port(struct rio_mport *port,
-                                int local, u16 destid,
-                                u8 hopcount, u8 port_num) {
-#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
-       u32 regval;
-       u32 ext_ftr_ptr;
-
-       /*
-       * enable rx input tx output port
-       */
-       pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
-                "%d, port_num = %d)\n", local, destid, hopcount, port_num);
-
-       ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
-
-       if (local) {
-               rio_local_read_config_32(port, ext_ftr_ptr +
-                               RIO_PORT_N_CTL_CSR(0),
-                               &regval);
-       } else {
-               if (rio_mport_read_config_32(port, destid, hopcount,
-               ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
-                       return -EIO;
-       }
-
-       if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
-               /* serial */
-               regval = regval | RIO_PORT_N_CTL_EN_RX_SER
-                               | RIO_PORT_N_CTL_EN_TX_SER;
-       } else {
-               /* parallel */
-               regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
-                               | RIO_PORT_N_CTL_EN_TX_PAR;
-       }
-
-       if (local) {
-               rio_local_write_config_32(port, ext_ftr_ptr +
-                                         RIO_PORT_N_CTL_CSR(0), regval);
-       } else {
-               if (rio_mport_write_config_32(port, destid, hopcount,
-                   ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
-                       return -EIO;
-       }
-#endif
-       return 0;
-}
-
 /**
  * rio_setup_device- Allocates and sets up a RIO device
  * @net: RIO network
@@ -587,8 +462,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
                             rdev->destid);
        }
 
-       rdev->dev.bus = &rio_bus_type;
-       rdev->dev.parent = &rio_bus;
+       rio_attach_device(rdev);
 
        device_initialize(&rdev->dev);
        rdev->dev.release = rio_release_dev;
@@ -1260,19 +1134,30 @@ static void rio_pw_enable(struct rio_mport *port, int enable)
 /**
  * rio_enum_mport- Start enumeration through a master port
  * @mport: Master port to send transactions
+ * @flags: Enumeration control flags
  *
  * Starts the enumeration process. If somebody has enumerated our
  * master port device, then give up. If not and we have an active
  * link, then start recursive peer enumeration. Returns %0 if
  * enumeration succeeds or %-EBUSY if enumeration fails.
  */
-int rio_enum_mport(struct rio_mport *mport)
+int rio_enum_mport(struct rio_mport *mport, u32 flags)
 {
        struct rio_net *net = NULL;
        int rc = 0;
 
        printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id,
               mport->name);
+
+       /*
+        * To avoid multiple start requests (repeat enumeration is not supported
+        * by this method), check if enumeration/discovery has already been
+        * performed for this mport: if the mport was already added into the list
+        * of mports for a net, exit with an error.
+        */
+       if (mport->nnode.next || mport->nnode.prev)
+               return -EBUSY;
+
        /* If somebody else enumerated our master port device, bail. */
        if (rio_enum_host(mport) < 0) {
                printk(KERN_INFO
@@ -1362,14 +1247,16 @@ static void rio_build_route_tables(struct rio_net *net)
 /**
  * rio_disc_mport- Start discovery through a master port
  * @mport: Master port to send transactions
+ * @flags: discovery control flags
  *
  * Starts the discovery process. If we have an active link,
- * then wait for the signal that enumeration is complete.
+ * then wait for the signal that enumeration is complete (if wait
+ * is allowed).
  * When enumeration completion is signaled, start recursive
  * peer discovery. Returns %0 if discovery succeeds or %-EBUSY
  * on failure.
  */
-int rio_disc_mport(struct rio_mport *mport)
+int rio_disc_mport(struct rio_mport *mport, u32 flags)
 {
        struct rio_net *net = NULL;
        unsigned long to_end;
@@ -1379,6 +1266,11 @@ int rio_disc_mport(struct rio_mport *mport)
 
        /* If master port has an active link, allocate net and discover peers */
        if (rio_mport_is_active(mport)) {
+               if (rio_enum_complete(mport))
+                       goto enum_done;
+               else if (flags & RIO_SCAN_ENUM_NO_WAIT)
+                       return -EAGAIN;
+
                pr_debug("RIO: wait for enumeration to complete...\n");
 
                to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ;
@@ -1421,3 +1313,41 @@ int rio_disc_mport(struct rio_mport *mport)
 bail:
        return -EBUSY;
 }
+
+static struct rio_scan rio_scan_ops = {
+       .enumerate = rio_enum_mport,
+       .discover = rio_disc_mport,
+};
+
+static bool scan;
+module_param(scan, bool, 0);
+MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery "
+                       "(default = 0)");
+
+/**
+ * rio_basic_attach:
+ *
+ * When this enumeration/discovery method is loaded as a module, this function
+ * registers its specific enumeration and discovery routines for all available
+ * RapidIO mport devices. The "scan" command line parameter controls the ability
+ * of the module to start RapidIO enumeration/discovery automatically.
+ *
+ * Returns 0 for success or -EIO if unable to register itself.
+ *
+ * This enumeration/discovery method cannot be unloaded and therefore does not
+ * provide a matching cleanup_module routine.
+ */
+
+static int __init rio_basic_attach(void)
+{
+       if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops))
+               return -EIO;
+       if (scan)
+               rio_init_mports();
+       return 0;
+}
+
+late_initcall(rio_basic_attach);
+
+MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery");
+MODULE_LICENSE("GPL");
index 4dbe360989be8b3ed1b156e7f61aeaafb5b68fff..66d4acd5e18fd8f230cbb732a12dce1b90f98978 100644 (file)
@@ -285,3 +285,48 @@ void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
                        rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
        }
 }
+
+static ssize_t bus_scan_store(struct bus_type *bus, const char *buf,
+                               size_t count)
+{
+       long val;
+       struct rio_mport *port = NULL;
+       int rc;
+
+       if (kstrtol(buf, 0, &val) < 0)
+               return -EINVAL;
+
+       if (val == RIO_MPORT_ANY) {
+               rc = rio_init_mports();
+               goto exit;
+       }
+
+       if (val < 0 || val >= RIO_MAX_MPORTS)
+               return -EINVAL;
+
+       port = rio_find_mport((int)val);
+
+       if (!port) {
+               pr_debug("RIO: %s: mport_%d not available\n",
+                        __func__, (int)val);
+               return -EINVAL;
+       }
+
+       if (!port->nscan)
+               return -EINVAL;
+
+       if (port->host_deviceid >= 0)
+               rc = port->nscan->enumerate(port, 0);
+       else
+               rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT);
+exit:
+       if (!rc)
+               rc = count;
+
+       return rc;
+}
+
+struct bus_attribute rio_bus_attrs[] = {
+       __ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store),
+       __ATTR_NULL
+};
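A usage sketch (illustrative, assuming the standard sysfs layout for bus attributes): once rio_bus_attrs is registered on the "rapidio" bus, a privileged user can write -1 (RIO_MPORT_ANY) to /sys/bus/rapidio/scan to run rio_init_mports() across all mports, or write a specific mport ID to run that port's enumerate (host) or discover (agent) method, exactly as implemented in bus_scan_store() above.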
index d553b5d137224434bc2466582d5004644039b25b..cb1c08996fbb133a2a49b68635a563abf3a2742c 100644 (file)
 
 #include "rio.h"
 
+static LIST_HEAD(rio_devices);
+static DEFINE_SPINLOCK(rio_global_list_lock);
+
 static LIST_HEAD(rio_mports);
+static DEFINE_MUTEX(rio_mport_list_lock);
 static unsigned char next_portid;
 static DEFINE_SPINLOCK(rio_mmap_lock);
 
@@ -52,6 +56,32 @@ u16 rio_local_get_device_id(struct rio_mport *port)
        return (RIO_GET_DID(port->sys_size, result));
 }
 
+/**
+ * rio_add_device- Adds a RIO device to the device model
+ * @rdev: RIO device
+ *
+ * Adds the RIO device to the global device list and adds the RIO
+ * device to the RIO device list.  Creates the generic sysfs nodes
+ * for an RIO device.
+ */
+int rio_add_device(struct rio_dev *rdev)
+{
+       int err;
+
+       err = device_add(&rdev->dev);
+       if (err)
+               return err;
+
+       spin_lock(&rio_global_list_lock);
+       list_add_tail(&rdev->global_list, &rio_devices);
+       spin_unlock(&rio_global_list_lock);
+
+       rio_create_sysfs_dev_files(rdev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rio_add_device);
+
 /**
  * rio_request_inb_mbox - request inbound mailbox service
  * @mport: RIO master port from which to allocate the mailbox resource
@@ -489,6 +519,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local,
 
        return ext_ftr_ptr;
 }
+EXPORT_SYMBOL_GPL(rio_mport_get_physefb);
 
 /**
  * rio_get_comptag - Begin or continue searching for a RIO device by component tag
@@ -521,6 +552,7 @@ struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
        spin_unlock(&rio_global_list_lock);
        return rdev;
 }
+EXPORT_SYMBOL_GPL(rio_get_comptag);
 
 /**
  * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
@@ -545,6 +577,107 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
                                  regval);
        return 0;
 }
+EXPORT_SYMBOL_GPL(rio_set_port_lockout);
+
+/**
+ * rio_switch_init - Sets switch operations for a particular vendor switch
+ * @rdev: RIO device
+ * @do_enum: Enumeration/Discovery mode flag
+ *
+ * Searches the RIO switch ops table for known switch types. If the vid
+ * and did match a switch table entry, then call switch initialization
+ * routine to setup switch-specific routines.
+ */
+void rio_switch_init(struct rio_dev *rdev, int do_enum)
+{
+       struct rio_switch_ops *cur = __start_rio_switch_ops;
+       struct rio_switch_ops *end = __end_rio_switch_ops;
+
+       while (cur < end) {
+               if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
+                       pr_debug("RIO: calling init routine for %s\n",
+                                rio_name(rdev));
+                       cur->init_hook(rdev, do_enum);
+                       break;
+               }
+               cur++;
+       }
+
+       if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
+               pr_debug("RIO: adding STD routing ops for %s\n",
+                       rio_name(rdev));
+               rdev->rswitch->add_entry = rio_std_route_add_entry;
+               rdev->rswitch->get_entry = rio_std_route_get_entry;
+               rdev->rswitch->clr_table = rio_std_route_clr_table;
+       }
+
+       if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
+               printk(KERN_ERR "RIO: missing routing ops for %s\n",
+                      rio_name(rdev));
+}
+EXPORT_SYMBOL_GPL(rio_switch_init);
+
+/**
+ * rio_enable_rx_tx_port - enable input receiver and output transmitter of
+ * given port
+ * @port: Master port associated with the RIO network
+ * @local: local=1 select local port otherwise a far device is reached
+ * @destid: Destination ID of the device to check host bit
+ * @hopcount: Number of hops to reach the target
+ * @port_num: Port (-number on switch) to enable on a far end device
+ *
+ * Returns 0 or 1 from the General Control Command and Status Register
+ * (EXT_PTR+0x3C)
+ */
+int rio_enable_rx_tx_port(struct rio_mport *port,
+                         int local, u16 destid,
+                         u8 hopcount, u8 port_num)
+{
+#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
+       u32 regval;
+       u32 ext_ftr_ptr;
+
+       /*
+       * enable rx input tx output port
+       */
+       pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
+                "%d, port_num = %d)\n", local, destid, hopcount, port_num);
+
+       ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
+
+       if (local) {
+               rio_local_read_config_32(port, ext_ftr_ptr +
+                               RIO_PORT_N_CTL_CSR(0),
+                               &regval);
+       } else {
+               if (rio_mport_read_config_32(port, destid, hopcount,
+               ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
+                       return -EIO;
+       }
+
+       if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
+               /* serial */
+               regval = regval | RIO_PORT_N_CTL_EN_RX_SER
+                               | RIO_PORT_N_CTL_EN_TX_SER;
+       } else {
+               /* parallel */
+               regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
+                               | RIO_PORT_N_CTL_EN_TX_PAR;
+       }
+
+       if (local) {
+               rio_local_write_config_32(port, ext_ftr_ptr +
+                                         RIO_PORT_N_CTL_CSR(0), regval);
+       } else {
+               if (rio_mport_write_config_32(port, destid, hopcount,
+                   ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
+                       return -EIO;
+       }
+#endif
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port);
+
 
 /**
  * rio_chk_dev_route - Validate route to the specified device.
@@ -610,6 +743,7 @@ rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access);
 
 /**
  * rio_chk_dev_access - Validate access to the specified device.
@@ -941,6 +1075,7 @@ rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
                return RIO_GET_BLOCK_ID(reg_val);
        }
 }
+EXPORT_SYMBOL_GPL(rio_mport_get_efb);
 
 /**
  * rio_mport_get_feature - query for devices' extended features
@@ -997,6 +1132,7 @@ rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(rio_mport_get_feature);
 
 /**
  * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
@@ -1246,6 +1382,95 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
 
 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
 
+/**
+ * rio_find_mport - find RIO mport by its ID
+ * @mport_id: number (ID) of mport device
+ *
+ * Given a RIO mport number, the desired mport is located
+ * in the global list of mports. If the mport is found, a pointer to its
+ * data structure is returned.  If no mport is found, %NULL is returned.
+ */
+struct rio_mport *rio_find_mport(int mport_id)
+{
+       struct rio_mport *port;
+
+       mutex_lock(&rio_mport_list_lock);
+       list_for_each_entry(port, &rio_mports, node) {
+               if (port->id == mport_id)
+                       goto found;
+       }
+       port = NULL;
+found:
+       mutex_unlock(&rio_mport_list_lock);
+
+       return port;
+}
+
+/**
+ * rio_register_scan - enumeration/discovery method registration interface
+ * @mport_id: mport device ID for which fabric scan routine has to be set
+ *            (RIO_MPORT_ANY = set for all available mports)
+ * @scan_ops: enumeration/discovery control structure
+ *
+ * Assigns an enumeration or discovery method to the specified mport device (or
+ * all available mports if RIO_MPORT_ANY is specified).
+ * Returns an error if the mport already has an enumerator attached to it.
+ * In the case of RIO_MPORT_ANY, ports that already have valid scan routines are
+ * skipped, and an error is returned if no available mport was found.
+ */
+int rio_register_scan(int mport_id, struct rio_scan *scan_ops)
+{
+       struct rio_mport *port;
+       int rc = -EBUSY;
+
+       mutex_lock(&rio_mport_list_lock);
+       list_for_each_entry(port, &rio_mports, node) {
+               if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
+                       if (port->nscan && mport_id == RIO_MPORT_ANY)
+                               continue;
+                       else if (port->nscan)
+                               break;
+
+                       port->nscan = scan_ops;
+                       rc = 0;
+
+                       if (mport_id != RIO_MPORT_ANY)
+                               break;
+               }
+       }
+       mutex_unlock(&rio_mport_list_lock);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(rio_register_scan);
+
+/**
+ * rio_unregister_scan - removes enumeration/discovery method from mport
+ * @mport_id: mport device ID for which fabric scan routine has to be
+ *            unregistered (RIO_MPORT_ANY = set for all available mports)
+ *
+ * Removes the enumeration or discovery method assigned to the specified mport
+ * device (or all available mports if RIO_MPORT_ANY is specified).
+ */
+int rio_unregister_scan(int mport_id)
+{
+       struct rio_mport *port;
+
+       mutex_lock(&rio_mport_list_lock);
+       list_for_each_entry(port, &rio_mports, node) {
+               if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
+                       if (port->nscan)
+                               port->nscan = NULL;
+                       if (mport_id != RIO_MPORT_ANY)
+                               break;
+               }
+       }
+       mutex_unlock(&rio_mport_list_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rio_unregister_scan);
+
 static void rio_fixup_device(struct rio_dev *dev)
 {
 }
@@ -1274,7 +1499,7 @@ static void disc_work_handler(struct work_struct *_work)
        work = container_of(_work, struct rio_disc_work, work);
        pr_debug("RIO: discovery work for mport %d %s\n",
                 work->mport->id, work->mport->name);
-       rio_disc_mport(work->mport);
+       work->mport->nscan->discover(work->mport, 0);
 }
 
 int rio_init_mports(void)
@@ -1290,12 +1515,15 @@ int rio_init_mports(void)
         * First, run enumerations and check if we need to perform discovery
         * on any of the registered mports.
         */
+       mutex_lock(&rio_mport_list_lock);
        list_for_each_entry(port, &rio_mports, node) {
-               if (port->host_deviceid >= 0)
-                       rio_enum_mport(port);
-               else
+               if (port->host_deviceid >= 0) {
+                       if (port->nscan)
+                               port->nscan->enumerate(port, 0);
+               } else
                        n++;
        }
+       mutex_unlock(&rio_mport_list_lock);
 
        if (!n)
                goto no_disc;
@@ -1322,14 +1550,16 @@ int rio_init_mports(void)
        }
 
        n = 0;
+       mutex_lock(&rio_mport_list_lock);
        list_for_each_entry(port, &rio_mports, node) {
-               if (port->host_deviceid < 0) {
+               if (port->host_deviceid < 0 && port->nscan) {
                        work[n].mport = port;
                        INIT_WORK(&work[n].work, disc_work_handler);
                        queue_work(rio_wq, &work[n].work);
                        n++;
                }
        }
+       mutex_unlock(&rio_mport_list_lock);
 
        flush_workqueue(rio_wq);
        pr_debug("RIO: destroy discovery workqueue\n");
@@ -1342,8 +1572,6 @@ int rio_init_mports(void)
        return 0;
 }
 
-device_initcall_sync(rio_init_mports);
-
 static int hdids[RIO_MAX_MPORTS + 1];
 
 static int rio_get_hdid(int index)
@@ -1371,7 +1599,10 @@ int rio_register_mport(struct rio_mport *port)
 
        port->id = next_portid++;
        port->host_deviceid = rio_get_hdid(port->id);
+       port->nscan = NULL;
+       mutex_lock(&rio_mport_list_lock);
        list_add_tail(&port->node, &rio_mports);
+       mutex_unlock(&rio_mport_list_lock);
        return 0;
 }
 
@@ -1386,3 +1617,4 @@ EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
 EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
 EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
 EXPORT_SYMBOL_GPL(rio_release_outb_mbox);
+EXPORT_SYMBOL_GPL(rio_init_mports);
index b1af414f15e60f8e0cee81270321fa2d6ddf5c84..c14f864dea5cbf420fd247ec6d4584c89e932703 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/rio.h>
 
 #define RIO_MAX_CHK_RETRY      3
+#define RIO_MPORT_ANY          (-1)
 
 /* Functions internal to the RIO core code */
 
@@ -27,8 +28,6 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
 extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
                                    u8 hopcount);
 extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
-extern int rio_enum_mport(struct rio_mport *mport);
-extern int rio_disc_mport(struct rio_mport *mport);
 extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid,
                                   u8 hopcount, u16 table, u16 route_destid,
                                   u8 route_port);
@@ -39,10 +38,18 @@ extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
                                   u8 hopcount, u16 table);
 extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
 extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
+extern int rio_add_device(struct rio_dev *rdev);
+extern void rio_switch_init(struct rio_dev *rdev, int do_enum);
+extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid,
+                                u8 hopcount, u8 port_num);
+extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops);
+extern int rio_unregister_scan(int mport_id);
+extern void rio_attach_device(struct rio_dev *rdev);
+extern struct rio_mport *rio_find_mport(int mport_id);
 
 /* Structures internal to the RIO core code */
 extern struct device_attribute rio_dev_attrs[];
-extern spinlock_t rio_global_list_lock;
+extern struct bus_attribute rio_bus_attrs[];
 
 extern struct rio_switch_ops __start_rio_switch_ops[];
 extern struct rio_switch_ops __end_rio_switch_ops[];
index 6e501784158266d990fc6cad8fb6e0ee7733d923..815d6df8bd5f70fab479247c81c6afe7fa746841 100644 (file)
@@ -1539,7 +1539,10 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
 }
 
 /**
- * Balance enable_count of each GPIO and actual GPIO pin control.
+ * regulator_ena_gpio_ctrl - balance enable_count of each GPIO and actual GPIO pin control
+ * @rdev: regulator_dev structure
+ * @enable: enable GPIO at initial use?
+ *
  * GPIO is enabled in case of initial use. (enable_count is 0)
  * GPIO is disabled when it is not shared any more. (enable_count <= 1)
  */
@@ -2702,7 +2705,7 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage);
 /**
  * regulator_set_current_limit - set regulator output current limit
  * @regulator: regulator source
- * @min_uA: Minimuum supported current in uA
+ * @min_uA: Minimum supported current in uA
  * @max_uA: Maximum supported current in uA
  *
  * Sets current sink to the desired output current. This can be set during
index 89bd2faaef8cf627cddcd9f1b7a03c9b4cab5fae..ce89f7848a57f00d58c6f879cb2b3157d28fd303 100644 (file)
 static int power_state_active_cnt; /* will initialize to zero */
 static DEFINE_SPINLOCK(power_state_active_lock);
 
-int power_state_active_get(void)
-{
-       unsigned long flags;
-       int cnt;
-
-       spin_lock_irqsave(&power_state_active_lock, flags);
-       cnt = power_state_active_cnt;
-       spin_unlock_irqrestore(&power_state_active_lock, flags);
-
-       return cnt;
-}
-
 void power_state_active_enable(void)
 {
        unsigned long flags;
@@ -65,6 +53,18 @@ int power_state_active_disable(void)
 
 #ifdef CONFIG_REGULATOR_DEBUG
 
+static int power_state_active_get(void)
+{
+       unsigned long flags;
+       int cnt;
+
+       spin_lock_irqsave(&power_state_active_lock, flags);
+       cnt = power_state_active_cnt;
+       spin_unlock_irqrestore(&power_state_active_lock, flags);
+
+       return cnt;
+}
+
 static struct ux500_regulator_debug {
        struct dentry *dir;
        struct dentry *status_file;
index 92ceed0fc65e780216861143c8ccc25f9c16fa53..3ae44ac12a94c40a9f6fac05df05ad882b2be53b 100644 (file)
@@ -840,7 +840,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                        break;
                }
 
-               if ((id == PALMAS_REG_SMPS6) && (id == PALMAS_REG_SMPS8))
+               if ((id == PALMAS_REG_SMPS6) || (id == PALMAS_REG_SMPS8))
                        ramp_delay_support = true;
 
                if (ramp_delay_support) {
@@ -878,7 +878,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                        pmic->desc[id].vsel_mask = SMPS10_VSEL;
                        pmic->desc[id].enable_reg =
                                        PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
-                                                       PALMAS_SMPS10_STATUS);
+                                                       PALMAS_SMPS10_CTRL);
                        pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
                        pmic->desc[id].min_uV = 3750000;
                        pmic->desc[id].uV_step = 1250000;
index 0c81915b19975278daa15f99dea370bced46333c..b9838130a7b0da42a3bc0f35724c9b53b553096f 100644 (file)
@@ -20,7 +20,6 @@ if RTC_CLASS
 config RTC_HCTOSYS
        bool "Set system time from RTC on startup and resume"
        default y
-       depends on !ALWAYS_USE_PERSISTENT_CLOCK
        help
          If you say yes here, the system time (wall clock) will be set using
          the value read from a specified RTC device. This is useful to avoid
@@ -29,7 +28,6 @@ config RTC_HCTOSYS
 config RTC_SYSTOHC
        bool "Set the RTC time based on NTP synchronization"
        default y
-       depends on !ALWAYS_USE_PERSISTENT_CLOCK
        help
          If you say yes here, the system time (wall clock) will be stored
          in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
index 48b6612fae7fb59b13804972e2709884f79da92a..d5af7baa48b56c893e449b418d158f7b25448628 100644 (file)
@@ -285,7 +285,7 @@ static int max8998_rtc_probe(struct platform_device *pdev)
                        info->irq, ret);
 
        dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name);
-       if (pdata->rtc_delay) {
+       if (pdata && pdata->rtc_delay) {
                info->lp3974_bug_workaround = true;
                dev_warn(&pdev->dev, "LP3974 with RTC REGERR option."
                                " RTC updates will be extremely slow.\n");
index f5dfb6e5e7d9b927e69e52cd3bce2ae801c1171e..d592e2fe43f7a7fd29be1592080c03d854ed22d3 100644 (file)
@@ -234,11 +234,6 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "platform_get_resource failed\n");
-               return -ENXIO;
-       }
-
        nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(nuc900_rtc->rtc_reg))
                return PTR_ERR(nuc900_rtc->rtc_reg);
index 4e1bdb832e37cc9f50018343b22628ca8c364e83..b0ba3fc991ea2352de71d1839480f967ef8a52b0 100644 (file)
@@ -347,11 +347,6 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               pr_debug("%s: RTC resource data missing\n", pdev->name);
-               return -ENOENT;
-       }
-
        rtc_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(rtc_base))
                return PTR_ERR(rtc_base);
index 8900ea784817a66d12300f844a162026580e42c0..0f0609b1aa2ccfead83ec98db660a58fac5736de 100644 (file)
@@ -306,7 +306,7 @@ static int pl031_remove(struct amba_device *adev)
        struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
 
        amba_set_drvdata(adev, NULL);
-       free_irq(adev->irq[0], ldata->rtc);
+       free_irq(adev->irq[0], ldata);
        rtc_device_unregister(ldata->rtc);
        iounmap(ldata->base);
        kfree(ldata);
index 14040b22888de32607eef344e31efe574cec5250..0b495e8b8e66958d0781086264b7c8653b3d0eed 100644 (file)
@@ -477,11 +477,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
        /* get the memory region */
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res == NULL) {
-               dev_err(&pdev->dev, "failed to get memory region resource\n");
-               return -ENOENT;
-       }
-
        s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(s3c_rtc_base))
                return PTR_ERR(s3c_rtc_base);
index a34315d25478131c00ef1b308bb91f4152f25cae..76af92ad5a8ac418fa846e91935a0ae80fdbfc72 100644 (file)
@@ -322,12 +322,6 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev,
-                       "Unable to allocate resources for device.\n");
-               return -EBUSY;
-       }
-
        info->rtc_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(info->rtc_base))
                return PTR_ERR(info->rtc_base);
index 4361d9772c42ad8482e3ddbab2c7edf36294b579..d72a9216ee2e970d73a4758fbf1ccd1ccc366066 100644 (file)
@@ -3440,8 +3440,16 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
                        device->path_data.opm &= ~eventlpm;
                        device->path_data.ppm &= ~eventlpm;
                        device->path_data.npm &= ~eventlpm;
-                       if (oldopm && !device->path_data.opm)
-                               dasd_generic_last_path_gone(device);
+                       if (oldopm && !device->path_data.opm) {
+                               dev_warn(&device->cdev->dev,
+                                        "No verified channel paths remain "
+                                        "for the device\n");
+                               DBF_DEV_EVENT(DBF_WARNING, device,
+                                             "%s", "last verified path gone");
+                               dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+                               dasd_device_set_stop_bits(device,
+                                                         DASD_STOPPED_DC_WAIT);
+                       }
                }
                if (path_event[chp] & PE_PATH_AVAILABLE) {
                        device->path_data.opm &= ~eventlpm;
index 690c3338a8ae5a6e4a9b9a5ee3fbedb0b1ec8c82..464dd29d06c07dcbeb1e6ee96a439dbb037a25a9 100644 (file)
@@ -343,6 +343,7 @@ static int __init xpram_setup_blkdev(void)
                        put_disk(xpram_disks[i]);
                        goto out;
                }
+               queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
                blk_queue_make_request(xpram_queues[i], xpram_make_request);
                blk_queue_logical_block_size(xpram_queues[i], 4096);
        }
index 21fabc6d5a9c83bac80fda8d78495c26f9febe64..6c440d4349d4f69506fc2615074d6989d37a7c3b 100644 (file)
@@ -352,12 +352,48 @@ static ssize_t chp_shared_show(struct device *dev,
 
 static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
 
+static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct channel_path *chp = to_channelpath(dev);
+       ssize_t rc;
+
+       mutex_lock(&chp->lock);
+       if (chp->desc_fmt1.flags & 0x10)
+               rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
+       else
+               rc = 0;
+       mutex_unlock(&chp->lock);
+
+       return rc;
+}
+static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
+
+static ssize_t chp_chid_external_show(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+       struct channel_path *chp = to_channelpath(dev);
+       ssize_t rc;
+
+       mutex_lock(&chp->lock);
+       if (chp->desc_fmt1.flags & 0x10)
+               rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
+       else
+               rc = 0;
+       mutex_unlock(&chp->lock);
+
+       return rc;
+}
+static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
+
 static struct attribute *chp_attrs[] = {
        &dev_attr_status.attr,
        &dev_attr_configure.attr,
        &dev_attr_type.attr,
        &dev_attr_cmg.attr,
        &dev_attr_shared.attr,
+       &dev_attr_chid.attr,
+       &dev_attr_chid_external.attr,
        NULL,
 };
 static struct attribute_group chp_attr_group = {
index 349d5fc471963a20d7f1dd4fec393ef44877d6a6..e7ef2a683b8fbc617368ad855f4f18d6832b3eb6 100644 (file)
@@ -43,7 +43,9 @@ struct channel_path_desc_fmt1 {
        u8 chpid;
        u32:24;
        u8 chpp;
-       u32 unused[3];
+       u32 unused[2];
+       u16 chid;
+       u32:16;
        u16 mdc;
        u16:13;
        u8 r:1;
index db95c547c09d6781d68dca561abe230c5d0f57ce..86af29f53bbebefec7d58ca0c595c3852e6fdf79 100644 (file)
@@ -1353,6 +1353,8 @@ config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
        depends on PCI && SCSI
        select SCSI_FC_ATTRS
+       select GENERIC_CSUM
+       select CRC_T10DIF
        help
           This lpfc driver supports the Emulex LightPulse
           Family of Fibre Channel PCI host adapters.
index 64136c56e7065052bf24d0aa53e500f69f3957d0..33072388ea166a0cce3a2019782b4f64e5a91e33 100644 (file)
@@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev)
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
        int ddb = (int) (unsigned long) dev->lldd_dev;
 
-       if (dev->dev_type == SATA_PM_PORT)
+       if (dev->dev_type == SAS_SATA_PM_PORT)
                asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
        else if (dev->tproto)
                asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
@@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev)
        int ddb = (int) (unsigned long) dev->lldd_dev;
        u32 qdepth = 0;
 
-       if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) {
+       if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) {
                if (ata_id_has_ncq(ata_dev->id))
                        qdepth = ata_id_queue_depth(ata_dev->id);
                asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
@@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev)
        int ddb = (int) (unsigned long) dev->lldd_dev;
 
        asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
-       if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
-           dev->dev_type == SATA_PM_PORT) {
+       if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+           dev->dev_type == SAS_SATA_PM_PORT) {
                struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
                        dev->frame_rcvd;
                asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
@@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
        asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
        if (dev->port->oob_mode != SATA_OOB_MODE) {
                flags |= OPEN_REQUIRED;
-               if ((dev->dev_type == SATA_DEV) ||
+               if ((dev->dev_type == SAS_SATA_DEV) ||
                    (dev->tproto & SAS_PROTOCOL_STP)) {
                        struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
                        if (rps_resp->frame_type == SMP_RESPONSE &&
@@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev)
                } else {
                        flags |= CONCURRENT_CONN_SUPP;
                        if (!dev->parent &&
-                           (dev->dev_type == EDGE_DEV ||
-                            dev->dev_type == FANOUT_DEV))
+                           (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                            dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
                                asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
                                                       4);
                        else
@@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
                        asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
                }
        }
-       if (dev->dev_type == SATA_PM)
+       if (dev->dev_type == SAS_SATA_PM)
                flags |= SATA_MULTIPORT;
        asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
 
@@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
        asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
        asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
 
-       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+       if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
                i = asd_init_sata(dev);
                if (i < 0) {
                        asd_free_ddb(asd_ha, ddb);
@@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
                }
        }
 
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
                if (rdev->I_T_nexus_loss_timeout > 0)
                        asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
@@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev)
 
        spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
        switch (dev->dev_type) {
-       case SATA_PM:
+       case SAS_SATA_PM:
                res = asd_init_sata_pm_ddb(dev);
                break;
-       case SATA_PM_PORT:
+       case SAS_SATA_PM_PORT:
                res = asd_init_sata_pm_port_ddb(dev);
                break;
        default:
index 81b736c76fffab7bf40be168ff1ae3fb2388faad..4df867e07b2022df773ce0f7f0115e2b7bf4a5e7 100644 (file)
@@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy)
 
        memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
 
-       phy->identify_frame->dev_type = SAS_END_DEV;
+       phy->identify_frame->dev_type = SAS_END_DEVICE;
        if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
                phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
        if (phy->sas_phy.role & PHY_ROLE_TARGET)
index cf9040933da6497ca500454274fe163c50ec324d..d4c35df3d4ae600f9e5d0d7a02ed547f4c8ff905 100644 (file)
@@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
        struct sas_phy *phy = sas_get_local_phy(dev);
        /* Standard mandates link reset for ATA  (type 0) and
         * hard reset for SSP (type 1) */
-       int reset_type = (dev->dev_type == SATA_DEV ||
+       int reset_type = (dev->dev_type == SAS_SATA_DEV ||
                          (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
 
        asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
index f1733dfa3ae24489b44b3f6bee92568589c7ade1..777e7c0bbb4b327d6624a2d771a78fa5033d432c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
index 5c87768c109c0fa9a8fc0dda6f8bcb6c357ca083..e66aa7c11a8a21e041f8956c6ed0f758401c8420 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
        uint16_t status = 0, addl_status = 0, wrb_num = 0;
        struct be_mcc_wrb *temp_wrb;
        struct be_cmd_req_hdr *ioctl_hdr;
+       struct be_cmd_resp_hdr *ioctl_resp_hdr;
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
        if (beiscsi_error(phba))
@@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                            ioctl_hdr->subsystem,
                            ioctl_hdr->opcode,
                            status, addl_status);
+
+               if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+                       ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
+                       if (ioctl_resp_hdr->response_length)
+                               goto release_mcc_tag;
+               }
                rc = -EAGAIN;
        }
 
@@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
+       struct be_cmd_resp_hdr *resp_hdr;
 
        be_dws_le_to_cpu(compl, 4);
 
@@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
                            hdr->subsystem, hdr->opcode,
                            compl_status, extd_status);
 
+               if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+                       resp_hdr = (struct be_cmd_resp_hdr *) hdr;
+                       if (resp_hdr->response_length)
+                               return 0;
+               }
                return -EBUSY;
        }
        return 0;
@@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
 void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
                struct be_async_event_link_state *evt)
 {
-       switch (evt->port_link_status) {
-       case ASYNC_EVENT_LINK_DOWN:
+       if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
+           ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+            (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
+               phba->state = BE_ADAPTER_LINK_DOWN;
+
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Link Down on Physical Port %d\n",
+                           "BC_%d : Link Down on Port %d\n",
                            evt->physical_port);
 
-               phba->state |= BE_ADAPTER_LINK_DOWN;
                iscsi_host_for_each_session(phba->shost,
                                            be2iscsi_fail_session);
-               break;
-       case ASYNC_EVENT_LINK_UP:
+       } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
+                   ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+                    (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
                phba->state = BE_ADAPTER_UP;
+
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Link UP on Physical Port %d\n",
-                           evt->physical_port);
-               break;
-       default:
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Unexpected Async Notification %d on"
-                           "Physical Port %d\n",
-                           evt->port_link_status,
+                           "BC_%d : Link UP on Port %d\n",
                            evt->physical_port);
        }
 }
@@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
 {
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-       int wait = 0;
+       uint32_t wait = 0;
        u32 ready;
 
        do {
@@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
        struct be_mcc_compl *compl = &mbox->compl;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 
+       status = be_mbox_db_ready_wait(ctrl);
+       if (status)
+               return status;
+
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val |= MPU_MAILBOX_DB_HI_MASK;
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
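
This hunk, and the matching one in be_mbox_notify_wait() below, waits for the mailbox doorbell to report ready before the command is posted. A standalone sketch of that poll-before-ring pattern; the register access is faked here so the example compiles on its own, and the real driver delays between polls:

/* Standalone sketch of "wait for the ready bit, then ring the doorbell". */
#include <stdio.h>

#define MBOX_DB_READY_BIT  0x1
#define MBOX_READY_RETRIES 5

static unsigned int fake_mbox_db = MBOX_DB_READY_BIT;	/* pretend register */

static unsigned int read_mbox_db(void) { return fake_mbox_db; }

/* Returns 0 once the ready bit is seen, -1 after the retry budget runs out. */
static int mbox_db_ready_wait(void)
{
	unsigned int tries;

	for (tries = 0; tries < MBOX_READY_RETRIES; tries++) {
		if (read_mbox_db() & MBOX_DB_READY_BIT)
			return 0;
		/* the real driver sleeps/delays between polls */
	}
	return -1;
}

int main(void)
{
	if (mbox_db_ready_wait())	/* bail out before posting the command */
		return 1;
	printf("mailbox ready, safe to ring the doorbell\n");
	return 0;
}
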
@@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
        struct be_mcc_compl *compl = &mbox->compl;
        struct be_ctrl_info *ctrl = &phba->ctrl;
 
+       status = be_mbox_db_ready_wait(ctrl);
+       if (status)
+               return status;
+
        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
        return status;
 }
 
+/**
+ * be_cmd_fw_initialize()- Initialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW initialize pattern for the function.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
        return status;
 }
 
+/**
+ * be_cmd_fw_uninit()- Uninitialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW uninitialize pattern for the function.

+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
+{
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+       int status;
+       u8 *endian_check;
+
+       spin_lock(&ctrl->mbox_lock);
+       memset(wrb, 0, sizeof(*wrb));
+
+       endian_check = (u8 *) wrb;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xAA;
+       *endian_check++ = 0xBB;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xCC;
+       *endian_check++ = 0xDD;
+       *endian_check = 0xFF;
+
+       be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+       status = be_mbox_notify(ctrl);
+       if (status)
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BC_%d : be_cmd_fw_uninit Failed\n");
+
+       spin_unlock(&ctrl->mbox_lock);
+       return status;
+}
+
 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *cq, struct be_queue_info *eq,
                          bool sol_evts, bool no_delay, int coalesce_wm)
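
be_cmd_fw_uninit() above mirrors be_cmd_fw_initialize(): it zeroes the embedded WRB, writes a fixed 8-byte pattern into it and posts it through the mailbox. A standalone sketch that reproduces just the byte pattern from the hunk into a plain buffer:

/* Standalone sketch: the uninitialize command is a fixed 8-byte pattern
 * written at the start of an otherwise-zeroed buffer. */
#include <stdio.h>
#include <string.h>

static void fill_fw_uninit_pattern(unsigned char *buf, size_t len)
{
	static const unsigned char pattern[8] = {
		0xFF, 0xAA, 0xBB, 0xFF, 0xFF, 0xCC, 0xDD, 0xFF
	};

	memset(buf, 0, len);			/* like memset(wrb, 0, ...) */
	memcpy(buf, pattern, sizeof(pattern));	/* first 8 bytes carry the pattern */
}

int main(void)
{
	unsigned char wrb[32];
	size_t i;

	fill_fw_uninit_pattern(wrb, sizeof(wrb));
	for (i = 0; i < 8; i++)
		printf("%02X ", wrb[i]);
	printf("\n");
	return 0;
}
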
@@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                        OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-       if (chip_skh_r(ctrl->pdev)) {
-               req->hdr.version = MBX_CMD_VER2;
-               req->page_size = 1;
-               AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
-                             ctxt, coalesce_wm);
-               AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
-                             ctxt, no_delay);
-               AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
-                             __ilog2_u32(cq->len / 256));
-               AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
-               AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
-       } else {
+       if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_cq_context, coalescwm,
                              ctxt, coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
@@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
                              PCI_FUNC(ctrl->pdev->devfn));
+       } else {
+               req->hdr.version = MBX_CMD_VER2;
+               req->page_size = 1;
+               AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+                             ctxt, coalesce_wm);
+               AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
+                             ctxt, no_delay);
+               AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+                             __ilog2_u32(cq->len / 256));
+               AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+               AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
        }
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_defq_create_req *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &dq->dma_mem;
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        void *ctxt = &req->context;
        int status;
 
@@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                           OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
 
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
-                     1);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
-                     PCI_FUNC(ctrl->pdev->devfn));
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
-                     be_encoded_q_len(length / sizeof(struct phys_addr)));
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
-                     ctxt, entry_size);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
-                     cq->id);
+
+       if (is_chip_be2_be3r(phba)) {
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             rx_pdid, ctxt, 0);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             rx_pdid_valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             ring_size, ctxt,
+                             be_encoded_q_len(length /
+                             sizeof(struct phys_addr)));
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             default_buffer_size, ctxt, entry_size);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             cq_id_recv, ctxt, cq->id);
+       } else {
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             rx_pdid, ctxt, 0);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             rx_pdid_valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             ring_size, ctxt,
+                             be_encoded_q_len(length /
+                             sizeof(struct phys_addr)));
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             default_buffer_size, ctxt, entry_size);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             cq_id_recv, ctxt, cq->id);
+       }
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
index 23397d51ac54f5f9550f15b3b4571ed8342ee762..99073086dfe06522621e57b4e8a8e2d968f7c1c7 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -52,6 +52,10 @@ struct be_mcc_wrb {
 
 /* Completion Status */
 #define MCC_STATUS_SUCCESS 0x0
+#define MCC_STATUS_FAILED 0x1
+#define MCC_STATUS_ILLEGAL_REQUEST 0x2
+#define MCC_STATUS_ILLEGAL_FIELD 0x3
+#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
 
 #define CQE_STATUS_COMPL_MASK 0xFFFF
 #define CQE_STATUS_COMPL_SHIFT 0       /* bits 0 - 15 */
@@ -118,7 +122,8 @@ struct be_async_event_trailer {
 
 enum {
        ASYNC_EVENT_LINK_DOWN = 0x0,
-       ASYNC_EVENT_LINK_UP = 0x1
+       ASYNC_EVENT_LINK_UP = 0x1,
+       ASYNC_EVENT_LOGICAL = 0x2
 };
 
 /**
@@ -130,6 +135,9 @@ struct be_async_event_link_state {
        u8 port_link_status;
        u8 port_duplex;
        u8 port_speed;
+#define BEISCSI_PHY_LINK_FAULT_NONE    0x00
+#define BEISCSI_PHY_LINK_FAULT_LOCAL   0x01
+#define BEISCSI_PHY_LINK_FAULT_REMOTE  0x02
        u8 port_fault;
        u8 rsvd0[7];
        struct be_async_event_trailer trailer;
@@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                        uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
 /* ISCSI Functions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
 
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
 struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
@@ -751,6 +760,18 @@ struct amap_be_default_pdu_context {
        u8 rsvd4[32];           /* dword 3 */
 } __packed;
 
+struct amap_default_pdu_context_ext {
+       u8 rsvd0[16];   /* dword 0 */
+       u8 ring_size[4];    /* dword 0 */
+       u8 rsvd1[12];   /* dword 0 */
+       u8 rsvd2[22];   /* dword 1 */
+       u8 rx_pdid[9];  /* dword 1 */
+       u8 rx_pdid_valid;   /* dword 1 */
+       u8 default_buffer_size[16]; /* dword 2 */
+       u8 cq_id_recv[16];  /* dword 2 */
+       u8 rsvd3[32];   /* dword 3 */
+} __packed;
+
 struct be_defq_create_req {
        struct be_cmd_req_hdr hdr;
        u16 num_pages;
@@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 {
  * stack to notify the
  * controller of a posted Work Request Block
  */
-#define DB_WRB_POST_CID_MASK           0x3FF   /* bits 0 - 9 */
+#define DB_WRB_POST_CID_MASK           0xFFFF  /* bits 0 - 15 */
 #define DB_DEF_PDU_WRB_INDEX_MASK      0xFF    /* bits 0 - 9 */
 
 #define DB_DEF_PDU_WRB_INDEX_SHIFT     16
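
Widening DB_WRB_POST_CID_MASK from 0x3FF to 0xFFFF lets the WRB-post doorbell carry the larger CIDs used by newer adapters, while the WRB index still lands at bit 16 and up via DB_DEF_PDU_WRB_INDEX_SHIFT. A standalone sketch of how the doorbell value is assembled from these masks, mirroring the doorbell code in be_main.c further below:

/* Standalone sketch of WRB-post doorbell composition. */
#include <stdio.h>

#define DB_WRB_POST_CID_MASK		0xFFFF	/* CID in bits 0 - 15 */
#define DB_DEF_PDU_WRB_INDEX_MASK	0xFF
#define DB_DEF_PDU_WRB_INDEX_SHIFT	16

static unsigned int make_wrb_doorbell(unsigned int cid, unsigned int wrb_index)
{
	unsigned int doorbell = 0;

	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			<< DB_DEF_PDU_WRB_INDEX_SHIFT;
	return doorbell;
}

int main(void)
{
	/* CID 0x1234 now fits; the old 0x3FF mask would have truncated it. */
	printf("doorbell=0x%08X\n", make_wrb_doorbell(0x1234, 0x05));
	return 0;
}
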
index 9014690fe841f08e8993bcfd2153cba785f6135e..ef36be003f67a49f0d37cf1ed1136c87894bd942 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
                                struct beiscsi_conn *beiscsi_conn,
                                unsigned int cid)
 {
-       if (phba->conn_table[cid]) {
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+       if (phba->conn_table[cri_index]) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : Connection table already occupied. Detected clash\n");
 
@@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
        } else {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                            "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
-                           cid, beiscsi_conn);
+                           cri_index, beiscsi_conn);
 
-               phba->conn_table[cid] = beiscsi_conn;
+               phba->conn_table[cri_index] = beiscsi_conn;
        }
        return 0;
 }
@@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
 static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
 {
        struct beiscsi_hba *phba = beiscsi_ep->phba;
+       struct beiscsi_conn *beiscsi_conn;
 
        beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
        beiscsi_ep->phba = NULL;
+       phba->ep_array[BE_GET_CRI_FROM_CID
+                      (beiscsi_ep->ep_cid)] = NULL;
+
+       /**
+        * Check if any connection resource allocated by driver
+        * is to be freed. This case occurs when target redirection
+        * or connection retry is done.
+        **/
+       if (!beiscsi_ep->conn)
+               return;
+
+       beiscsi_conn = beiscsi_ep->conn;
+       if (beiscsi_conn->login_in_progress) {
+               beiscsi_free_mgmt_task_handles(beiscsi_conn,
+                                              beiscsi_conn->task);
+               beiscsi_conn->login_in_progress = 0;
+       }
 }
 
 /**
@@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 {
        struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
        struct beiscsi_hba *phba = beiscsi_ep->phba;
-       struct be_mcc_wrb *wrb;
        struct tcp_connect_and_offload_out *ptcpcnct_out;
        struct be_dma_mem nonemb_cmd;
        unsigned int tag;
@@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                    "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
                    beiscsi_ep->ep_cid);
 
-       phba->ep_array[beiscsi_ep->ep_cid -
-                      phba->fw_config.iscsi_cid_start] = ep;
-       if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
-                                 phba->params.cxns_per_ctrl * 2)) {
-
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Failed in allocate iscsi cid\n");
-               goto free_ep;
-       }
+       phba->ep_array[BE_GET_CRI_FROM_CID
+                      (beiscsi_ep->ep_cid)] = ep;
 
        beiscsi_ep->cid_vld = 0;
        nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                            "BS_%d : Failed to allocate memory for"
                            " mgmt_open_connection\n");
 
-               beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
+               beiscsi_free_ep(beiscsi_ep);
                return -ENOMEM;
        }
        nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
        memset(nonemb_cmd.va, 0, nonemb_cmd.size);
        tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
-       if (!tag) {
+       if (tag <= 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : mgmt_open_connection Failed for cid=%d\n",
                            beiscsi_ep->ep_cid);
 
-               beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
+               beiscsi_free_ep(beiscsi_ep);
                return -EAGAIN;
        }
 
-       ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+       ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
        if (ret) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
-               goto free_ep;
+               beiscsi_free_ep(beiscsi_ep);
+               return -EBUSY;
        }
 
-       ptcpcnct_out = embedded_payload(wrb);
+       ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
        beiscsi_ep = ep->dd_data;
        beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
        beiscsi_ep->cid_vld = 1;
@@ -1087,10 +1100,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
        return 0;
-
-free_ep:
-       beiscsi_free_ep(beiscsi_ep);
-       return -EBUSY;
 }
 
 /**
@@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                return ERR_PTR(ret);
        }
 
+       if (beiscsi_error(phba)) {
+               ret = -EIO;
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BS_%d : The FW state Not Stable!!!\n");
+               return ERR_PTR(ret);
+       }
+
        if (phba->state != BE_ADAPTER_UP) {
                ret = -EBUSY;
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
@@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct  beiscsi_endpoint *beiscsi_ep, int flag)
 static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
                                      unsigned int cid)
 {
-       if (phba->conn_table[cid])
-               phba->conn_table[cid] = NULL;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+       if (phba->conn_table[cri_index])
+               phba->conn_table[cri_index] = NULL;
        else {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                            "BS_%d : Connection table Not occupied.\n");
index 38eab7232159dd7ec412e798a3a521cc8bab862f..31ddc84943989b94eaa4682b91d1a8461c7d066c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
index 4e2733d2300365761e7dba5df79e3a1b606c67e2..d24a2867bc21cb885506f9d35d0fbe397e27400a 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
 
 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
+DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
+DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
 struct device_attribute *beiscsi_attrs[] = {
        &dev_attr_beiscsi_log_enable,
        &dev_attr_beiscsi_drvr_ver,
        &dev_attr_beiscsi_adapter_family,
+       &dev_attr_beiscsi_fw_ver,
+       &dev_attr_beiscsi_active_cid_count,
        NULL,
 };
 
@@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
                                    + BE2_TMFS
                                    + BE2_NOPOUT_REQ));
        phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
-       phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
+       phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
        phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
        phba->params.num_sge_per_io = BE2_SGE;
        phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba,
 static unsigned int
 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
                          struct beiscsi_hba *phba,
-                         unsigned short cid,
                          struct pdu_base *ppdu,
                          unsigned long pdu_len,
                          void *pbuffer, unsigned long buf_len)
@@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
        struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[cid];
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        if (pwrb_context->wrb_handles_available >= 2) {
                pwrb_handle = pwrb_context->pwrb_handle_base[
                                            pwrb_context->alloc_index];
@@ -1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
        hdr->t2retain = 0;
        hdr->flags = csol_cqe->i_flags;
        hdr->response = csol_cqe->i_resp;
-       hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
-       hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1);
+       hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->dlength[0] = 0;
        hdr->dlength[1] = 0;
@@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
        hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
        hdr->flags = csol_cqe->i_flags;
        hdr->response = csol_cqe->i_resp;
-       hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
-       hdr->max_cmdsn = (csol_cqe->exp_cmdsn +
-                         csol_cqe->cmd_wnd - 1);
+       hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->itt = io_task->libiscsi_itt;
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
        struct hwi_controller *phwi_ctrlr;
        struct iscsi_task *task;
        struct beiscsi_io_task *io_task;
-       struct iscsi_conn *conn = beiscsi_conn->conn;
-       struct iscsi_session *session = conn->session;
-       uint16_t wrb_index, cid;
+       uint16_t wrb_index, cid, cri_index;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       if (chip_skh_r(phba->pcidev)) {
-               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+       if (is_chip_be2_be3r(phba)) {
+               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
                                          wrb_idx, psol);
-               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
                                    cid, psol);
        } else {
-               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
                                          wrb_idx, psol);
-               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
                                    cid, psol);
        }
 
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       cid - phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
        task = pwrb_handle->pio_handle;
 
        io_task = task->dd_data;
-       spin_lock_bh(&phba->mgmt_sgl_lock);
-       free_mgmt_sgl_handle(phba, io_task->psgl_handle);
-       spin_unlock_bh(&phba->mgmt_sgl_lock);
-       spin_lock_bh(&session->lock);
-       free_wrb_handle(phba, pwrb_context, pwrb_handle);
-       spin_unlock_bh(&session->lock);
+       memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
+       iscsi_put_task(task);
 }
 
 static void
@@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
        hdr = (struct iscsi_nopin *)task->hdr;
        hdr->flags = csol_cqe->i_flags;
        hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
-       hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn +
-                        csol_cqe->cmd_wnd - 1);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->opcode = ISCSI_OP_NOOP_IN;
        hdr->itt = io_task->libiscsi_itt;
@@ -1418,7 +1417,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                struct sol_cqe *psol,
                struct common_sol_cqe *csol_cqe)
 {
-       if (chip_skh_r(phba->pcidev)) {
+       if (is_chip_be2_be3r(phba)) {
+               csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                   i_exp_cmd_sn, psol);
+               csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_res_cnt, psol);
+               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_cmd_wnd, psol);
+               csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                   wrb_index, psol);
+               csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
+                                             cid, psol);
+               csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                hw_sts, psol);
+               csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                i_resp, psol);
+               csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+                                               i_sts, psol);
+               csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_flags, psol);
+       } else {
                csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                    i_exp_cmd_sn, psol);
                csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
@@ -1429,7 +1447,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                                              cid, psol);
                csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                 hw_sts, psol);
-               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                  i_cmd_wnd, psol);
                if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                  cmd_cmpl, psol))
@@ -1445,25 +1463,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                  o, psol))
                        csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
-       } else {
-               csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                   i_exp_cmd_sn, psol);
-               csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_res_cnt, psol);
-               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_cmd_wnd, psol);
-               csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                   wrb_index, psol);
-               csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
-                                             cid, psol);
-               csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                hw_sts, psol);
-               csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                i_resp, psol);
-               csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
-                                               i_sts, psol);
-               csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_flags, psol);
        }
 }
 
@@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;
        struct common_sol_cqe csol_cqe = {0};
+       uint16_t cri_index = 0;
 
        phwi_ctrlr = phba->phwi_ctrlr;
 
        /* Copy the elements to a common structure */
        adapter_get_sol_cqe(phba, psol, &csol_cqe);
 
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       csol_cqe.cid - phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
        pwrb_handle = pwrb_context->pwrb_handle_basestd[
                      csol_cqe.wrb_index];
@@ -1561,15 +1561,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
        unsigned char is_header = 0;
        unsigned int index, dpl;
 
-       if (chip_skh_r(phba->pcidev)) {
-               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+       if (is_chip_be2_be3r(phba)) {
+               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
                                    dpl, pdpdu_cqe);
-               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
                                      index, pdpdu_cqe);
        } else {
-               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
                                    dpl, pdpdu_cqe);
-               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
                                      index, pdpdu_cqe);
        }
 
@@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
 
        WARN_ON(!pasync_handle);
 
-       pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
-                                            phba->fw_config.iscsi_cid_start;
+       pasync_handle->cri =
+                       BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
        pasync_handle->is_header = is_header;
        pasync_handle->buffer_len = dpl;
        *pcq_index = index;
@@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
        }
 
        status = beiscsi_process_async_pdu(beiscsi_conn, phba,
-                                          (beiscsi_conn->beiscsi_conn_cid -
-                                           phba->fw_config.iscsi_cid_start),
                                            phdr, hdr_len, pfirst_buffer,
                                            offset);
 
@@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
        unsigned int num_processed = 0;
        unsigned int tot_nump = 0;
        unsigned short code = 0, cid = 0;
+       uint16_t cri_index = 0;
        struct beiscsi_conn *beiscsi_conn;
        struct beiscsi_endpoint *beiscsi_ep;
        struct iscsi_endpoint *ep;
@@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                         32] & CQE_CODE_MASK);
 
                 /* Get the CID */
-               if (chip_skh_r(phba->pcidev)) {
+               if (is_chip_be2_be3r(phba)) {
+                       cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+               } else {
                        if ((code == DRIVERMSG_NOTIFY) ||
                            (code == UNSOL_HDR_NOTIFY) ||
                            (code == UNSOL_DATA_NOTIFY))
@@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                         else
                                 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                     cid, sol);
-                  } else
-                        cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+               }
 
-               ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
+               cri_index = BE_GET_CRI_FROM_CID(cid);
+               ep = phba->ep_array[cri_index];
                beiscsi_ep = ep->dd_data;
                beiscsi_conn = beiscsi_ep->conn;
 
@@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work)
 
 static int be_iopoll(struct blk_iopoll *iop, int budget)
 {
-       static unsigned int ret;
+       unsigned int ret;
        struct beiscsi_hba *phba;
        struct be_eq_obj *pbe_eq;
 
@@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
                /* Check for the data_count */
                dsp_value = (task->data_count) ? 1 : 0;
 
-               if (chip_skh_r(phba->pcidev))
-                       AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
+               if (is_chip_be2_be3r(phba))
+                       AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
                                      pwrb, dsp_value);
                else
-                       AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
+                       AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
                                      pwrb, dsp_value);
 
                /* Map addr only if there is data_count */
@@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 
 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
 {
-       struct be_mem_descriptor *mem_descr;
        dma_addr_t bus_add;
+       struct hwi_controller *phwi_ctrlr;
+       struct be_mem_descriptor *mem_descr;
        struct mem_array *mem_arr, *mem_arr_orig;
        unsigned int i, j, alloc_size, curr_alloc_size;
 
@@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
        if (!phba->phwi_ctrlr)
                return -ENOMEM;
 
+       /* Allocate memory for wrb_context */
+       phwi_ctrlr = phba->phwi_ctrlr;
+       phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
+                                         phba->params.cxns_per_ctrl,
+                                         GFP_KERNEL);
+       if (!phwi_ctrlr->wrb_context)
+               return -ENOMEM;
+
        phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
                                 GFP_KERNEL);
        if (!phba->init_mem) {
+               kfree(phwi_ctrlr->wrb_context);
                kfree(phba->phwi_ctrlr);
                return -ENOMEM;
        }
@@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
                               GFP_KERNEL);
        if (!mem_arr_orig) {
                kfree(phba->init_mem);
+               kfree(phwi_ctrlr->wrb_context);
                kfree(phba->phwi_ctrlr);
                return -ENOMEM;
        }
@@ -2628,6 +2640,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
        }
        kfree(mem_arr_orig);
        kfree(phba->init_mem);
+       kfree(phba->phwi_ctrlr->wrb_context);
        kfree(phba->phwi_ctrlr);
        return -ENOMEM;
 }
@@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba)
 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 {
        struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
+       struct hwi_context_memory *phwi_ctxt;
        struct wrb_handle *pwrb_handle = NULL;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_wrb_context *pwrb_context;
@@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
        mem_descr_wrb += HWI_MEM_WRB;
        phwi_ctrlr = phba->phwi_ctrlr;
 
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       /* Allocate memory for WRBQ */
+       phwi_ctxt = phwi_ctrlr->phwi_ctxt;
+       phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
+                                    phba->fw_config.iscsi_cid_count,
+                                    GFP_KERNEL);
+       if (!phwi_ctxt->be_wrbq) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : WRBQ Mem Alloc Failed\n");
+               return -ENOMEM;
+       }
+
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                pwrb_context->pwrb_handle_base =
                                kzalloc(sizeof(struct wrb_handle *) *
@@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
                }
        }
        idx = 0;
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                if (!num_cxn_wrb) {
                        pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
@@ -2752,7 +2777,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
        return -ENOMEM;
 }
 
-static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
+static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 {
        struct hwi_controller *phwi_ctrlr;
        struct hba_parameters *p = &phba->params;
@@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
        memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
+       pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
+                                         phba->fw_config.iscsi_cid_count,
+                                         GFP_KERNEL);
+       if (!pasync_ctx->async_entry) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
+               return -ENOMEM;
+       }
+
        pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
        pasync_ctx->buffer_size = p->defpdu_hdr_sz;
 
@@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        pasync_ctx->async_header.ep_read_ptr = -1;
        pasync_ctx->async_data.host_write_ptr = 0;
        pasync_ctx->async_data.ep_read_ptr = -1;
+
+       return 0;
 }
 
 static int
@@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
        void *wrb_vaddr;
        struct be_dma_mem sgl;
        struct be_mem_descriptor *mem_descr;
+       struct hwi_wrb_context *pwrb_context;
        int status;
 
        idx = 0;
@@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
                        kfree(pwrb_arr);
                        return status;
                }
-               phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
-                                                                  id;
+               pwrb_context = &phwi_ctrlr->wrb_context[i];
+               pwrb_context->cid = phwi_context->be_wrbq[i].id;
+               BE_SET_CID_TO_CRI(i, pwrb_context->cid);
        }
        kfree(pwrb_arr);
        return 0;
@@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
        struct hwi_wrb_context *pwrb_context;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                kfree(pwrb_context->pwrb_handle_base);
                kfree(pwrb_context->pwrb_handle_basestd);
@@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
+       struct hwi_async_pdu_context *pasync_ctx;
        int i, eq_num;
 
        phwi_ctrlr = phba->phwi_ctrlr;
@@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
                if (q->created)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
        }
+       kfree(phwi_context->be_wrbq);
        free_wrb_handles(phba);
 
        q = &phwi_context->be_def_hdrq;
@@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
        }
        be_mcc_queues_destroy(phba);
+
+       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
+       kfree(pasync_ctx->async_entry);
+       be_cmd_fw_uninit(ctrl);
 }
 
 static int be_mcc_queues_create(struct beiscsi_hba *phba,
@@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
        if (beiscsi_init_wrb_handle(phba))
                return -ENOMEM;
 
-       hwi_init_async_pdu_ctx(phba);
+       if (hwi_init_async_pdu_ctx(phba)) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx failed\n");
+               return -ENOMEM;
+       }
+
        if (hwi_init_port(phba) != 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : hwi_init_controller failed\n");
@@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
                mem_descr++;
        }
        kfree(phba->init_mem);
+       kfree(phba->phwi_ctrlr->wrb_context);
        kfree(phba->phwi_ctrlr);
 }
 
@@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 
 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 {
-       int i, new_cid;
+       int i;
 
        phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
                                  GFP_KERNEL);
@@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
                return -ENOMEM;
        }
        phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
-                                phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
+                                phba->params.cxns_per_ctrl, GFP_KERNEL);
        if (!phba->ep_array) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Failed to allocate memory in "
                            "hba_setup_cid_tbls\n");
                kfree(phba->cid_array);
+               phba->cid_array = NULL;
                return -ENOMEM;
        }
-       new_cid = phba->fw_config.iscsi_cid_start;
-       for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
-               phba->cid_array[i] = new_cid;
-               new_cid += 2;
+
+       phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
+                                  phba->params.cxns_per_ctrl, GFP_KERNEL);
+       if (!phba->conn_table) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Failed to allocate memory in "
+                           "hba_setup_cid_tbls\n");
+
+               kfree(phba->cid_array);
+               kfree(phba->ep_array);
+               phba->cid_array = NULL;
+               phba->ep_array = NULL;
+               return -ENOMEM;
        }
+
+       for (i = 0; i < phba->params.cxns_per_ctrl; i++)
+               phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
+
        phba->avlbl_cids = phba->params.cxns_per_ctrl;
        return 0;
 }
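
hba_setup_cid_tbls() now allocates ep_array and the (newly dynamic) conn_table sized by cxns_per_ctrl, and fills cid_array from the CID recorded in each wrb_context instead of synthesising every other CID from iscsi_cid_start. A standalone sketch of the new population step, with plain calloc standing in for the kernel allocation:

/* Standalone sketch: CID table built from per-connection WRB contexts. */
#include <stdio.h>
#include <stdlib.h>

struct wrb_ctx_sketch { unsigned short cid; };

static unsigned short *build_cid_array(const struct wrb_ctx_sketch *wrb_ctx,
				       unsigned int cxns_per_ctrl)
{
	unsigned short *cid_array;
	unsigned int i;

	cid_array = calloc(cxns_per_ctrl, sizeof(*cid_array));
	if (!cid_array)
		return NULL;
	for (i = 0; i < cxns_per_ctrl; i++)
		cid_array[i] = wrb_ctx[i].cid;	/* CID owned by context i */
	return cid_array;
}

int main(void)
{
	struct wrb_ctx_sketch ctx[3] = { {17}, {18}, {21} };
	unsigned short *cids = build_cid_array(ctx, 3);

	if (cids)
		printf("%u %u %u\n", cids[0], cids[1], cids[2]);
	free(cids);
	return 0;
}
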
@@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
        kfree(phba->eh_sgl_hndl_base);
        kfree(phba->cid_array);
        kfree(phba->ep_array);
+       kfree(phba->conn_table);
+}
+
+/**
+ * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
+ * @beiscsi_conn: ptr to the conn to be cleaned up
+ * @task: ptr to iscsi_task resource to be freed.
+ *
+ * Free driver mgmt resources bound to CXN.
+ **/
+void
+beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+                               struct iscsi_task *task)
+{
+       struct beiscsi_io_task *io_task;
+       struct beiscsi_hba *phba = beiscsi_conn->phba;
+       struct hwi_wrb_context *pwrb_context;
+       struct hwi_controller *phwi_ctrlr;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(
+                               beiscsi_conn->beiscsi_conn_cid);
+
+       phwi_ctrlr = phba->phwi_ctrlr;
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+
+       io_task = task->dd_data;
+
+       if (io_task->pwrb_handle) {
+               memset(io_task->pwrb_handle->pwrb, 0,
+                      sizeof(struct iscsi_wrb));
+               free_wrb_handle(phba, pwrb_context,
+                               io_task->pwrb_handle);
+               io_task->pwrb_handle = NULL;
+       }
+
+       if (io_task->psgl_handle) {
+               spin_lock_bh(&phba->mgmt_sgl_lock);
+               free_mgmt_sgl_handle(phba,
+                                    io_task->psgl_handle);
+               io_task->psgl_handle = NULL;
+               spin_unlock_bh(&phba->mgmt_sgl_lock);
+       }
+
+       if (io_task->mtask_addr)
+               pci_unmap_single(phba->pcidev,
+                                io_task->mtask_addr,
+                                io_task->mtask_data_count,
+                                PCI_DMA_TODEVICE);
 }
 
 /**
@@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
        struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(
+                            beiscsi_conn->beiscsi_conn_cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
-                       - phba->fw_config.iscsi_cid_start];
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
        if (io_task->cmd_bhs) {
                pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
                        io_task->psgl_handle = NULL;
                }
        } else {
-               if (!beiscsi_conn->login_in_progress) {
-                       if (io_task->pwrb_handle) {
-                               free_wrb_handle(phba, pwrb_context,
-                                               io_task->pwrb_handle);
-                               io_task->pwrb_handle = NULL;
-                       }
-                       if (io_task->psgl_handle) {
-                               spin_lock(&phba->mgmt_sgl_lock);
-                               free_mgmt_sgl_handle(phba,
-                                                    io_task->psgl_handle);
-                               spin_unlock(&phba->mgmt_sgl_lock);
-                               io_task->psgl_handle = NULL;
-                       }
-                       if (io_task->mtask_addr) {
-                               pci_unmap_single(phba->pcidev,
-                                                io_task->mtask_addr,
-                                                io_task->mtask_data_count,
-                                                PCI_DMA_TODEVICE);
-                               io_task->mtask_addr = 0;
-                       }
-               }
+               if (!beiscsi_conn->login_in_progress)
+                       beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
        }
 }
 
@@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
        beiscsi_cleanup_task(task);
        spin_unlock_bh(&session->lock);
 
-       pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
-                                      phba->fw_config.iscsi_cid_start));
+       pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
 
        /* Check for the adapter family */
-       if (chip_skh_r(phba->pcidev))
-               beiscsi_offload_cxn_v2(params, pwrb_handle);
-       else
+       if (is_chip_be2_be3r(phba))
                beiscsi_offload_cxn_v0(params, pwrb_handle,
                                       phba->init_mem);
+       else
+               beiscsi_offload_cxn_v2(params, pwrb_handle);
 
        be_dws_le_to_cpu(pwrb_handle->pwrb,
                         sizeof(struct iscsi_target_context_update_wrb));
@@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
        itt_t itt;
+       uint16_t cri_index = 0;
        struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
        dma_addr_t paddr;
 
@@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                        goto free_hndls;
                }
                io_task->pwrb_handle = alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                if (!io_task->pwrb_handle) {
                        beiscsi_log(phba, KERN_ERR,
                                    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        } else {
                io_task->scsi_cmnd = NULL;
                if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
+                       beiscsi_conn->task = task;
                        if (!beiscsi_conn->login_in_progress) {
                                spin_lock(&phba->mgmt_sgl_lock);
                                io_task->psgl_handle = (struct sgl_handle *)
@@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                                                        io_task->psgl_handle;
                                io_task->pwrb_handle =
                                        alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                                if (!io_task->pwrb_handle) {
                                        beiscsi_log(phba, KERN_ERR,
                                                    BEISCSI_LOG_IO |
@@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                                io_task->pwrb_handle =
                                                beiscsi_conn->plogin_wrb_handle;
                        }
-                       beiscsi_conn->task = task;
                } else {
                        spin_lock(&phba->mgmt_sgl_lock);
                        io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                        }
                        io_task->pwrb_handle =
                                        alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                        if (!io_task->pwrb_handle) {
                                beiscsi_log(phba, KERN_ERR,
                                            BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4324,12 +4414,13 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
 free_mgmt_hndls:
        spin_lock(&phba->mgmt_sgl_lock);
        free_mgmt_sgl_handle(phba, io_task->psgl_handle);
+       io_task->psgl_handle = NULL;
        spin_unlock(&phba->mgmt_sgl_lock);
 free_hndls:
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       beiscsi_conn->beiscsi_conn_cid -
-                       phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(
+       beiscsi_conn->beiscsi_conn_cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        if (io_task->pwrb_handle)
                free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
        io_task->pwrb_handle = NULL;
@@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
        unsigned int doorbell = 0;
 
        pwrb = io_task->pwrb_handle->pwrb;
-       memset(pwrb, 0, sizeof(*pwrb));
 
        io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
        io_task->bhs_len = sizeof(struct be_cmd_bhs);
@@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
        pwrb = io_task->pwrb_handle->pwrb;
        memset(pwrb, 0, sizeof(*pwrb));
 
-       if (chip_skh_r(phba->pcidev)) {
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
-                             be32_to_cpu(task->cmdsn));
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
-                             io_task->pwrb_handle->wrb_index);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
-                             io_task->psgl_handle->sgl_index);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
-                             task->data_count);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
-                             io_task->pwrb_handle->nxt_wrb_index);
-               pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
-       } else {
+       if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
                              be32_to_cpu(task->cmdsn));
                AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
@@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task)
                AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
                              io_task->pwrb_handle->nxt_wrb_index);
                pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
+       } else {
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+                             be32_to_cpu(task->cmdsn));
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+                             io_task->pwrb_handle->wrb_index);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+                             io_task->psgl_handle->sgl_index);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
+                             task->data_count);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+                             io_task->pwrb_handle->nxt_wrb_index);
+               pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
        }
 
 
@@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task)
        case ISCSI_OP_NOOP_OUT:
                if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
                        ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
-                       if (chip_skh_r(phba->pcidev))
-                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+                       if (is_chip_be2_be3r(phba))
+                               AMAP_SET_BITS(struct amap_iscsi_wrb,
                                              dmsg, pwrb, 1);
                        else
-                               AMAP_SET_BITS(struct amap_iscsi_wrb,
+                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
                                              dmsg, pwrb, 1);
                } else {
                        ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
-                       if (chip_skh_r(phba->pcidev))
-                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+                       if (is_chip_be2_be3r(phba))
+                               AMAP_SET_BITS(struct amap_iscsi_wrb,
                                              dmsg, pwrb, 0);
                        else
-                               AMAP_SET_BITS(struct amap_iscsi_wrb,
+                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
                                              dmsg, pwrb, 0);
                }
                hwi_write_buffer(pwrb, task);
@@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
        }
 
        /* Set the task type */
-       io_task->wrb_type = (chip_skh_r(phba->pcidev)) ?
-               AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) :
-               AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb);
+       io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
+               AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
+               AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
 
        doorbell |= cid & DB_WRB_POST_CID_MASK;
        doorbell |= (io_task->pwrb_handle->wrb_index &
@@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        case OC_SKH_ID1:
                phba->generation = BE_GEN4;
                phba->iotask_fn = beiscsi_iotask_v2;
+               break;
        default:
                phba->generation = 0;
        }
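
The last be_main.c hunk above adds the break that was missing from the device-ID switch in beiscsi_dev_probe(): without it, an OC_SKH_ID1 adapter fell straight through into the default label and had phba->generation reset to 0 immediately after it was set to BE_GEN4. A minimal stand-alone sketch of the corrected control flow (the numeric device ID below is a placeholder, not the driver's real value):

#include <stdio.h>

enum gen { GEN_UNKNOWN = 0, BE_GEN4 = 4 };

#define OC_SKH_ID1 0x0722       /* illustrative value only */

static enum gen classify(unsigned int device)
{
        enum gen g;

        switch (device) {
        case OC_SKH_ID1:
                g = BE_GEN4;
                break;          /* without this break, control falls into default and g is overwritten */
        default:
                g = GEN_UNKNOWN;
        }
        return g;
}

int main(void)
{
        printf("generation = %d\n", classify(OC_SKH_ID1));
        return 0;
}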
index 5946577d79d6579f6f4fa4ff8c8eaf156caf9ac5..2c06ef3c02aca49974f6bee2487f09b98c2b1d4d 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -36,7 +36,7 @@
 
 #include "be.h"
 #define DRV_NAME               "be2iscsi"
-#define BUILD_STR              "10.0.272.0"
+#define BUILD_STR              "10.0.467.0"
 #define BE_NAME                        "Emulex OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
@@ -66,8 +66,9 @@
 
 #define MAX_CPUS               64
 #define BEISCSI_MAX_NUM_CPUS   7
-#define OC_SKH_MAX_NUM_CPUS    63
+#define OC_SKH_MAX_NUM_CPUS    31
 
+#define BEISCSI_VER_STRLEN 32
 
 #define BEISCSI_SGLIST_ELEMENTS        30
 
@@ -265,7 +266,9 @@ struct invalidate_command_table {
        unsigned short cid;
 } __packed;
 
-#define chip_skh_r(pdev)       (pdev->device == OC_SKH_ID1)
+#define chip_be2(phba)      (phba->generation == BE_GEN2)
+#define chip_be3_r(phba)    (phba->generation == BE_GEN3)
+#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
 struct beiscsi_hba {
        struct hba_parameters params;
        struct hwi_controller *phwi_ctrlr;
@@ -304,10 +307,15 @@ struct beiscsi_hba {
        unsigned short avlbl_cids;
        unsigned short cid_alloc;
        unsigned short cid_free;
-       struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
        struct list_head hba_queue;
+#define BE_MAX_SESSION 2048
+#define BE_SET_CID_TO_CRI(cri_index, cid) \
+                         (phba->cid_to_cri_map[cid] = cri_index)
+#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
+       unsigned short cid_to_cri_map[BE_MAX_SESSION];
        unsigned short *cid_array;
        struct iscsi_endpoint **ep_array;
+       struct beiscsi_conn **conn_table;
        struct iscsi_boot_kset *boot_kset;
        struct Scsi_Host *shost;
        struct iscsi_iface *ipv4_iface;
@@ -339,6 +347,7 @@ struct beiscsi_hba {
        struct delayed_work beiscsi_hw_check_task;
 
        u8 mac_address[ETH_ALEN];
+       char fw_ver_str[BEISCSI_VER_STRLEN];
        char wq_name[20];
        struct workqueue_struct *wq;    /* The actual work queue */
        struct be_ctrl_info ctrl;
@@ -563,7 +572,7 @@ struct hwi_async_pdu_context {
         * This is a varying size list! Do not add anything
         * after this entry!!
         */
-       struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
+       struct hwi_async_entry *async_entry;
 };
 
 #define PDUCQE_CODE_MASK       0x0000003F
@@ -749,6 +758,8 @@ void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
 
 void beiscsi_process_all_cqs(struct work_struct *work);
+void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+                                    struct iscsi_task *task);
 
 static inline bool beiscsi_error(struct beiscsi_hba *phba)
 {
@@ -933,7 +944,7 @@ struct hwi_controller {
        struct sgl_handle *psgl_handle_base;
        unsigned int wrb_mem_index;
 
-       struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2];
+       struct hwi_wrb_context *wrb_context;
        struct mcc_wrb *pmcc_wrb_base;
        struct be_ring default_pdu_hdr;
        struct be_ring default_pdu_data;
@@ -970,9 +981,7 @@ struct hwi_context_memory {
        struct be_queue_info be_def_hdrq;
        struct be_queue_info be_def_dataq;
 
-       struct be_queue_info be_wrbq[BE2_MAX_SESSIONS];
-       struct be_mcc_wrb_context *pbe_mcc_context;
-
+       struct be_queue_info *be_wrbq;
        struct hwi_async_pdu_context *pasync_ctx;
 };
 
index 55cc9902263dd84bc047fadb429ea9a45ababa50..245a9595a93a131449fd660a7b7d8bd0e9e7f3b5 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
                            "BM_%d : phba->fw_config.iscsi_features = %d\n",
                            phba->fw_config.iscsi_features);
+               memcpy(phba->fw_ver_str, resp->params.hba_attribs.
+                      firmware_version_string, BEISCSI_VER_STRLEN);
        } else
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BG_%d :  Failed in mgmt_check_supported_fw\n");
@@ -1259,6 +1261,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, PAGE_SIZE, BE_NAME "\n");
 }
 
+/**
+ * beiscsi_fw_ver_disp()- Display Firmware Version
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Firmware version
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
+                    char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str);
+}
+
+/**
+ * beiscsi_active_cid_disp()- Display Sessions Active
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Session Count
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                      (phba->params.cxns_per_ctrl - phba->avlbl_cids));
+}
+
 /**
  * beiscsi_adap_family_disp()- Display adapter family.
  * @dev: ptr to device to get priv structure
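
The be_mgmt.c hunk copies the firmware version string reported by the adapter into phba->fw_ver_str during mgmt_check_supported_fw() and adds two read-only sysfs show callbacks: one prints that string, the other derives the active session count as cxns_per_ctrl - avlbl_cids. A hedged sketch of how such a callback is typically exported through the SCSI host's attribute list (the attribute name and placement below are illustrative, not taken from this patch):

static DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);

struct device_attribute *beiscsi_attrs[] = {
        &dev_attr_beiscsi_fw_ver,
        NULL,
};
/* beiscsi_attrs is then assigned to scsi_host_template.shost_attrs so the
 * read-only file appears under the scsi_host's sysfs directory */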
index 2e4968add799f7e91ab53e38edd21cd5e74eaa44..04af7e74fe4884a54a773f0471b264412de726d7 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -156,25 +156,25 @@ union invalidate_commands_params {
 } __packed;
 
 struct mgmt_hba_attributes {
-       u8 flashrom_version_string[32];
-       u8 manufacturer_name[32];
+       u8 flashrom_version_string[BEISCSI_VER_STRLEN];
+       u8 manufacturer_name[BEISCSI_VER_STRLEN];
        u32 supported_modes;
        u8 seeprom_version_lo;
        u8 seeprom_version_hi;
        u8 rsvd0[2];
        u32 fw_cmd_data_struct_version;
        u32 ep_fw_data_struct_version;
-       u32 future_reserved[12];
+       u8 ncsi_version_string[12];
        u32 default_extended_timeout;
-       u8 controller_model_number[32];
+       u8 controller_model_number[BEISCSI_VER_STRLEN];
        u8 controller_description[64];
-       u8 controller_serial_number[32];
-       u8 ip_version_string[32];
-       u8 firmware_version_string[32];
-       u8 bios_version_string[32];
-       u8 redboot_version_string[32];
-       u8 driver_version_string[32];
-       u8 fw_on_flash_version_string[32];
+       u8 controller_serial_number[BEISCSI_VER_STRLEN];
+       u8 ip_version_string[BEISCSI_VER_STRLEN];
+       u8 firmware_version_string[BEISCSI_VER_STRLEN];
+       u8 bios_version_string[BEISCSI_VER_STRLEN];
+       u8 redboot_version_string[BEISCSI_VER_STRLEN];
+       u8 driver_version_string[BEISCSI_VER_STRLEN];
+       u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN];
        u32 functionalities_supported;
        u16 max_cdblength;
        u8 asic_revision;
@@ -190,7 +190,8 @@ struct mgmt_hba_attributes {
        u32 firmware_post_status;
        u32 hba_mtu[8];
        u8 iscsi_features;
-       u8 future_u8[3];
+       u8 asic_generation;
+       u8 future_u8[2];
        u32 future_u32[3];
 } __packed;
 
@@ -207,7 +208,7 @@ struct mgmt_controller_attributes {
        u64 unique_identifier;
        u8 netfilters;
        u8 rsvd0[3];
-       u8 future_u32[4];
+       u32 future_u32[4];
 } __packed;
 
 struct be_mgmt_controller_attributes {
@@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
 ssize_t beiscsi_drvr_ver_disp(struct device *dev,
                               struct device_attribute *attr, char *buf);
 
+ssize_t beiscsi_fw_ver_disp(struct device *dev,
+                            struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_active_cid_disp(struct device *dev,
+                                struct device_attribute *attr, char *buf);
+
 ssize_t beiscsi_adap_family_disp(struct device *dev,
                                  struct device_attribute *attr, char *buf);
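
The be_mgmt.h hunks replace the literal 32-byte string sizes in mgmt_hba_attributes with BEISCSI_VER_STRLEN, carve ncsi_version_string and asic_generation out of previously reserved space, and widen future_u32 in mgmt_controller_attributes from u8[4] to u32[4] so the structure matches the firmware's layout again. For wire structures like these, a compile-time size assertion is a common way to catch such drift; a minimal sketch under assumed sizes (only the widened field comes from the diff, the rest is illustrative):

#include <stdint.h>

struct ctrl_attribs {
        uint64_t unique_identifier;
        uint8_t  netfilters;
        uint8_t  rsvd0[3];
        uint32_t future_u32[4];
} __attribute__((packed));

/* fails to compile if the layout drifts from the 28 bytes assumed here */
_Static_assert(sizeof(struct ctrl_attribs) == 28, "ctrl_attribs layout changed");

int main(void) { return 0; }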
 
index 11596b2c4702ae952c7063ab93494a0d5f9bb59f..08b22a901c2590e3aca12ebcfe4484e143c54460 100644 (file)
@@ -2,7 +2,7 @@
 #define _BNX2FC_H_
 /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME            "bnx2fc"
-#define BNX2FC_VERSION         "1.0.13"
+#define BNX2FC_VERSION         "1.0.14"
 
 #define PFX                    "bnx2fc: "
 
+#define BCM_CHIP_LEN           16
+
 #define BNX2X_DOORBELL_PCI_BAR         2
 
 #define BNX2FC_MAX_BD_LEN              0xffff
@@ -241,6 +243,8 @@ struct bnx2fc_hba {
        int wait_for_link_down;
        int num_ofld_sess;
        struct list_head vports;
+
+       char chip_num[BCM_CHIP_LEN];
 };
 
 struct bnx2fc_interface {
index bdbbb13b8534c2318464b1b9a803c554af2b17ac..b1c9a4f8caee852f2cbf16b3735a7f7d912bb014 100644 (file)
@@ -3,7 +3,7 @@
  * This file contains helper routines that handle ELS requests
  * and responses.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 7dffec1e57158469328e670df896767227261ae7..69ac55495c1d7479d75f79b13fdb22659875b95c 100644 (file)
@@ -3,7 +3,7 @@
  * cnic modules to create FCoE instances, send/receive non-offloaded
  * FIP/FCoE packets, listen to link events etc.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME                "bnx2fc"
 #define DRV_MODULE_VERSION     BNX2FC_VERSION
-#define DRV_MODULE_RELDATE     "Dec 21, 2012"
+#define DRV_MODULE_RELDATE     "Mar 08, 2013"
 
 
 static char version[] =
@@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
 {
        struct fcoe_port *port = lport_priv(lport);
        struct bnx2fc_interface *interface = port->priv;
+       struct bnx2fc_hba *hba = interface->hba;
        struct Scsi_Host *shost = lport->host;
        int rc = 0;
 
@@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
        }
        if (!lport->vport)
                fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
-       sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
-               BNX2FC_NAME, BNX2FC_VERSION,
+       snprintf(fc_host_symbolic_name(lport->host), 256,
+                "%s (Broadcom %s) v%s over %s",
+               BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
                interface->netdev->name);
 
        return 0;
@@ -1656,23 +1658,60 @@ static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
 static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
 {
        struct cnic_dev *cnic;
+       struct pci_dev *pdev;
 
        if (!hba->cnic) {
                printk(KERN_ERR PFX "cnic is NULL\n");
                return -ENODEV;
        }
        cnic = hba->cnic;
-       hba->pcidev = cnic->pcidev;
-       if (hba->pcidev)
-               pci_dev_get(hba->pcidev);
+       pdev = hba->pcidev = cnic->pcidev;
+       if (!hba->pcidev)
+               return -ENODEV;
 
+       switch (pdev->device) {
+       case PCI_DEVICE_ID_NX2_57710:
+               strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57711:
+               strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57712:
+       case PCI_DEVICE_ID_NX2_57712_MF:
+       case PCI_DEVICE_ID_NX2_57712_VF:
+               strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57800:
+       case PCI_DEVICE_ID_NX2_57800_MF:
+       case PCI_DEVICE_ID_NX2_57800_VF:
+               strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57810:
+       case PCI_DEVICE_ID_NX2_57810_MF:
+       case PCI_DEVICE_ID_NX2_57810_VF:
+               strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57840:
+       case PCI_DEVICE_ID_NX2_57840_MF:
+       case PCI_DEVICE_ID_NX2_57840_VF:
+       case PCI_DEVICE_ID_NX2_57840_2_20:
+       case PCI_DEVICE_ID_NX2_57840_4_10:
+               strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
+               break;
+       default:
+               pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
+               break;
+       }
+       pci_dev_get(hba->pcidev);
        return 0;
 }
 
 static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
 {
-       if (hba->pcidev)
+       if (hba->pcidev) {
+               hba->chip_num[0] = '\0';
                pci_dev_put(hba->pcidev);
+       }
        hba->pcidev = NULL;
 }
 
index 50510ffe1bf59444b2dcf28cd8c1345b41b3269a..c0d035a8f8f9e79a08a0b313742c2f7b0009ca6f 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains the code that low level functions that interact
  * with 57712 FCoE firmware.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
        fcoe_init3.error_bit_map_lo = 0xffffffff;
        fcoe_init3.error_bit_map_hi = 0xffffffff;
 
-       fcoe_init3.perf_config = 1;
+       /*
+        * enable both cached connection and cached tasks
+        * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
+        */
+       fcoe_init3.perf_config = 3;
 
        kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
        kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
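
The new comment spells out that perf_config is a two-bit mask rather than a boolean, so moving from 1 to 3 enables cached tasks in addition to cached connections. Expressed as flags (names invented here for clarity):

#include <stdio.h>

enum { PERF_CACHED_CONN = 1 << 0, PERF_CACHED_TASKS = 1 << 1 };

int main(void)
{
        unsigned int perf_config = PERF_CACHED_CONN | PERF_CACHED_TASKS;
        printf("perf_config = %u\n", perf_config);      /* prints 3 */
        return 0;
}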
index 723a9a8ba5ee28f48de2e67854205f5749287cee..575142e92d9c5219b10fb286d23a12ee1580db58 100644 (file)
@@ -1,7 +1,7 @@
 /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
  * IO manager and SCSI IO processing.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 
        spin_lock_bh(&tgt->tgt_lock);
        io_req->wait_for_comp = 0;
-       if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
-                                   &io_req->req_flags))) {
+       if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+               BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
+               rc = SUCCESS;
+       } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+                                     &io_req->req_flags))) {
                /* Let the scsi-ml try to recover this command */
                printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
                       io_req->xid);
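
bnx2fc_eh_abort() now checks BNX2FC_FLAG_IO_COMPL before claiming the abort, so an I/O that completed in another context while the handler waited is reported as SUCCESS instead of being escalated. A single-threaded model of that check order (the real code uses atomic test_bit/test_and_set_bit under tgt_lock; flag values here are illustrative):

#include <stdio.h>

#define FLAG_IO_COMPL  (1u << 0)
#define FLAG_ABTS_DONE (1u << 1)

static const char *eh_abort(unsigned int *req_flags)
{
        if (*req_flags & FLAG_IO_COMPL)
                return "SUCCESS: completed in a different context";
        if (!(*req_flags & FLAG_ABTS_DONE)) {
                *req_flags |= FLAG_ABTS_DONE;   /* first to claim the abort */
                return "FAILED: abort did not complete, let scsi-ml recover";
        }
        return "abort already handled";
}

int main(void)
{
        unsigned int flags = FLAG_IO_COMPL;
        printf("%s\n", eh_abort(&flags));
        return 0;
}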
index c57a3bb8a9fbfe1944e796a67df7cf81efec23c8..4d93177dfb530c4446d959bd97fb71cd5dee86d1 100644 (file)
@@ -2,7 +2,7 @@
  * Handles operations such as session offload/upload etc, and manages
  * session resources such as connection id and qp resources.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 0f9c04175b11d4894f34157bf8177d6956c2f9c7..372a67d122d38fc161743c105db229c534d26a89 100644 (file)
@@ -114,7 +114,7 @@ struct csio_lnode_stats {
        uint32_t        n_rnode_match;  /* matched rnode */
        uint32_t        n_dev_loss_tmo; /* Device loss timeout */
        uint32_t        n_fdmi_err;     /* fdmi err */
-       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO];  /* fw events */
+       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];      /* fw events */
        enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT];   /* State m/c events */
        uint32_t        n_rnode_alloc;  /* rnode allocated */
        uint32_t        n_rnode_free;   /* rnode freed */
index 65940096a80d8426eb438392bf544ecee1d5a1e1..43343422122202c9614d4b76539c5a5d20e2fab6 100644 (file)
@@ -63,7 +63,7 @@ struct csio_rnode_stats {
        uint32_t        n_err_nomem;    /* error nomem */
        uint32_t        n_evt_unexp;    /* unexpected event */
        uint32_t        n_evt_drop;     /* unexpected event */
-       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO];  /* fw events */
+       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];      /* fw events */
        enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT];  /* State m/c events */
        uint32_t        n_lun_rst;      /* Number of resets of
                                         * of LUNs under this
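
Both csio stats hunks grow n_evt_fw[] by one entry: the array is indexed by firmware event numbers up to and including PROTO_ERR_IMPL_LOGO, so sizing it with the enum value itself left the highest event one slot past the end. The sizing rule in isolation (the enum below is illustrative):

#include <stdio.h>

enum proto_evt { PROTO_OK = 0, PROTO_TIMEOUT, PROTO_ERR_IMPL_LOGO };

int main(void)
{
        unsigned int n_evt_fw[PROTO_ERR_IMPL_LOGO + 1] = { 0 };

        n_evt_fw[PROTO_ERR_IMPL_LOGO]++;        /* in bounds only because of the +1 */
        printf("events: %u\n", n_evt_fw[PROTO_ERR_IMPL_LOGO]);
        return 0;
}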
index 98436c3630359f7e348cb496ed4a719575675e2d..b6d1f92ed33cc17e67ff4cbe1b3c424f5f75bd0e 100644 (file)
@@ -38,7 +38,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.5.0.2"
+#define DRV_VERSION            "1.5.0.22"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
@@ -192,6 +192,18 @@ enum fnic_state {
 
 struct mempool;
 
+enum fnic_evt {
+       FNIC_EVT_START_VLAN_DISC = 1,
+       FNIC_EVT_START_FCF_DISC = 2,
+       FNIC_EVT_MAX,
+};
+
+struct fnic_event {
+       struct list_head list;
+       struct fnic *fnic;
+       enum fnic_evt event;
+};
+
 /* Per-instance private data structure */
 struct fnic {
        struct fc_lport *lport;
@@ -254,6 +266,18 @@ struct fnic {
        struct sk_buff_head frame_queue;
        struct sk_buff_head tx_queue;
 
+       /*** FIP related data members  -- start ***/
+       void (*set_vlan)(struct fnic *, u16 vlan);
+       struct work_struct      fip_frame_work;
+       struct sk_buff_head     fip_frame_queue;
+       struct timer_list       fip_timer;
+       struct list_head        vlans;
+       spinlock_t              vlans_lock;
+
+       struct work_struct      event_work;
+       struct list_head        evlist;
+       /*** FIP related data members  -- end ***/
+
        /* copy work queue cache line section */
        ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
        /* completion queue cache line section */
@@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
 }
 
 extern struct workqueue_struct *fnic_event_queue;
+extern struct workqueue_struct *fnic_fip_queue;
 extern struct device_attribute *fnic_attrs[];
 
 void fnic_clear_intr_mode(struct fnic *fnic);
@@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *);
 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
 void fnic_handle_frame(struct work_struct *work);
 void fnic_handle_link(struct work_struct *work);
+void fnic_handle_event(struct work_struct *work);
 int fnic_rq_cmpl_handler(struct fnic *fnic, int);
 int fnic_alloc_rq_frame(struct vnic_rq *rq);
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
@@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic);
 
 int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
 
+void fnic_handle_fip_frame(struct work_struct *work);
+void fnic_handle_fip_event(struct fnic *fnic);
+void fnic_fcoe_reset_vlans(struct fnic *fnic);
+void fnic_fcoe_evlist_free(struct fnic *fnic);
+extern void fnic_handle_fip_timer(struct fnic *fnic);
+
 static inline int
 fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
 {
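
The fnic.h additions split FIP handling into its own machinery: incoming FIP frames are parked on fip_frame_queue and drained by fip_frame_work, discovery requests (start VLAN discovery, start FCF discovery) become small fnic_event objects chained on evlist and drained by event_work, the candidate VLAN list lives under vlans_lock, and fip_timer drives retries. A user-space model of the event-list half of that producer/consumer pattern (locking and the FIFO ordering of list_add_tail under fnic_lock are elided):

#include <stdio.h>
#include <stdlib.h>

enum fnic_evt { EVT_START_VLAN_DISC = 1, EVT_START_FCF_DISC = 2 };

struct event {
        struct event *next;
        enum fnic_evt ev;
};

static struct event *evlist;

/* producer: mirrors fnic_event_enq() further down in the diff */
static void event_enq(enum fnic_evt ev)
{
        struct event *e = malloc(sizeof(*e));
        if (!e)
                return;                 /* the driver also just drops the event on allocation failure */
        e->ev = ev;
        e->next = evlist;
        evlist = e;
}

/* consumer: mirrors the loop in fnic_handle_event() */
static void handle_events(void)
{
        while (evlist) {
                struct event *e = evlist;
                evlist = e->next;
                printf("handling event %d\n", e->ev);
                free(e);
        }
}

int main(void)
{
        event_enq(EVT_START_VLAN_DISC);
        handle_events();
        return 0;
}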
index 483eb9dbe66366377c5dd4f049c15a160ca2a500..006fa92a02df0b4178f2ce88c73dffd8635cb5fc 100644 (file)
 #include <scsi/libfc.h>
 #include "fnic_io.h"
 #include "fnic.h"
+#include "fnic_fip.h"
 #include "cq_enet_desc.h"
 #include "cq_exch_desc.h"
 
+static u8 fcoe_all_fcfs[ETH_ALEN];
+struct workqueue_struct *fnic_fip_queue;
 struct workqueue_struct *fnic_event_queue;
 
 static void fnic_set_eth_mode(struct fnic *);
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
 
 void fnic_handle_link(struct work_struct *work)
 {
@@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work)
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link down\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
+                               if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+                                       /* start FCoE VLAN discovery */
+                                       fnic_fcoe_send_vlan_req(fnic);
+                                       return;
+                               }
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link up\n");
                                fcoe_ctlr_link_up(&fnic->ctlr);
@@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work)
        } else if (fnic->link_status) {
                /* DOWN -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+                       /* start FCoE VLAN discovery */
+                       fnic_fcoe_send_vlan_req(fnic);
+                       return;
+               }
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
                fcoe_ctlr_link_up(&fnic->ctlr);
        } else {
@@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work)
        }
 }
 
+void fnic_fcoe_evlist_free(struct fnic *fnic)
+{
+       struct fnic_event *fevt = NULL;
+       struct fnic_event *next = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (list_empty(&fnic->evlist)) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+
+       list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+               list_del(&fevt->list);
+               kfree(fevt);
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+void fnic_handle_event(struct work_struct *work)
+{
+       struct fnic *fnic = container_of(work, struct fnic, event_work);
+       struct fnic_event *fevt = NULL;
+       struct fnic_event *next = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (list_empty(&fnic->evlist)) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+
+       list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+               if (fnic->stop_rx_link_events) {
+                       list_del(&fevt->list);
+                       kfree(fevt);
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+               /*
+                * If we're in a transitional state, just re-queue and return.
+                * The queue will be serviced when we get to a stable state.
+                */
+               if (fnic->state != FNIC_IN_FC_MODE &&
+                   fnic->state != FNIC_IN_ETH_MODE) {
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+
+               list_del(&fevt->list);
+               switch (fevt->event) {
+               case FNIC_EVT_START_VLAN_DISC:
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       fnic_fcoe_send_vlan_req(fnic);
+                       spin_lock_irqsave(&fnic->fnic_lock, flags);
+                       break;
+               case FNIC_EVT_START_FCF_DISC:
+                       FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                                 "Start FCF Discovery\n");
+                       fnic_fcoe_start_fcf_disc(fnic);
+                       break;
+               default:
+                       FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                                 "Unknown event 0x%x\n", fevt->event);
+                       break;
+               }
+               kfree(fevt);
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+/**
+ * Check if the Received FIP FLOGI frame is rejected
+ * @fip: The FCoE controller that received the frame
+ * @skb: The received FIP frame
+ *
+ * Returns non-zero if the frame is rejected with unsupported cmd with
+ * insufficient resource els explanation.
+ */
+static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
+                                        struct sk_buff *skb)
+{
+       struct fc_lport *lport = fip->lp;
+       struct fip_header *fiph;
+       struct fc_frame_header *fh = NULL;
+       struct fip_desc *desc;
+       struct fip_encaps *els;
+       enum fip_desc_type els_dtype = 0;
+       u16 op;
+       u8 els_op;
+       u8 sub;
+
+       size_t els_len = 0;
+       size_t rlen;
+       size_t dlen = 0;
+
+       if (skb_linearize(skb))
+               return 0;
+
+       if (skb->len < sizeof(*fiph))
+               return 0;
+
+       fiph = (struct fip_header *)skb->data;
+       op = ntohs(fiph->fip_op);
+       sub = fiph->fip_subcode;
+
+       if (op != FIP_OP_LS)
+               return 0;
+
+       if (sub != FIP_SC_REP)
+               return 0;
+
+       rlen = ntohs(fiph->fip_dl_len) * 4;
+       if (rlen + sizeof(*fiph) > skb->len)
+               return 0;
+
+       desc = (struct fip_desc *)(fiph + 1);
+       dlen = desc->fip_dlen * FIP_BPW;
+
+       if (desc->fip_dtype == FIP_DT_FLOGI) {
+
+               shost_printk(KERN_DEBUG, lport->host,
+                         " FIP TYPE FLOGI: fab name:%llx "
+                         "vfid:%d map:%x\n",
+                         fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
+                         fip->sel_fcf->fc_map);
+               if (dlen < sizeof(*els) + sizeof(*fh) + 1)
+                       return 0;
+
+               els_len = dlen - sizeof(*els);
+               els = (struct fip_encaps *)desc;
+               fh = (struct fc_frame_header *)(els + 1);
+               els_dtype = desc->fip_dtype;
+
+               if (!fh)
+                       return 0;
+
+               /*
+                * ELS command code, reason and explanation should be = Reject,
+                * unsupported command and insufficient resource
+                */
+               els_op = *(u8 *)(fh + 1);
+               if (els_op == ELS_LS_RJT) {
+                       shost_printk(KERN_INFO, lport->host,
+                                 "Flogi Request Rejected by Switch\n");
+                       return 1;
+               }
+               shost_printk(KERN_INFO, lport->host,
+                               "Flogi Request Accepted by Switch\n");
+       }
+       return 0;
+}
+
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+{
+       struct fcoe_ctlr *fip = &fnic->ctlr;
+       struct sk_buff *skb;
+       char *eth_fr;
+       int fr_len;
+       struct fip_vlan *vlan;
+       u64 vlan_tov;
+
+       fnic_fcoe_reset_vlans(fnic);
+       fnic->set_vlan(fnic, 0);
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Sending VLAN request...\n");
+       skb = dev_alloc_skb(sizeof(struct fip_vlan));
+       if (!skb)
+               return;
+
+       fr_len = sizeof(*vlan);
+       eth_fr = (char *)skb->data;
+       vlan = (struct fip_vlan *)eth_fr;
+
+       memset(vlan, 0, sizeof(*vlan));
+       memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+       memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
+       vlan->eth.h_proto = htons(ETH_P_FIP);
+
+       vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+       vlan->fip.fip_op = htons(FIP_OP_VLAN);
+       vlan->fip.fip_subcode = FIP_SC_VL_REQ;
+       vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
+
+       vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
+       vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
+       memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+
+       vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
+       vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
+       put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
+
+       skb_put(skb, sizeof(*vlan));
+       skb->protocol = htons(ETH_P_FIP);
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       fip->send(fip, skb);
+
+       /* set a timer so that we can retry if there is no response */
+       vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
+       mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
+}
+
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
+{
+       struct fcoe_ctlr *fip = &fnic->ctlr;
+       struct fip_header *fiph;
+       struct fip_desc *desc;
+       u16 vid;
+       size_t rlen;
+       size_t dlen;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+       unsigned long flags;
+
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Received VLAN response...\n");
+
+       fiph = (struct fip_header *) skb->data;
+
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
+                 ntohs(fiph->fip_op), fiph->fip_subcode);
+
+       rlen = ntohs(fiph->fip_dl_len) * 4;
+       fnic_fcoe_reset_vlans(fnic);
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       desc = (struct fip_desc *)(fiph + 1);
+       while (rlen > 0) {
+               dlen = desc->fip_dlen * FIP_BPW;
+               switch (desc->fip_dtype) {
+               case FIP_DT_VLAN:
+                       vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
+                       shost_printk(KERN_INFO, fnic->lport->host,
+                                 "process_vlan_resp: FIP VLAN %d\n", vid);
+                       vlan = kmalloc(sizeof(*vlan),
+                                                       GFP_ATOMIC);
+                       if (!vlan) {
+                               /* retry from timer */
+                               spin_unlock_irqrestore(&fnic->vlans_lock,
+                                                       flags);
+                               goto out;
+                       }
+                       memset(vlan, 0, sizeof(struct fcoe_vlan));
+                       vlan->vid = vid & 0x0fff;
+                       vlan->state = FIP_VLAN_AVAIL;
+                       list_add_tail(&vlan->list, &fnic->vlans);
+                       break;
+               }
+               desc = (struct fip_desc *)((char *)desc + dlen);
+               rlen -= dlen;
+       }
+
+       /* any VLAN descriptors present ? */
+       if (list_empty(&fnic->vlans)) {
+               /* retry from timer */
+               FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                         "No VLAN descriptors in FIP VLAN response\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               goto out;
+       }
+
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       fnic->set_vlan(fnic, vlan->vid);
+       vlan->state = FIP_VLAN_SENT; /* sent now */
+       vlan->sol_count++;
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+       /* start the solicitation */
+       fcoe_ctlr_link_up(fip);
+
+       sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+       mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+out:
+       return;
+}
+
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       fnic->set_vlan(fnic, vlan->vid);
+       vlan->state = FIP_VLAN_SENT; /* sent now */
+       vlan->sol_count = 1;
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+       /* start the solicitation */
+       fcoe_ctlr_link_up(&fnic->ctlr);
+
+       sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+       mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+}
+
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
+{
+       unsigned long flags;
+       struct fcoe_vlan *fvlan;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (list_empty(&fnic->vlans)) {
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return -EINVAL;
+       }
+
+       fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       if (fvlan->state == FIP_VLAN_USED) {
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return 0;
+       }
+
+       if (fvlan->state == FIP_VLAN_SENT) {
+               fvlan->state = FIP_VLAN_USED;
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return 0;
+       }
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+       return -EINVAL;
+}
+
+static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
+{
+       struct fnic_event *fevt;
+       unsigned long flags;
+
+       fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
+       if (!fevt)
+               return;
+
+       fevt->fnic = fnic;
+       fevt->event = ev;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       list_add_tail(&fevt->list, &fnic->evlist);
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+       schedule_work(&fnic->event_work);
+}
+
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
+{
+       struct fip_header *fiph;
+       int ret = 1;
+       u16 op;
+       u8 sub;
+
+       if (!skb || !(skb->data))
+               return -1;
+
+       if (skb_linearize(skb))
+               goto drop;
+
+       fiph = (struct fip_header *)skb->data;
+       op = ntohs(fiph->fip_op);
+       sub = fiph->fip_subcode;
+
+       if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
+               goto drop;
+
+       if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
+               goto drop;
+
+       if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
+               if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
+                       goto drop;
+               /* pass it on to fcoe */
+               ret = 1;
+       } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
+               /* set the vlan as used */
+               fnic_fcoe_process_vlan_resp(fnic, skb);
+               ret = 0;
+       } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
+               /* received CVL request, restart vlan disc */
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               /* pass it on to fcoe */
+               ret = 1;
+       }
+drop:
+       return ret;
+}
+
+void fnic_handle_fip_frame(struct work_struct *work)
+{
+       struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
+       unsigned long flags;
+       struct sk_buff *skb;
+       struct ethhdr *eh;
+
+       while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
+               spin_lock_irqsave(&fnic->fnic_lock, flags);
+               if (fnic->stop_rx_link_events) {
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       dev_kfree_skb(skb);
+                       return;
+               }
+               /*
+                * If we're in a transitional state, just re-queue and return.
+                * The queue will be serviced when we get to a stable state.
+                */
+               if (fnic->state != FNIC_IN_FC_MODE &&
+                   fnic->state != FNIC_IN_ETH_MODE) {
+                       skb_queue_head(&fnic->fip_frame_queue, skb);
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               eh = (struct ethhdr *)skb->data;
+               if (eh->h_proto == htons(ETH_P_FIP)) {
+                       skb_pull(skb, sizeof(*eh));
+                       if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
+                               dev_kfree_skb(skb);
+                               continue;
+                       }
+                       /*
+                        * If there's FLOGI rejects - clear all
+                        * fcf's & restart from scratch
+                        */
+                       if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
+                               shost_printk(KERN_INFO, fnic->lport->host,
+                                         "Trigger a Link down - VLAN Disc\n");
+                               fcoe_ctlr_link_down(&fnic->ctlr);
+                               /* start FCoE VLAN discovery */
+                               fnic_fcoe_send_vlan_req(fnic);
+                               dev_kfree_skb(skb);
+                               continue;
+                       }
+                       fcoe_ctlr_recv(&fnic->ctlr, skb);
+                       continue;
+               }
+       }
+}
+
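
Taken together, the added functions implement FIP VLAN discovery: fnic_fcoe_send_vlan_req() multicasts a FIP VLAN request and arms fip_timer, fnic_fcoe_process_vlan_resp() collects the FIP_DT_VLAN descriptors into fnic->vlans and solicits on the first candidate, fnic_fcoe_vlan_check() marks that VLAN as in use once an advertisement arrives, and fnic_handle_fip_timer() (further down) re-solicits or walks to the next candidate after FCOE_CTLR_MAX_SOL unanswered tries. A much-simplified single-candidate model of one timer tick (return values and the one-entry list are illustrative):

#include <stdio.h>

#define MAX_SOL 8

enum vlan_state { VLAN_AVAIL, VLAN_SENT, VLAN_USED, VLAN_FAILED };

struct cand { int vid; enum vlan_state state; int sol_count; };

/* one timer tick over the first candidate, loosely mirroring fnic_handle_fip_timer() */
static int timer_tick(struct cand *v, int nvlans)
{
        if (nvlans == 0)
                return -1;                      /* nothing learned yet: restart discovery */
        if (v->state == VLAN_USED)
                return 0;                       /* a VLAN is carrying FC traffic, done */
        if (v->state == VLAN_SENT && v->sol_count >= MAX_SOL)
                return -1;                      /* give up on this VLAN, try the next */
        v->sol_count++;                         /* re-solicit and re-arm the timer */
        return 1;
}

int main(void)
{
        struct cand v = { .vid = 700, .state = VLAN_SENT, .sol_count = 0 };
        printf("tick -> %d\n", timer_tick(&v, 1));
        return 0;
}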
 /**
  * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
  * @fnic:      fnic instance.
@@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
                skb_reset_mac_header(skb);
        }
        if (eh->h_proto == htons(ETH_P_FIP)) {
-               skb_pull(skb, sizeof(*eh));
-               fcoe_ctlr_recv(&fnic->ctlr, skb);
+               if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
+                       printk(KERN_ERR "Dropped FIP frame, as firmware "
+                                       "uses non-FIP mode, Enable FIP "
+                                       "using UCSM\n");
+                       goto drop;
+               }
+               skb_queue_tail(&fnic->fip_frame_queue, skb);
+               queue_work(fnic_fip_queue, &fnic->fip_frame_work);
                return 1;               /* let caller know packet was used */
        }
        if (eh->h_proto != htons(ETH_P_FCOE))
@@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
 }
+
+void fnic_fcoe_reset_vlans(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       struct fcoe_vlan *next;
+
+       /*
+        * indicate a link down to fcoe so that all fcf's are free'd
+        * might not be required since we did this before sending vlan
+        * discovery request
+        */
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (!list_empty(&fnic->vlans)) {
+               list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
+                       list_del(&vlan->list);
+                       kfree(vlan);
+               }
+       }
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+}
+
+void fnic_handle_fip_timer(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (fnic->stop_rx_link_events) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+       if (fnic->ctlr.mode == FIP_ST_NON_FIP)
+               return;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (list_empty(&fnic->vlans)) {
+               /* no vlans available, try again */
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "Start VLAN Discovery\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               return;
+       }
+
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       shost_printk(KERN_DEBUG, fnic->lport->host,
+                 "fip_timer: vlan %d state %d sol_count %d\n",
+                 vlan->vid, vlan->state, vlan->sol_count);
+       switch (vlan->state) {
+       case FIP_VLAN_USED:
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "FIP VLAN is selected for FC transaction\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               break;
+       case FIP_VLAN_FAILED:
+               /* if all vlans are in failed state, restart vlan disc */
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "Start VLAN Discovery\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               break;
+       case FIP_VLAN_SENT:
+               if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
+                       /*
+                        * no response on this vlan, remove  from the list.
+                        * Try the next vlan
+                        */
+                       shost_printk(KERN_INFO, fnic->lport->host,
+                                 "Dequeue this VLAN ID %d from list\n",
+                                 vlan->vid);
+                       list_del(&vlan->list);
+                       kfree(vlan);
+                       vlan = NULL;
+                       if (list_empty(&fnic->vlans)) {
+                               /* we exhausted all vlans, restart vlan disc */
+                               spin_unlock_irqrestore(&fnic->vlans_lock,
+                                                       flags);
+                               shost_printk(KERN_INFO, fnic->lport->host,
+                                         "fip_timer: vlan list empty, "
+                                         "trigger vlan disc\n");
+                               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+                               return;
+                       }
+                       /* check the next vlan */
+                       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
+                                                       list);
+                       fnic->set_vlan(fnic, vlan->vid);
+                       vlan->state = FIP_VLAN_SENT; /* sent now */
+               }
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               vlan->sol_count++;
+               sol_time = jiffies + msecs_to_jiffies
+                                       (FCOE_CTLR_START_DELAY);
+               mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+               break;
+       }
+}
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
new file mode 100644 (file)
index 0000000..87e74c2
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _FNIC_FIP_H_
+#define _FNIC_FIP_H_
+
+
+#define FCOE_CTLR_START_DELAY    2000    /* ms after first adv. to choose FCF */
+#define FCOE_CTLR_FIPVLAN_TOV    2000    /* ms after FIP VLAN disc */
+#define FCOE_CTLR_MAX_SOL        8
+
+#define FINC_MAX_FLOGI_REJECTS   8
+
+/*
+ * FIP_DT_VLAN descriptor.
+ */
+struct fip_vlan_desc {
+       struct fip_desc fd_desc;
+       __be16 fd_vlan;
+} __attribute__((packed));
+
+struct vlan {
+       __be16 vid;
+       __be16 type;
+};
+
+/*
+ * VLAN entry.
+ */
+struct fcoe_vlan {
+       struct list_head list;
+       u16 vid;                /* vlan ID */
+       u16 sol_count;          /* no. of sols sent */
+       u16 state;              /* state */
+};
+
+enum fip_vlan_state {
+       FIP_VLAN_AVAIL  = 0,    /* don't do anything */
+       FIP_VLAN_SENT   = 1,    /* sent */
+       FIP_VLAN_USED   = 2,    /* succeed */
+       FIP_VLAN_FAILED = 3,    /* failed to respond */
+};
+
+struct fip_vlan {
+       struct ethhdr eth;
+       struct fip_header fip;
+       struct {
+               struct fip_mac_desc mac;
+               struct fip_wwn_desc wwnn;
+       } desc;
+};
+
+#endif  /* _FNIC_FIP_H_ */
index d601ac543c52bd1f24b50a760c240ab2c374f62c..5f09d1814d2685e86b06d9c6d697659d06cade07 100644 (file)
@@ -39,6 +39,7 @@
 #include "vnic_intr.h"
 #include "vnic_stats.h"
 #include "fnic_io.h"
+#include "fnic_fip.h"
 #include "fnic.h"
 
 #define PCI_DEVICE_ID_CISCO_FNIC       0x0045
@@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data)
                  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
 }
 
+static void fnic_fip_notify_timer(unsigned long data)
+{
+       struct fnic *fnic = (struct fnic *)data;
+
+       fnic_handle_fip_timer(fnic);
+}
+
 static void fnic_notify_timer_start(struct fnic *fnic)
 {
        switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
        return fnic->data_src_addr;
 }
 
+static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
+{
+       u16 old_vlan;
+       old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+}
+
 static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct Scsi_Host *host;
@@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
                vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
                vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+               fnic->set_vlan = fnic_set_vlan;
                fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
+               setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
+                                                       (unsigned long)fnic);
+               spin_lock_init(&fnic->vlans_lock);
+               INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
+               INIT_WORK(&fnic->event_work, fnic_handle_event);
+               skb_queue_head_init(&fnic->fip_frame_queue);
+               spin_lock_irqsave(&fnic_list_lock, flags);
+               if (!fnic_fip_queue) {
+                       fnic_fip_queue =
+                               create_singlethread_workqueue("fnic_fip_q");
+                       if (!fnic_fip_queue) {
+                               spin_unlock_irqrestore(&fnic_list_lock, flags);
+                               printk(KERN_ERR PFX "fnic FIP work queue "
+                                                "create failed\n");
+                               err = -ENOMEM;
+                               goto err_out_free_max_pool;
+                       }
+               }
+               spin_unlock_irqrestore(&fnic_list_lock, flags);
+               INIT_LIST_HEAD(&fnic->evlist);
+               INIT_LIST_HEAD(&fnic->vlans);
        } else {
                shost_printk(KERN_INFO, fnic->lport->host,
                             "firmware uses non-FIP mode\n");
@@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev)
        skb_queue_purge(&fnic->frame_queue);
        skb_queue_purge(&fnic->tx_queue);
 
+       if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+               del_timer_sync(&fnic->fip_timer);
+               skb_queue_purge(&fnic->fip_frame_queue);
+               fnic_fcoe_reset_vlans(fnic);
+               fnic_fcoe_evlist_free(fnic);
+       }
+
        /*
         * Log off the fabric. This stops all remote ports, dns port,
         * logs off the fabric. This flushes all rport, disc, lport work
@@ -889,8 +932,8 @@ static int __init fnic_init_module(void)
        len = sizeof(struct fnic_sgl_list);
        fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
                ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
-                SLAB_HWCACHE_ALIGN,
-                NULL);
+                 SLAB_HWCACHE_ALIGN,
+                 NULL);
        if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
                printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
                err = -ENOMEM;
@@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void)
 {
        pci_unregister_driver(&fnic_driver);
        destroy_workqueue(fnic_event_queue);
+       if (fnic_fip_queue) {
+               flush_workqueue(fnic_fip_queue);
+               destroy_workqueue(fnic_fip_queue);
+       }
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
        kmem_cache_destroy(fnic_io_req_cache);
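
fnic_probe() creates the shared fnic_fip_queue the first time a FIP-capable adapter is probed, and fnic_cleanup_module() now flushes that workqueue before destroying it so any queued fip_frame_work or event_work items finish first. A minimal sketch of that lazy-create / flush-then-destroy pairing using the same workqueue calls (the locking around the creation check and the usual kernel headers are omitted here):

static struct workqueue_struct *fip_wq;         /* shared by all adapters */

static int ensure_fip_wq(void)
{
        if (!fip_wq)
                fip_wq = create_singlethread_workqueue("fnic_fip_q");
        return fip_wq ? 0 : -ENOMEM;
}

static void teardown_fip_wq(void)
{
        if (fip_wq) {
                flush_workqueue(fip_wq);        /* let pending FIP work drain */
                destroy_workqueue(fip_wq);
                fip_wq = NULL;
        }
}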
index b576be734e2e04a342cd031f75c146096bc1c2f5..9795d6f3e1974e47e98f6d28ef80f1e4f282e528 100644 (file)
@@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
        return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
 }
 
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
+{
+       u64 a0 = new_default_vlan, a1 = 0;
+       int wait = 1000;
+       int old_vlan = 0;
+
+       old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
+       return (u16)old_vlan;
+}
+
 int vnic_dev_link_status(struct vnic_dev *vdev)
 {
        if (vdev->linkstatus)
index f9935a8a5a0932e14b4f896627ebe59567f8a1d9..40d4195f562bad4f40d406d3e28e48a018b6c213 100644 (file)
@@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev);
 int vnic_dev_open(struct vnic_dev *vdev, int arg);
 int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
 int vnic_dev_init(struct vnic_dev *vdev, int arg);
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev,
+                               u16 new_default_vlan);
 int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
 int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
index 7c9ccbd4134b722dad20643314fa719fdcffb89c..3e2fcbda6aedad54315aac57e07664df171306db 100644 (file)
@@ -196,6 +196,73 @@ enum vnic_devcmd_cmd {
 
        /* undo initialize of virtual link */
        CMD_DEINIT              = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+       /* check fw capability of a cmd:
+        * in:  (u32)a0=cmd
+        * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+       CMD_CAPABILITY      = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+       /* persistent binding info
+        * in:  (u64)a0=paddr of arg
+        *      (u32)a1=CMD_PERBI_XXX */
+       CMD_PERBI       = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+       /* Interrupt Assert Register functionality
+        * in: (u16)a0=interrupt number to assert
+        */
+       CMD_IAR         = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+       /* initiate hangreset, like softreset after hang detected */
+       CMD_HANG_RESET      = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+       /* hangreset status:
+        *    out: a0=0 reset complete, a0=1 reset in progress */
+       CMD_HANG_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+       /*
+        * Set hw ingress packet vlan rewrite mode:
+        * in:  (u32)a0=new vlan rewrite mode
+        * out: (u32)a0=old vlan rewrite mode */
+       CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+       /*
+        * in:  (u16)a0=bdf of target vnic
+        *      (u32)a1=cmd to proxy
+        *      a2-a15=args to cmd in a1
+        * out: (u32)a0=status of proxied cmd
+        *      a1-a15=out args of proxied cmd */
+       CMD_PROXY_BY_BDF =  _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+       /*
+        * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+        * or SR-IOV virtual vnic
+        */
+       CMD_PROXY_BY_INDEX =    _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+       /*
+        * For HPP toggle:
+        * adapter-info-get
+        * in:  (u64)a0=physical address of buffer passed in from caller.
+        *      (u16)a1=size of buffer specified in a0.
+        * out: (u64)a0=physical address of buffer passed in from caller.
+        *      (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+        *              0 if no VIF-CONFIG-INFO TLV was ever received. */
+       CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+       /*
+        * INT13 API: (u64)a0=paddr to vnic_int13_params struct
+        *            (u32)a1=INT13_CMD_xxx
+        */
+       CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+       /*
+        * Set default vlan:
+        * in: (u16)a0=new default vlan
+        *     (u16)a1=zero for overriding vlan with param a0,
+        *             non-zero for resetting vlan to the default
+        * out: (u16)a0=old default vlan
+        */
+       CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
 };
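
CMD_SET_DEFAULT_VLAN is the firmware command behind the new vnic_dev_set_default_vlan() wrapper above: a0 carries the VLAN to program, a1 selects override (zero) versus reset-to-default (non-zero), and the previously programmed VLAN comes back in a0, which is what fnic_set_vlan() receives and currently ignores. A toy model of that in/out convention (what "the default" maps to on real hardware is not specified here, so it is modelled as a stored field):

#include <stdio.h>
#include <stdint.h>

struct vdev_model { uint16_t vlan; uint16_t factory_default; };

static uint16_t set_default_vlan(struct vdev_model *vdev, uint16_t a0, uint16_t a1)
{
        uint16_t old = vdev->vlan;                      /* returned in a0 */
        vdev->vlan = a1 ? vdev->factory_default : a0;   /* non-zero a1 resets */
        return old;
}

int main(void)
{
        struct vdev_model v = { .vlan = 0, .factory_default = 0 };
        printf("old = %u, new = %u\n", set_default_vlan(&v, 700, 0), v.vlan);
        return 0;
}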
 
 /* flags for CMD_OPEN */
index cc82d0f322b6cb30fd3fdaf1fd5e49665302dece..4e31caa21ddfba379036b6a7b4c085a5f916549f 100644 (file)
@@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
                return 0;
        }
 
-       if (vhost->state == IBMVFC_ACTIVE) {
+       if (vhost->logged_in) {
                evt = ibmvfc_get_event(vhost);
                ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 
@@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
                tmf->common.length = sizeof(*tmf);
                tmf->scsi_id = rport->port_id;
                int_to_scsilun(sdev->lun, &tmf->lun);
-               tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+               if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
+                       type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
+               if (vhost->state == IBMVFC_ACTIVE)
+                       tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+               else
+                       tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
                tmf->cancel_key = (unsigned long)sdev->hostdata;
                tmf->my_cancel_key = (unsigned long)starget->hostdata;
 
@@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
        timeout = wait_for_completion_timeout(&evt->comp, timeout);
 
        if (!timeout) {
-               rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+               rc = ibmvfc_cancel_all(sdev, 0);
                if (!rc) {
                        rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
                        if (rc == SUCCESS)
@@ -2383,24 +2388,30 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
  * @cmd:       scsi command to abort
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
-       int cancel_rc, abort_rc;
+       int cancel_rc, block_rc;
        int rc = FAILED;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-       abort_rc = ibmvfc_abort_task_set(sdev);
+       if (block_rc != FAST_IO_FAIL) {
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+               ibmvfc_abort_task_set(sdev);
+       } else
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 
-       if (!cancel_rc && !abort_rc)
+       if (!cancel_rc)
                rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
@@ -2410,28 +2421,46 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
  * @cmd:       scsi command struct
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
-       int cancel_rc, reset_rc;
+       int cancel_rc, block_rc, reset_rc = 0;
        int rc = FAILED;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
-       reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+       if (block_rc != FAST_IO_FAIL) {
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+               reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+       } else
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 
        if (!cancel_rc && !reset_rc)
                rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
 
+/**
+ * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
+ * @sdev:      scsi device struct
+ * @data:      return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
+{
+       unsigned long *rc = data;
+       *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
+}
+
 /**
  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
  * @sdev:      scsi device struct
@@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
  * @cmd:       scsi command struct
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
        struct scsi_target *starget = scsi_target(sdev);
-       int reset_rc;
+       int block_rc;
+       int reset_rc = 0;
        int rc = FAILED;
        unsigned long cancel_rc = 0;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
-       reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+       if (block_rc != FAST_IO_FAIL) {
+               starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
+               reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+       } else
+               starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
 
        if (!cancel_rc && !reset_rc)
                rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
@@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
  **/
 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-       int rc;
+       int rc, block_rc;
        struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
 
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        dev_err(vhost->dev, "Resetting connection due to error recovery\n");
        rc = ibmvfc_issue_fc_host_lip(vhost->host);
+
+       if (block_rc == FAST_IO_FAIL)
+               return FAST_IO_FAIL;
+
        return rc ? FAILED : SUCCESS;
 }
 
@@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
                dev_rport = starget_to_rport(scsi_target(sdev));
                if (dev_rport != rport)
                        continue;
-               ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-               ibmvfc_abort_task_set(sdev);
+               ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
        }
 
        rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
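
The three error-handler changes above follow one pattern: fc_block_scsi_eh()
now returns a status, and when it reports FAST_IO_FAIL the handler skips the
abort/reset escalation, issues only a cancel with IBMVFC_TMF_SUPPRESS_ABTS,
and propagates FAST_IO_FAIL unless the cleanup itself failed.  Distilled into
one sketch (locking, event setup and the per-handler match functions are
omitted; this restates the patch rather than adding driver code):

	block_rc = fc_block_scsi_eh(cmd);
	ibmvfc_wait_while_resetting(vhost);
	if (block_rc != FAST_IO_FAIL) {
		/* normal escalation: cancel outstanding I/O, then abort/reset */
		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
		ibmvfc_abort_task_set(sdev);
	} else {
		/* fast-fail path: flush without sending ABTS on the wire */
		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
	}

	if (!cancel_rc)
		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);

	if (block_rc == FAST_IO_FAIL && rc != FAILED)
		rc = FAST_IO_FAIL;	/* report fast I/O failure, not success */
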
index 3be8af624e6fce888ab2b148c66321a2ce30558c..017a5290e8c12ba5d27c89a390c4ed6490e05000 100644 (file)
@@ -29,8 +29,8 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME    "ibmvfc"
-#define IBMVFC_DRIVER_VERSION          "1.0.10"
-#define IBMVFC_DRIVER_DATE             "(August 24, 2012)"
+#define IBMVFC_DRIVER_VERSION          "1.0.11"
+#define IBMVFC_DRIVER_DATE             "(April 12, 2013)"
 
 #define IBMVFC_DEFAULT_TIMEOUT 60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT    45
@@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp {
        u16 error;
        u32 flags;
 #define IBMVFC_NATIVE_FC               0x01
-#define IBMVFC_CAN_FLUSH_ON_HALT       0x08
        u32 reserved;
        u64 capabilities;
 #define IBMVFC_CAN_FLUSH_ON_HALT       0x08
+#define IBMVFC_CAN_SUPPRESS_ABTS       0x10
        u32 max_cmds;
        u32 scsi_id_sz;
        u64 max_dma_len;
@@ -351,6 +351,7 @@ struct ibmvfc_tmf {
 #define IBMVFC_TMF_LUN_RESET           0x10
 #define IBMVFC_TMF_TGT_RESET           0x20
 #define IBMVFC_TMF_LUA_VALID           0x40
+#define IBMVFC_TMF_SUPPRESS_ABTS       0x80
        u32 cancel_key;
        u32 my_cancel_key;
        u32 pad;
index 2197b57fb2251f541e0bfc0eab626ce670b66504..82a3c1ec8706600473c4daedd33b248d2c94518f 100644 (file)
@@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
        ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
-       if (!ioa_cfg->in_reset_reload) {
+       if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
                dev_err(&ioa_cfg->pdev->dev,
                        "Adapter being reset as a result of error recovery.\n");
@@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
 {
        u32 ioadl_flags = 0;
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-       struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+       struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
        struct ipr_ioadl64_desc *last_ioadl64 = NULL;
        int len = qc->nbytes;
        struct scatterlist *sg;
@@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
        ioarcb->ioadl_len =
                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
        ioarcb->u.sis64_addr_data.data_ioadl_addr =
-               cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
+               cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
 
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                ioadl64->flags = cpu_to_be32(ioadl_flags);
@@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       int i;
 
        ENTER;
        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
@@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
 
        ioa_cfg->in_reset_reload = 0;
        ioa_cfg->reset_retries = 0;
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].ioa_is_dead = 1;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
+
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        wake_up_all(&ioa_cfg->reset_wait_q);
        LEAVE;
@@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
                ioa_cfg->sdt_state = ABORT_DUMP;
-       ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
+       ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
        ioa_cfg->in_ioa_bringdown = 1;
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
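
Taken together, the ipr hunks above pair a writer and a reader: the bringdown
path marks every HRRQ dead under its _lock and publishes the flags with wmb()
before waking reset waiters, and the host reset handler then declines to start
a new adapter reset once the flag is set.  A condensed sketch of that pairing
(restating the patch, under the assumption that readers either hold the
per-queue _lock or run only after the wake-up):

	/* writer: ipr_ioa_bringdown_done() */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].ioa_is_dead = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();				/* flags visible before waking waiters */
	wake_up_all(&ioa_cfg->reset_wait_q);

	/* reader: ipr_eh_host_reset() */
	if (!ioa_cfg->in_reset_reload &&
	    !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
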
index 21a6ff1ed5c6b1afb49c38d191372bc59533780d..a1fb840596ef08cb129c8301dd81a34d24c73fd5 100644 (file)
@@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs {        /* 22 bytes */
        u8 hob_lbam;
        u8 hob_lbah;
        u8 ctl;
-}__attribute__ ((packed, aligned(4)));
+}__attribute__ ((packed, aligned(2)));
 
 struct ipr_ioadl_desc {
        __be32 flags_and_data_len;
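
The aligned(4) to aligned(2) change above matters because the ATA register
block is 22 bytes: with packed,aligned(4) the compiler rounds the structure
size up to 24 bytes, while packed,aligned(2) keeps it at 22 and lets it sit at
a 2-byte-aligned offset inside the containing structures.  A stand-alone
illustration with a hypothetical structure (not driver code):

	/* Hypothetical 22-byte register block, for illustration only. */
	struct regs_pad {
		unsigned char b[22];
	} __attribute__ ((packed, aligned(4)));		/* sizeof == 24 */

	struct regs_tight {
		unsigned char b[22];
	} __attribute__ ((packed, aligned(2)));		/* sizeof == 22 */
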
index c3aa6c5457b9c386e7237db6201dfb1f8603b83c..96a26f454673cf8b9cad6abd38ee9a28104c6a95 100644 (file)
@@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s
        struct isci_host *ihost = idev->owning_port->owning_controller;
        struct domain_device *dev = idev->domain_dev;
 
-       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+       if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
                sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
        } else if (dev_is_expander(dev)) {
                sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
@@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
        struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
        struct domain_device *dev = idev->domain_dev;
 
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                struct isci_host *ihost = idev->owning_port->owning_controller;
 
                isci_remote_device_not_ready(ihost, idev,
index 7674caae1d88bac82520c65b0cd6004e19db626f..47a013fffae73c1d416b68fc6b81dc5274bf2c46 100644 (file)
@@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte
 
 static inline bool dev_is_expander(struct domain_device *dev)
 {
-       return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
+       return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
 }
 
 static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
index 9594ab62702b1786d51d7ac80f7e10b365b6ce61..e3e3bcbd5a9fcf4245bf9d28b53c0274ca642c2d 100644 (file)
@@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
        /* all unaccelerated request types (non ssp or ncq) handled with
         * substates
         */
-       if (!task && dev->dev_type == SAS_END_DEV) {
+       if (!task && dev->dev_type == SAS_END_DEVICE) {
                state = SCI_REQ_TASK_WAIT_TC_COMP;
        } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
                state = SCI_REQ_SMP_WAIT_RESP;
@@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost,
        if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
                return SCI_FAILURE_INVALID_REMOTE_DEVICE;
 
-       if (dev->dev_type == SAS_END_DEV)
+       if (dev->dev_type == SAS_END_DEVICE)
                /* pass */;
        else if (dev_is_sata(dev))
                memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
@@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
        /* Build the common part of the request */
        sci_general_request_construct(ihost, idev, ireq);
 
-       if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
+       if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
                set_bit(IREQ_TMF, &ireq->flags);
                memset(ireq->tc, 0, sizeof(struct scu_task_context));
 
index b6f19a1db780f7303abc9796c19553b058c89ada..9bb020ac089cddf6b665e8a52f728c95621ece34 100644 (file)
@@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
        }
 
        /* XXX convert to get this from task->tproto like other drivers */
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                isci_tmf->proto = SAS_PROTOCOL_SSP;
                status = sci_task_request_construct_ssp(ireq);
                if (status != SCI_SUCCESS)
index bdb81cda84013f0d132aa1972bd409dcb9903f6b..161c98efade9b9f290c04588e4638df0f3c421ac 100644 (file)
@@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
        if (phy->attached_tproto & SAS_PROTOCOL_STP)
                dev->tproto = phy->attached_tproto;
        if (phy->attached_sata_dev)
-               dev->tproto |= SATA_DEV;
+               dev->tproto |= SAS_SATA_DEV;
 
-       if (phy->attached_dev_type == SATA_PENDING)
-               dev->dev_type = SATA_PENDING;
+       if (phy->attached_dev_type == SAS_SATA_PENDING)
+               dev->dev_type = SAS_SATA_PENDING;
        else {
                int res;
 
-               dev->dev_type = SATA_DEV;
+               dev->dev_type = SAS_SATA_DEV;
                res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
                                              &dev->sata_dev.rps_resp);
                if (res) {
@@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
        int res;
 
        /* we weren't pending, so successfully end the reset sequence now */
-       if (dev->dev_type != SATA_PENDING)
+       if (dev->dev_type != SAS_SATA_PENDING)
                return 1;
 
        /* hmmm, if this succeeds do we need to repost the domain_device to the
@@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link)
                return 0;
 
        switch (ex_phy->attached_dev_type) {
-       case SATA_PENDING:
+       case SAS_SATA_PENDING:
                return 0;
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                if (ex_phy->attached_sata_dev)
                        return sas_ata_clear_pending(dev, ex_phy);
        default:
@@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev)
        struct dev_to_host_fis *fis =
                (struct dev_to_host_fis *) dev->frame_rcvd;
 
-       if (dev->dev_type == SATA_PENDING)
+       if (dev->dev_type == SAS_SATA_PENDING)
                return;
 
        if ((fis->sector_count == 1 && /* ATA */
@@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev)
 {
        int res;
 
-       if (dev->dev_type == SATA_PM)
+       if (dev->dev_type == SAS_SATA_PM)
                return -ENODEV;
 
        sas_get_ata_command_set(dev);
index a0c3003e0c7d2f6b5e1a89147ab29d58b1609aaa..62b58d38ce2e63beda6d1ba4d9876c9af2e51008 100644 (file)
 void sas_init_dev(struct domain_device *dev)
 {
        switch (dev->dev_type) {
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                INIT_LIST_HEAD(&dev->ex_dev.children);
                mutex_init(&dev->ex_dev.cmd_mutex);
                break;
@@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port)
                if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
                    fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
                    && (fis->device & ~0x10) == 0)
-                       dev->dev_type = SATA_PM;
+                       dev->dev_type = SAS_SATA_PM;
                else
-                       dev->dev_type = SATA_DEV;
+                       dev->dev_type = SAS_SATA_DEV;
                dev->tproto = SAS_PROTOCOL_SATA;
        } else {
                struct sas_identify_frame *id =
@@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
 
        dev->port = port;
        switch (dev->dev_type) {
-       case SATA_DEV:
+       case SAS_SATA_DEV:
                rc = sas_ata_init(dev);
                if (rc) {
                        rphy = NULL;
                        break;
                }
                /* fall through */
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                rphy = sas_end_device_alloc(port->port);
                break;
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(port->port,
                                          SAS_EDGE_EXPANDER_DEVICE);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(port->port,
                                          SAS_FANOUT_EXPANDER_DEVICE);
                break;
@@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
        dev->rphy = rphy;
        get_device(&dev->rphy->dev);
 
-       if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
+       if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
                list_add_tail(&dev->disco_list_node, &port->disco_list);
        else {
                spin_lock_irq(&port->dev_list_lock);
@@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref)
        dev->phy = NULL;
 
        /* remove the phys and ports, everything else should be gone */
-       if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
+       if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                kfree(dev->ex_dev.ex_phy);
 
        if (dev_is_sata(dev) && dev->sata_dev.ap) {
@@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
        spin_unlock_irq(&port->dev_list_lock);
 
        spin_lock_irq(&ha->lock);
-       if (dev->dev_type == SAS_END_DEV &&
+       if (dev->dev_type == SAS_END_DEVICE &&
            !list_empty(&dev->ssp_dev.eh_list_node)) {
                list_del_init(&dev->ssp_dev.eh_list_node);
                ha->eh_active--;
@@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work)
                    task_pid_nr(current));
 
        switch (dev->dev_type) {
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                error = sas_discover_end_dev(dev);
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                error = sas_discover_root_expander(dev);
                break;
-       case SATA_DEV:
-       case SATA_PM:
+       case SAS_SATA_DEV:
+       case SAS_SATA_PM:
 #ifdef CONFIG_SCSI_SAS_ATA
                error = sas_discover_sata(dev);
                break;
index f42b0e15410f8a52e0a570664b04547d459e7aec..446b85110a1fc0a69b07e42bb3ecc7144d76d1ce 100644 (file)
@@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
        }
 }
 
-static enum sas_dev_type to_dev_type(struct discover_resp *dr)
+static enum sas_device_type to_dev_type(struct discover_resp *dr)
 {
        /* This is detecting a failure to transmit initial dev to host
         * FIS as described in section J.5 of sas-2 r16
         */
-       if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev &&
+       if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev &&
            dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
-               return SATA_PENDING;
+               return SAS_SATA_PENDING;
        else
                return dr->attached_dev_type;
 }
 
 static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 {
-       enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
        enum sas_linkrate linkrate;
        u8 sas_addr[SAS_ADDR_SIZE];
        struct smp_resp *resp = rsp;
@@ -238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
        /* Handle vacant phy - rest of dr data is not valid so skip it */
        if (phy->phy_state == PHY_VACANT) {
                memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-               phy->attached_dev_type = NO_DEVICE;
+               phy->attached_dev_type = SAS_PHY_UNUSED;
                if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
                        phy->phy_id = phy_id;
                        goto skip;
@@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
        /* help some expanders that fail to zero sas_address in the 'no
         * device' case
         */
-       if (phy->attached_dev_type == NO_DEVICE ||
+       if (phy->attached_dev_type == SAS_PHY_UNUSED ||
            phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
                memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
        else
@@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 
  out:
        switch (phy->attached_dev_type) {
-       case SATA_PENDING:
+       case SAS_SATA_PENDING:
                type = "stp pending";
                break;
-       case NO_DEVICE:
+       case SAS_PHY_UNUSED:
                type = "no device";
                break;
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                if (phy->attached_iproto) {
                        if (phy->attached_tproto)
                                type = "host+target";
@@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
                                type = "ssp";
                }
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                type = "smp";
                break;
        default:
@@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev(
        } else
 #endif
          if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
-               child->dev_type = SAS_END_DEV;
+               child->dev_type = SAS_END_DEVICE;
                rphy = sas_end_device_alloc(phy->port);
                /* FIXME: error handling */
                if (unlikely(!rphy))
@@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander(
 
 
        switch (phy->attached_dev_type) {
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(phy->port,
                                          SAS_EDGE_EXPANDER_DEVICE);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(phy->port,
                                          SAS_FANOUT_EXPANDER_DEVICE);
                break;
@@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
                sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
 
-       if (ex_phy->attached_dev_type == NO_DEVICE) {
+       if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) {
                if (ex_phy->routing_attr == DIRECT_ROUTING) {
                        memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
                        sas_configure_routing(dev, ex_phy->attached_sas_addr);
@@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
                return 0;
 
-       if (ex_phy->attached_dev_type != SAS_END_DEV &&
-           ex_phy->attached_dev_type != FANOUT_DEV &&
-           ex_phy->attached_dev_type != EDGE_DEV &&
-           ex_phy->attached_dev_type != SATA_PENDING) {
+       if (ex_phy->attached_dev_type != SAS_END_DEVICE &&
+           ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
+           ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+           ex_phy->attached_dev_type != SAS_SATA_PENDING) {
                SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
                            "phy 0x%x\n", ex_phy->attached_dev_type,
                            SAS_ADDR(dev->sas_addr),
@@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        }
 
        switch (ex_phy->attached_dev_type) {
-       case SAS_END_DEV:
-       case SATA_PENDING:
+       case SAS_END_DEVICE:
+       case SAS_SATA_PENDING:
                child = sas_ex_discover_end_dev(dev, phy_id);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
                        SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
                                    "attached to ex %016llx phy 0x%x\n",
@@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
                        memcpy(dev->port->disc.fanout_sas_addr,
                               ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
                /* fallthrough */
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                child = sas_ex_discover_expander(dev, phy_id);
                break;
        default:
@@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
                    phy->phy_state == PHY_NOT_PRESENT)
                        continue;
 
-               if ((phy->attached_dev_type == EDGE_DEV ||
-                    phy->attached_dev_type == FANOUT_DEV) &&
+               if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                    phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
                    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
                        memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
@@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
        u8 sub_addr[8] = {0, };
 
        list_for_each_entry(child, &ex->children, siblings) {
-               if (child->dev_type != EDGE_DEV &&
-                   child->dev_type != FANOUT_DEV)
+               if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+                   child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
                        continue;
                if (sub_addr[0] == 0) {
                        sas_find_sub_addr(child, sub_addr);
@@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
        int i;
        u8  *sub_sas_addr = NULL;
 
-       if (dev->dev_type != EDGE_DEV)
+       if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE)
                return 0;
 
        for (i = 0; i < ex->num_phys; i++) {
@@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
                    phy->phy_state == PHY_NOT_PRESENT)
                        continue;
 
-               if ((phy->attached_dev_type == FANOUT_DEV ||
-                    phy->attached_dev_type == EDGE_DEV) &&
+               if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+                    phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
                    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
                        if (!sub_sas_addr)
@@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
                                                 struct ex_phy *child_phy)
 {
        static const char *ex_type[] = {
-               [EDGE_DEV] = "edge",
-               [FANOUT_DEV] = "fanout",
+               [SAS_EDGE_EXPANDER_DEVICE] = "edge",
+               [SAS_FANOUT_EXPANDER_DEVICE] = "fanout",
        };
        struct domain_device *parent = child->parent;
 
@@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child)
        if (!child->parent)
                return 0;
 
-       if (child->parent->dev_type != EDGE_DEV &&
-           child->parent->dev_type != FANOUT_DEV)
+       if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+           child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
                return 0;
 
        parent_ex = &child->parent->ex_dev;
@@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child)
                child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
 
                switch (child->parent->dev_type) {
-               case EDGE_DEV:
-                       if (child->dev_type == FANOUT_DEV) {
+               case SAS_EDGE_EXPANDER_DEVICE:
+                       if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                                if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
                                    child_phy->routing_attr != TABLE_ROUTING) {
                                        sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child)
                                }
                        }
                        break;
-               case FANOUT_DEV:
+               case SAS_FANOUT_EXPANDER_DEVICE:
                        if (parent_phy->routing_attr != TABLE_ROUTING ||
                            child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
                                sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1619,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
        struct domain_device *dev;
 
        list_for_each_entry(dev, &port->dev_list, dev_list_node) {
-               if (dev->dev_type == EDGE_DEV ||
-                   dev->dev_type == FANOUT_DEV) {
+               if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        struct sas_expander_device *ex =
                                rphy_to_expander_device(dev->rphy);
 
@@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
 }
 
 static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
-                                   u8 *sas_addr, enum sas_dev_type *type)
+                                   u8 *sas_addr, enum sas_device_type *type)
 {
        int res;
        struct smp_resp *disc_resp;
@@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
                        SAS_DPRINTK("Expander phys DID NOT change\n");
        }
        list_for_each_entry(ch, &ex->children, siblings) {
-               if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
+               if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        res = sas_find_bcast_dev(ch, src_dev);
                        if (*src_dev)
                                return res;
@@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
 
        list_for_each_entry_safe(child, n, &ex->children, siblings) {
                set_bit(SAS_DEV_GONE, &child->state);
-               if (child->dev_type == EDGE_DEV ||
-                   child->dev_type == FANOUT_DEV)
+               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                        sas_unregister_ex_tree(port, child);
                else
                        sas_unregister_dev(port, child);
@@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
                        if (SAS_ADDR(child->sas_addr) ==
                            SAS_ADDR(phy->attached_sas_addr)) {
                                set_bit(SAS_DEV_GONE, &child->state);
-                               if (child->dev_type == EDGE_DEV ||
-                                   child->dev_type == FANOUT_DEV)
+                               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                                        sas_unregister_ex_tree(parent->port, child);
                                else
                                        sas_unregister_dev(parent->port, child);
@@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root,
        int res = 0;
 
        list_for_each_entry(child, &ex_root->children, siblings) {
-               if (child->dev_type == EDGE_DEV ||
-                   child->dev_type == FANOUT_DEV) {
+               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        struct sas_expander_device *ex =
                                rphy_to_expander_device(child->rphy);
 
@@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
        list_for_each_entry(child, &dev->ex_dev.children, siblings) {
                if (SAS_ADDR(child->sas_addr) ==
                    SAS_ADDR(ex_phy->attached_sas_addr)) {
-                       if (child->dev_type == EDGE_DEV ||
-                           child->dev_type == FANOUT_DEV)
+                       if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                           child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                                res = sas_discover_bfs_by_root(child);
                        break;
                }
@@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
        return res;
 }
 
-static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
+static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
 {
        if (old == new)
                return true;
 
        /* treat device directed resets as flutter, if we went
-        * SAS_END_DEV to SATA_PENDING the link needs recovery
+        * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery
         */
-       if ((old == SATA_PENDING && new == SAS_END_DEV) ||
-           (old == SAS_END_DEV && new == SATA_PENDING))
+       if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) ||
+           (old == SAS_END_DEVICE && new == SAS_SATA_PENDING))
                return true;
 
        return false;
@@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 {
        struct expander_device *ex = &dev->ex_dev;
        struct ex_phy *phy = &ex->ex_phy[phy_id];
-       enum sas_dev_type type = NO_DEVICE;
+       enum sas_device_type type = SAS_PHY_UNUSED;
        u8 sas_addr[8];
        int res;
 
@@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 
                sas_ex_phy_discover(dev, phy_id);
 
-               if (ata_dev && phy->attached_dev_type == SATA_PENDING)
+               if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
                        action = ", needs recovery";
                SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
                            SAS_ADDR(dev->sas_addr), phy_id, action);
index 1de67964e5a1e39ffa10151c79489078826e2536..7e7ba83f0a2139c565f692fa93d0b3f5954abcce 100644 (file)
@@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
        rphy->identify.initiator_port_protocols = dev->iproto;
        rphy->identify.target_port_protocols = dev->tproto;
        switch (dev->dev_type) {
-       case SATA_DEV:
+       case SAS_SATA_DEV:
                /* FIXME: need sata device type */
-       case SAS_END_DEV:
-       case SATA_PENDING:
+       case SAS_END_DEVICE:
+       case SAS_SATA_PENDING:
                rphy->identify.device_type = SAS_END_DEVICE;
                break;
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
                break;
        default:
index 1398b714c01836ee3789199cf3cd627ac4fa4021..d3c5297c6c89e242d845d16142f7a309ed4ef773 100644 (file)
@@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
                        continue;
                }
 
-               if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
+               if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        dev->ex_dev.ex_change_count = -1;
                        for (i = 0; i < dev->ex_dev.num_phys; i++) {
                                struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
index 7706c99ec8bbb1e8c70b051113f789cb6f97c1dd..bcc56cac4fd8f358d53084b598c167d060a82c19 100644 (file)
@@ -46,10 +46,15 @@ struct lpfc_sli2_slim;
 #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128      /* sg element count per scsi
                cmnd for menlo; needs nearly twice as many as for firmware
                downloads using bsg */
-#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
+
+#define LPFC_MIN_SG_SLI4_BUF_SZ        0x800   /* based on LPFC_DEFAULT_SG_SEG_CNT */
+#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SG_SEG_CNT_DIF 512    /* sg element count per scsi cmnd  */
 #define LPFC_MAX_SG_SEG_CNT    4096    /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGL_SEG_CNT   512     /* SGL element count per scsi cmnd */
+#define LPFC_MAX_BPL_SEG_CNT   4096    /* BPL element count per scsi cmnd */
+
 #define LPFC_MAX_SGE_SIZE       0x80000000 /* Maximum data allowed in a SGE */
-#define LPFC_MAX_PROT_SG_SEG_CNT 4096  /* prot sg element count per scsi cmd*/
 #define LPFC_IOCB_LIST_CNT     2250    /* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
 #define LPFC_VNAME_LEN         100     /* vport symbolic name length */
@@ -66,8 +71,10 @@ struct lpfc_sli2_slim;
  * queue depths when there is a driver resource error or a firmware
  * resource error.
  */
-#define QUEUE_RAMP_DOWN_INTERVAL       (1 * HZ)   /* 1 Second */
-#define QUEUE_RAMP_UP_INTERVAL         (300 * HZ) /* 5 minutes */
+/* 1 Second */
+#define QUEUE_RAMP_DOWN_INTERVAL       (msecs_to_jiffies(1000 * 1))
+/* 5 minutes */
+#define QUEUE_RAMP_UP_INTERVAL         (msecs_to_jiffies(1000 * 300))
 
 /* Number of exchanges reserved for discovery to complete */
 #define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -671,6 +678,7 @@ struct lpfc_hba {
        uint32_t lmt;
 
        uint32_t fc_topology;   /* link topology, from LINK INIT */
+       uint32_t fc_topology_changed;   /* link topology, from LINK INIT */
 
        struct lpfc_stats fc_stat;
 
@@ -701,9 +709,11 @@ struct lpfc_hba {
        uint32_t cfg_poll_tmo;
        uint32_t cfg_use_msi;
        uint32_t cfg_fcp_imax;
+       uint32_t cfg_fcp_cpu_map;
        uint32_t cfg_fcp_wq_count;
        uint32_t cfg_fcp_eq_count;
        uint32_t cfg_fcp_io_channel;
+       uint32_t cfg_total_seg_cnt;
        uint32_t cfg_sg_seg_cnt;
        uint32_t cfg_prot_sg_seg_cnt;
        uint32_t cfg_sg_dma_buf_size;
@@ -804,8 +814,10 @@ struct lpfc_hba {
        uint64_t bg_reftag_err_cnt;
 
        /* fastpath list. */
-       spinlock_t scsi_buf_list_lock;
-       struct list_head lpfc_scsi_buf_list;
+       spinlock_t scsi_buf_list_get_lock;  /* SCSI buf alloc list lock */
+       spinlock_t scsi_buf_list_put_lock;  /* SCSI buf free list lock */
+       struct list_head lpfc_scsi_buf_list_get;
+       struct list_head lpfc_scsi_buf_list_put;
        uint32_t total_scsi_bufs;
        struct list_head lpfc_iocb_list;
        uint32_t total_iocbq_bufs;
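
Several timeouts in this lpfc series (QUEUE_RAMP_DOWN_INTERVAL and
QUEUE_RAMP_UP_INTERVAL above, plus the fdmi and bsg waits later on) switch
from literal multiples of HZ to msecs_to_jiffies(), which states the interval
in wall-clock milliseconds and stays correct for any CONFIG_HZ.  A minimal
sketch of the equivalence (the timer name is hypothetical):

	unsigned long one_sec  = msecs_to_jiffies(1000);	/* == 1 * HZ   */
	unsigned long five_min = msecs_to_jiffies(1000 * 300);	/* == 300 * HZ */

	/* hypothetical timer, illustrating the intended usage */
	mod_timer(&example_timer, jiffies + one_sec);
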
index 9290713af253fd740454bbee6d2b6807f64f4b93..3c5625b8b1f4e83814f878c590f27377f4aaecbf 100644 (file)
@@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
        int i;
        int rc;
 
+       if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+               return 0;
+
        init_completion(&online_compl);
        rc = lpfc_workq_post_event(phba, &status, &online_compl,
                              LPFC_EVT_OFFLINE_PREP);
@@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba)
        int status = 0;
        int rc;
 
-       if (!phba->cfg_enable_hba_reset)
+       if ((!phba->cfg_enable_hba_reset) ||
+           (phba->pport->fc_flag & FC_OFFLINE_MODE))
                return -EACCES;
 
        status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
                pci_disable_sriov(pdev);
                phba->cfg_sriov_nr_virtfn = 0;
        }
+
        status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 
        if (status != 0)
@@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
                lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                        "3054 lpfc_topology changed from %d to %d\n",
                        prev_val, val);
+               if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
+                       phba->fc_topology_changed = 1;
                err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
                if (err) {
                        phba->cfg_topology = prev_val;
@@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
                   lpfc_fcp_imax_show, lpfc_fcp_imax_store);
 
+/**
+ * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the CPU to IRQ vector mapping.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
+                     char *buf)
+{
+       struct Scsi_Host  *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
+       struct lpfc_vector_map_info *cpup;
+       int  idx, len = 0;
+
+       if ((phba->sli_rev != LPFC_SLI_REV4) ||
+           (phba->intr_type != MSIX))
+               return len;
+
+       switch (phba->cfg_fcp_cpu_map) {
+       case 0:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: No mapping (%d)\n",
+                               phba->cfg_fcp_cpu_map);
+               return len;
+       case 1:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: HBA centric mapping (%d): "
+                               "%d online CPUs\n",
+                               phba->cfg_fcp_cpu_map,
+                               phba->sli4_hba.num_online_cpu);
+               break;
+       case 2:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: Driver centric mapping (%d): "
+                               "%d online CPUs\n",
+                               phba->cfg_fcp_cpu_map,
+                               phba->sli4_hba.num_online_cpu);
+               break;
+       }
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+               if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
+                       len += snprintf(buf + len, PAGE_SIZE-len,
+                                       "CPU %02d io_chan %02d "
+                                       "physid %d coreid %d\n",
+                                       idx, cpup->channel_id, cpup->phys_id,
+                                       cpup->core_id);
+               else
+                       len += snprintf(buf + len, PAGE_SIZE-len,
+                                       "CPU %02d io_chan %02d "
+                                       "physid %d coreid %d IRQ %d\n",
+                                       idx, cpup->channel_id, cpup->phys_id,
+                                       cpup->core_id, cpup->irq);
+
+               cpup++;
+       }
+       return len;
+}
+
+/**
+ * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: requested CPU mapping mode (ignored; store is not implemented).
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL  - Not implemented yet.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
+                      const char *buf, size_t count)
+{
+       int status = -EINVAL;
+       return status;
+}
+
+/*
+# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
+# for the HBA.
+#
+# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+#      0 - Do not affinitize IRQ vectors
+#      1 - Affinitize HBA vectors with respect to each HBA
+#          (start with CPU0 for each HBA)
+#      2 - Affinitize HBA vectors with respect to the entire driver
+#          (round robin through all CPUs across all HBAs)
+*/
+static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_cpu_map,
+                "Defines how to map CPUs to IRQ vectors per HBA");
+
+/**
+ * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
+ * @phba: lpfc_hba pointer.
+ * @val: fcp_cpu_map value requested by the user.
+ *
+ * Description:
+ * If val is in a valid range [0-2], then affinitize the adapter's
+ * MSIX vectors.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
+{
+       if (phba->sli_rev != LPFC_SLI_REV4) {
+               phba->cfg_fcp_cpu_map = 0;
+               return 0;
+       }
+
+       if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
+               phba->cfg_fcp_cpu_map = val;
+               return 0;
+       }
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "3326 fcp_cpu_map: %d out of range, using default\n",
+                       val);
+       phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+
+       return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
+                  lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
+
 /*
 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 #       0  = disabled (default)
 #       1  = enabled
 # Value range is [0,1]. Default value is 0.
+#
+# This feature is under investigation and may be supported in the future.
 */
 unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
 
-module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
-
 /*
 # lpfc_prot_mask: i
 #      - Bit mask of host protection capabilities used to register with the
@@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery,
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 256. The default value is
+ * This value can be set to values between 64 and 4096. The default value is
  * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
  * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * Because of the additional overhead involved in setting up T10-DIF,
+ * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
+ * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
 LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
            LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
-LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
-               LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
-               "Max Protection Scatter Gather Segment Count");
+/*
+ * This parameter will be deprecated; the driver cannot limit the
+ * protection data s/g list.
+ */
+LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
+           LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
+           "Max Protection Scatter Gather Segment Count");
 
 struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_bg_info,
@@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_poll_tmo,
        &dev_attr_lpfc_use_msi,
        &dev_attr_lpfc_fcp_imax,
+       &dev_attr_lpfc_fcp_cpu_map,
        &dev_attr_lpfc_fcp_wq_count,
        &dev_attr_lpfc_fcp_eq_count,
        &dev_attr_lpfc_fcp_io_channel,
@@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
        lpfc_use_msi_init(phba, lpfc_use_msi);
        lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+       lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
        lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
        lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
        lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
index 8886668920049e685cb9b5d3572cb1426dbea526..094be2cad65bba821e9050a48bb2602dfd65f067 100644 (file)
@@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
        unsigned int transfer_bytes, bytes_copied = 0;
        unsigned int sg_offset, dma_offset;
        unsigned char *dma_address, *sg_address;
-       struct scatterlist *sgel;
        LIST_HEAD(temp_list);
-
+       struct sg_mapping_iter miter;
+       unsigned long flags;
+       unsigned int sg_flags = SG_MITER_ATOMIC;
+       bool sg_valid;
 
        list_splice_init(&dma_buffers->list, &temp_list);
        list_add(&dma_buffers->list, &temp_list);
        sg_offset = 0;
-       sgel = bsg_buffers->sg_list;
+       if (to_buffers)
+               sg_flags |= SG_MITER_FROM_SG;
+       else
+               sg_flags |= SG_MITER_TO_SG;
+       sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
+                      sg_flags);
+       local_irq_save(flags);
+       sg_valid = sg_miter_next(&miter);
        list_for_each_entry(mp, &temp_list, list) {
                dma_offset = 0;
-               while (bytes_to_transfer && sgel &&
+               while (bytes_to_transfer && sg_valid &&
                       (dma_offset < LPFC_BPL_SIZE)) {
                        dma_address = mp->virt + dma_offset;
                        if (sg_offset) {
                                /* Continue previous partial transfer of sg */
-                               sg_address = sg_virt(sgel) + sg_offset;
-                               transfer_bytes = sgel->length - sg_offset;
+                               sg_address = miter.addr + sg_offset;
+                               transfer_bytes = miter.length - sg_offset;
                        } else {
-                               sg_address = sg_virt(sgel);
-                               transfer_bytes = sgel->length;
+                               sg_address = miter.addr;
+                               transfer_bytes = miter.length;
                        }
                        if (bytes_to_transfer < transfer_bytes)
                                transfer_bytes = bytes_to_transfer;
@@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
                        sg_offset += transfer_bytes;
                        bytes_to_transfer -= transfer_bytes;
                        bytes_copied += transfer_bytes;
-                       if (sg_offset >= sgel->length) {
+                       if (sg_offset >= miter.length) {
                                sg_offset = 0;
-                               sgel = sg_next(sgel);
+                               sg_valid = sg_miter_next(&miter);
                        }
                }
        }
+       sg_miter_stop(&miter);
+       local_irq_restore(flags);
        list_del_init(&dma_buffers->list);
        list_splice(&temp_list, &dma_buffers->list);
        return bytes_copied;
@@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
        cmdiocbq->context1 = dd_data;
        cmdiocbq->context2 = cmp;
        cmdiocbq->context3 = bmp;
+       cmdiocbq->context_un.ndlp = ndlp;
        dd_data->type = TYPE_IOCB;
        dd_data->set_job = job;
        dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
        ctiocb->context1 = dd_data;
        ctiocb->context2 = cmp;
        ctiocb->context3 = bmp;
+       ctiocb->context_un.ndlp = ndlp;
        ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
 
        dd_data->type = TYPE_IOCB;
@@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
        evt->wait_time_stamp = jiffies;
        time_left = wait_event_interruptible_timeout(
                evt->wq, !list_empty(&evt->events_to_see),
-               ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+               msecs_to_jiffies(1000 *
+                       ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
        if (list_empty(&evt->events_to_see))
                ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
        else {
@@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
        evt->waiting = 1;
        time_left = wait_event_interruptible_timeout(
                evt->wq, !list_empty(&evt->events_to_see),
-               ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+               msecs_to_jiffies(1000 *
+                       ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
        evt->waiting = 0;
        if (list_empty(&evt->events_to_see)) {
                rc = (time_left) ? -EINTR : -ETIMEDOUT;
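
The lpfc_bsg_copy_data() rework above replaces direct sg_virt()/sg_next()
walking with the scatterlist mapping iterator, which also copes with highmem
pages by mapping each chunk on demand.  The canonical sg_mapping_iter pattern
looks like the sketch below (generic buffer names; consume() is a placeholder,
not a kernel function).  With SG_MITER_ATOMIC the mapped window must be used
without sleeping, which is why the driver brackets the loop with
local_irq_save()/local_irq_restore():

	struct sg_mapping_iter miter;
	unsigned long flags;

	sg_miter_start(&miter, sg_list, sg_cnt,
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	local_irq_save(flags);
	while (sg_miter_next(&miter)) {
		/* miter.addr/miter.length describe the currently mapped chunk */
		consume(miter.addr, miter.length);
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
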
index 7631893ae00511c49033f3445e45f8b164d47d46..d41456e5f8149e3fc09d82086554d20ef9921ce2 100644 (file)
@@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
 void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
+void lpfc_sli4_offline_eratt(struct lpfc_hba *);
index 7bff3a19af56880dba0fdca4e203676d52c4e916..ae1a07c57caef681d5d54222e39b41b67e70643f 100644 (file)
@@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
                if (init_utsname()->nodename[0] != '\0')
                        lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
                else
-                       mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+                       mod_timer(&vport->fc_fdmitmo, jiffies +
+                                 msecs_to_jiffies(1000 * 60));
        }
        return;
 }
index bbed8471bf0b81fce50390bb9b0ec6b7a68ba146..3cae0a92e8bd0001142c7a175102de1e0ab01dc6 100644 (file)
@@ -29,6 +29,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
@@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 
                icmd->un.elsreq64.remoteID = did;               /* DID */
                icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
-               icmd->ulpTimeout = phba->fc_ratov * 2;
+               if (elscmd == ELS_CMD_FLOGI)
+                       icmd->ulpTimeout = FF_DEF_RATOV * 2;
+               else
+                       icmd->ulpTimeout = phba->fc_ratov * 2;
        } else {
                icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
                icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
@@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
                /* Xmit ELS command <elsCmd> to remote NPORT <did> */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                 "0116 Xmit ELS command x%x to remote "
-                                "NPORT x%x I/O tag: x%x, port state: x%x\n",
+                                "NPORT x%x I/O tag: x%x, port state:x%x"
+                                " fc_flag:x%x\n",
                                 elscmd, did, elsiocb->iotag,
-                                vport->port_state);
+                                vport->port_state,
+                                vport->fc_flag);
        } else {
                /* Xmit ELS response <elsCmd> to remote NPORT <did> */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                 "0117 Xmit ELS response x%x to remote "
-                                "NPORT x%x I/O tag: x%x, size: x%x\n",
+                                "NPORT x%x I/O tag: x%x, size: x%x "
+                                "port_state x%x fc_flag x%x\n",
                                 elscmd, ndlp->nlp_DID, elsiocb->iotag,
-                                cmdSize);
+                                cmdSize, vport->port_state,
+                                vport->fc_flag);
        }
        return elsiocb;
 
@@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_PT2PT;
        spin_unlock_irq(shost->host_lock);
+       /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+       if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+               lpfc_unregister_fcf_prep(phba);
+
+       /* The FC_VFI_REGISTERED flag will get cleared in the cmpl
+                * handler for unreg_vfi, but if we don't force the
+                * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
+                * built with the update bit set instead of just the vp bit to
+                * change the Nport ID.  We need to have the vp set and the
+                * Upd cleared on topology changes.
+                */
+               spin_lock_irq(shost->host_lock);
+               vport->fc_flag &= ~FC_VFI_REGISTERED;
+               spin_unlock_irq(shost->host_lock);
+               phba->fc_topology_changed = 0;
+               lpfc_issue_reg_vfi(vport);
+       }
 
        /* Start discovery - this should just do CLEAR_LA */
        lpfc_disc_start(vport);
@@ -1030,9 +1055,19 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
                if ((phba->sli_rev == LPFC_SLI_REV4) &&
                    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
-                    (vport->fc_prevDID != vport->fc_myDID))) {
-                       if (vport->fc_flag & FC_VFI_REGISTERED)
-                               lpfc_sli4_unreg_all_rpis(vport);
+                    (vport->fc_prevDID != vport->fc_myDID) ||
+                       phba->fc_topology_changed)) {
+                       if (vport->fc_flag & FC_VFI_REGISTERED) {
+                               if (phba->fc_topology_changed) {
+                                       lpfc_unregister_fcf_prep(phba);
+                                       spin_lock_irq(shost->host_lock);
+                                       vport->fc_flag &= ~FC_VFI_REGISTERED;
+                                       spin_unlock_irq(shost->host_lock);
+                                       phba->fc_topology_changed = 0;
+                               } else {
+                                       lpfc_sli4_unreg_all_rpis(vport);
+                               }
+                       }
                        lpfc_issue_reg_vfi(vport);
                        lpfc_nlp_put(ndlp);
                        goto out;
@@ -1054,10 +1089,11 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
        /* FLOGI completes successfully */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "0101 FLOGI completes successfully "
-                        "Data: x%x x%x x%x x%x\n",
+                        "0101 FLOGI completes successfully, I/O tag:x%x, "
+                        "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
                         irsp->un.ulpWord[4], sp->cmn.e_d_tov,
-                        sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
+                        sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
+                        vport->port_state, vport->fc_flag);
 
        if (vport->port_state == LPFC_FLOGI) {
                /*
@@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        struct ls_rjt stat;
        uint32_t cmd, did;
        int rc;
+       uint32_t fc_flag = 0;
+       uint32_t port_state = 0;
 
        cmd = *lp++;
        sp = (struct serv_parm *) lp;
@@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                         * will be.
                         */
                        vport->fc_myDID = PT2PT_LocalID;
-               }
+               } else
+                       vport->fc_myDID = PT2PT_RemoteID;
 
                /*
                 * The vport state should go to LPFC_FLOGI only
                 * AFTER we issue a FLOGI, not receive one.
                 */
                spin_lock_irq(shost->host_lock);
+               fc_flag = vport->fc_flag;
+               port_state = vport->port_state;
                vport->fc_flag |= FC_PT2PT;
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+               vport->port_state = LPFC_FLOGI;
                spin_unlock_irq(shost->host_lock);
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                "3311 Rcv Flogi PS x%x new PS x%x "
+                                "fc_flag x%x new fc_flag x%x\n",
+                                port_state, vport->port_state,
+                                fc_flag, vport->fc_flag);
 
                /*
                 * We temporarily set fc_myDID to make it look like we are
@@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
        }
 
        if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
-               mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+               mod_timer(&vport->els_tmofunc,
+                         jiffies + msecs_to_jiffies(1000 * timeout));
 }
 
 /**
@@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        /* ELS command <elsCmd> received from NPORT <did> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "0112 ELS command x%x received from NPORT x%x "
-                        "Data: x%x\n", cmd, did, vport->port_state);
+                        "Data: x%x x%x x%x x%x\n",
+                       cmd, did, vport->port_state, vport->fc_flag,
+                       vport->fc_myDID, vport->fc_prevDID);
        switch (cmd) {
        case ELS_CMD_PLOGI:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
                phba->fc_stat.elsRcvPLOGI++;
                ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+               if (phba->sli_rev == LPFC_SLI_REV4 &&
+                   (phba->pport->fc_flag & FC_PT2PT)) {
+                       vport->fc_prevDID = vport->fc_myDID;
+                       /* Our DID needs to be updated before registering
+                        * the vfi. This is done in lpfc_rcv_plogi but
+                        * that is called after the reg_vfi.
+                        */
+                       vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                        "3312 Remote port assigned DID x%x "
+                                        "%x\n", vport->fc_myDID,
+                                        vport->fc_prevDID);
+               }
 
                lpfc_send_els_event(vport, ndlp, payload);
 
@@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        rjt_exp = LSEXP_NOTHING_MORE;
                        break;
                }
+               shost = lpfc_shost_from_vport(vport);
                if (vport->port_state < LPFC_DISC_AUTH) {
                        if (!(phba->pport->fc_flag & FC_PT2PT) ||
                                (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                         * another NPort and the other side has initiated
                         * the PLOGI before responding to our FLOGI.
                         */
+                       if (phba->sli_rev == LPFC_SLI_REV4 &&
+                           (phba->fc_topology_changed ||
+                            vport->fc_myDID != vport->fc_prevDID)) {
+                               lpfc_unregister_fcf_prep(phba);
+                               spin_lock_irq(shost->host_lock);
+                               vport->fc_flag &= ~FC_VFI_REGISTERED;
+                               spin_unlock_irq(shost->host_lock);
+                               phba->fc_topology_changed = 0;
+                               lpfc_issue_reg_vfi(vport);
+                       }
                }
 
-               shost = lpfc_shost_from_vport(vport);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
                spin_unlock_irq(shost->host_lock);
@@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
        spin_lock_irq(shost->host_lock);
        if (vport->fc_flag & FC_DISC_DELAYED) {
                spin_unlock_irq(shost->host_lock);
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                               "3334 Delay fc port discovery for %d seconds\n",
+                               phba->fc_ratov);
                mod_timer(&vport->delayed_disc_tmo,
-                       jiffies + HZ * phba->fc_ratov);
+                       jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
                return;
        }
        spin_unlock_irq(shost->host_lock);
@@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
                return;
 
        shost = lpfc_shost_from_vport(phba->pport);
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
        blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
        /* Start a timer to unblock fabric iocbs after 100ms */
        if (!blocked)
-               mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
+               mod_timer(&phba->fabric_block_timer,
+                         jiffies + msecs_to_jiffies(100));
 
        return;
 }
index 326e05a65a7314d7648a16f0097485bb5d439a0b..0f6e2548f35d7b7b03d870b813b5c1726bfcc48d 100644 (file)
@@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
        if (!list_empty(&evtp->evt_listp))
                return;
 
+       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+
        spin_lock_irq(&phba->hbalock);
        /* We need to hold the node by incrementing the reference
         * count until this queued work is done
         */
-       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
        if (evtp->evt_arg1) {
                evtp->evt = LPFC_EVT_DEV_LOSS;
                list_add_tail(&evtp->evt_listp, &phba->work_list);
@@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
-       if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-           (phba->sli_rev < LPFC_SLI_REV4))
-               lpfc_issue_clear_la(phba, phba->pport);
 
        return 0;
 }
@@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba)
        if (phba->fcf.fcf_flag & FCF_REGISTERED) {
                phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
                phba->hba_flag &= ~FCF_TS_INPROG;
-               if (phba->pport->port_state != LPFC_FLOGI) {
+               if (phba->pport->port_state != LPFC_FLOGI &&
+                   phba->pport->fc_flag & FC_FABRIC) {
                        phba->hba_flag |= FCF_RR_INPROG;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_initial_flogi(phba->pport);
@@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                                spin_unlock_irq(&phba->hbalock);
                                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                                "2836 New FCF matches in-use "
-                                               "FCF (x%x)\n",
-                                               phba->fcf.current_rec.fcf_indx);
+                                               "FCF (x%x), port_state:x%x, "
+                                               "fc_flag:x%x\n",
+                                               phba->fcf.current_rec.fcf_indx,
+                                               phba->pport->port_state,
+                                               phba->pport->fc_flag);
                                goto out;
                        } else
                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
@@ -2796,7 +2798,19 @@ void
 lpfc_issue_init_vpi(struct lpfc_vport *vport)
 {
        LPFC_MBOXQ_t *mboxq;
-       int rc;
+       int rc, vpi;
+
+       if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
+               vpi = lpfc_alloc_vpi(vport->phba);
+               if (!vpi) {
+                       lpfc_printf_vlog(vport, KERN_ERR,
+                                        LOG_MBOX,
+                                        "3303 Failed to obtain vport vpi\n");
+                       lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+                       return;
+               }
+               vport->vpi = vpi;
+       }
 
        mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq) {
@@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                goto out_free_mem;
        }
 
-       /* If the VFI is already registered, there is nothing else to do */
+       /* If the VFI is already registered, there is nothing else to do,
+        * unless this was a VFI update and we are in PT2PT mode; then
+        * we should drop through to set the port state to ready.
+        */
        if (vport->fc_flag & FC_VFI_REGISTERED)
-               goto out_free_mem;
+               if (!(phba->sli_rev == LPFC_SLI_REV4 &&
+                     vport->fc_flag & FC_PT2PT))
+                       goto out_free_mem;
 
        /* The VPI is implicitly registered when the VFI is registered */
        spin_lock_irq(shost->host_lock);
@@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                goto out_free_mem;
        }
 
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                        "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
+                        "alpacnt:%d LinkState:%x topology:%x\n",
+                        vport->port_state, vport->fc_flag, vport->fc_myDID,
+                        vport->phba->alpa_map[0],
+                        phba->link_state, phba->fc_topology);
+
        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
                /*
                 * For private loop or for NPort pt2pt,
@@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                        /* Use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);
                        /* Start discovery */
-                       lpfc_disc_start(vport);
+                       if (vport->fc_flag & FC_PT2PT)
+                               vport->port_state = LPFC_VPORT_READY;
+                       else
+                               lpfc_disc_start(vport);
                } else {
                        lpfc_start_fdiscs(phba);
                        lpfc_do_scr_ns_plogi(phba, vport);
@@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
                break;
        }
 
+       if (phba->fc_topology &&
+           phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "3314 Topology changed was 0x%x is 0x%x\n",
+                               phba->fc_topology,
+                               bf_get(lpfc_mbx_read_top_topology, la));
+               phba->fc_topology_changed = 1;
+       }
+
        phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
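
The hunk above records the previously reported topology and raises fc_topology_changed when a new link-up reports a different value. A trivial standalone sketch of that pattern (the topology values here are placeholders, not the driver's encodings):

/* Remember the last reported topology; flag a change on the next report. */
#include <stdio.h>

static int fc_topology;            /* 0 means "not yet known" */
static int fc_topology_changed;

static void link_up(int new_topology)
{
        if (fc_topology && fc_topology != new_topology) {
                printf("topology changed: was 0x%x is 0x%x\n",
                       fc_topology, new_topology);
                fc_topology_changed = 1;
        }
        fc_topology = new_topology;
}

int main(void)
{
        link_up(0x1);                      /* first link-up: just record it */
        link_up(0x2);                      /* later link-up, different value */
        printf("fc_topology_changed = %d\n", fc_topology_changed);
        return 0;
}
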
@@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
                        tmo, vport->port_state, vport->fc_flag);
        }
 
-       mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
+       mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_DISC_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport)
        uint32_t clear_la_pending;
        int did_changed;
 
-       if (!lpfc_is_link_up(phba))
+       if (!lpfc_is_link_up(phba)) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                                "3315 Link is not up %x\n",
+                                phba->link_state);
                return;
+       }
 
        if (phba->link_state == LPFC_CLEAR_LA)
                clear_la_pending = 1;
@@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport)
        if (num_sent)
                return;
 
-       /* Register the VPI for SLI3, NON-NPIV only. */
+       /* Register the VPI for SLI3, NPIV only. */
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            !(vport->fc_flag & FC_PT2PT) &&
            !(vport->fc_flag & FC_RSCN_MODE) &&
            (phba->sli_rev < LPFC_SLI_REV4)) {
+               if (vport->port_type == LPFC_PHYSICAL_PORT)
+                       lpfc_issue_clear_la(phba, vport);
                lpfc_issue_reg_vpi(phba, vport);
                return;
        }
@@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        if (vport->cfg_fdmi_on == 1)
                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
        else
-               mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+               mod_timer(&vport->fc_fdmitmo,
+                         jiffies + msecs_to_jiffies(1000 * 60));
 
        /* decrement the node reference count held for this callback
         * function.
@@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
        struct lpfc_vport **vports;
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost;
-       int i, rc;
+       int i = 0, rc;
 
        /* Unregister RPIs */
        if (lpfc_fcf_inuse(phba))
@@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
                        spin_unlock_irq(shost->host_lock);
                }
        lpfc_destroy_vport_work_array(phba, vports);
+       if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
+               ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+               if (ndlp)
+                       lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+               lpfc_cleanup_pending_mbox(phba->pport);
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       lpfc_sli4_unreg_all_rpis(phba->pport);
+               lpfc_mbx_unreg_vpi(phba->pport);
+               shost = lpfc_shost_from_vport(phba->pport);
+               spin_lock_irq(shost->host_lock);
+               phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+               phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
+               spin_unlock_irq(shost->host_lock);
+       }
 
        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);
index e8c47603170370ca0d2418bb3e4d759b377d60e3..83700c18f4688a872894aec6da35d7c46058580a 100644 (file)
@@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type {
 #define        BG_OP_IN_CSUM_OUT_CSUM          0x5
 #define        BG_OP_IN_CRC_OUT_CSUM           0x6
 #define        BG_OP_IN_CSUM_OUT_CRC           0x7
+#define        BG_OP_RAW_MODE                  0x8
 
 struct lpfc_pde5 {
        uint32_t word0;
index 1dd2f6f0a1272f4717e6063449879b55b06ce4b0..713a4613ec3ac1c22829fa9d150d6c9767ae3d80 100644 (file)
@@ -200,6 +200,11 @@ struct lpfc_sli_intf {
 #define LPFC_MAX_IMAX          5000000
 #define LPFC_DEF_IMAX          50000
 
+#define LPFC_MIN_CPU_MAP       0
+#define LPFC_MAX_CPU_MAP       2
+#define LPFC_HBA_CPU_MAP       1
+#define LPFC_DRIVER_CPU_MAP    2  /* Default */
+
 /* PORT_CAPABILITIES constants. */
 #define LPFC_MAX_SUPPORTED_PAGES       8
 
@@ -621,7 +626,7 @@ struct lpfc_register {
 #define lpfc_sliport_status_rdy_SHIFT  23
 #define lpfc_sliport_status_rdy_MASK   0x1
 #define lpfc_sliport_status_rdy_WORD   word0
-#define MAX_IF_TYPE_2_RESETS   1000
+#define MAX_IF_TYPE_2_RESETS           6
 
 #define LPFC_CTL_PORT_CTL_OFFSET       0x408
 #define lpfc_sliport_ctrl_end_SHIFT    30
index 90b8b0515e23d7c0ee0f309d640c5fb6e94e8988..cb465b253910b9972eb48639d4527162a7a0b0b9 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/firmware.h>
 #include <linux/miscdevice.h>
+#include <linux/percpu.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -58,6 +59,9 @@ char *_dump_buf_dif;
 unsigned long _dump_buf_dif_order;
 spinlock_t _dump_buf_lock;
 
+/* Used when mapping IRQ vectors in a driver centric manner */
+uint16_t lpfc_used_cpu[LPFC_MAX_CPU];
+
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 
        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
-       mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+       mod_timer(&vport->els_tmofunc,
+                 jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
-       mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+       mod_timer(&phba->hb_tmofunc,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
-       mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+       mod_timer(&phba->eratt_poll,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba,
@@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
                psb->pCmd = NULL;
                psb->status = IOSTAT_SUCCESS;
        }
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_splice(&aborts, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+       list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
        return 0;
 }
 
@@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
                !(phba->link_state == LPFC_HBA_ERROR) &&
                !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
-                       jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                         jiffies +
+                         msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        return;
 }
 
@@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 
        spin_lock_irq(&phba->pport->work_port_lock);
 
-       if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
-               jiffies)) {
+       if (time_after(phba->last_completion_time +
+                       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+                       jiffies)) {
                spin_unlock_irq(&phba->pport->work_port_lock);
                if (!phba->hb_outstanding)
                        mod_timer(&phba->hb_tmofunc,
-                               jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
                else
                        mod_timer(&phba->hb_tmofunc,
-                               jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                return;
        }
        spin_unlock_irq(&phba->pport->work_port_lock);
@@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                if (!pmboxq) {
                                        mod_timer(&phba->hb_tmofunc,
                                                 jiffies +
-                                                HZ * LPFC_HB_MBOX_INTERVAL);
+                                                msecs_to_jiffies(1000 *
+                                                LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
 
@@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                                        phba->mbox_mem_pool);
                                        mod_timer(&phba->hb_tmofunc,
                                                jiffies +
-                                               HZ * LPFC_HB_MBOX_INTERVAL);
+                                               msecs_to_jiffies(1000 *
+                                               LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
                                phba->skipped_hb = 0;
@@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                phba->skipped_hb = jiffies;
 
                        mod_timer(&phba->hb_tmofunc,
-                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                                jiffies +
+                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                        return;
                } else {
                        /*
@@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                        jiffies_to_msecs(jiffies
                                                 - phba->last_completion_time));
                        mod_timer(&phba->hb_tmofunc,
-                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                }
        }
 }
@@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
  * other than Port Error 6 has been detected.
  **/
-static void
+void
 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
 {
        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
@@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba)
        struct lpfc_vport *vport;
        struct lpfc_vport **vports;
        int i;
+       bool vpis_cleared = false;
 
        if (!phba)
                return 0;
@@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba)
                        lpfc_unblock_mgmt_io(phba);
                        return 1;
                }
+               spin_lock_irq(&phba->hbalock);
+               if (!phba->sli4_hba.max_cfg_param.vpi_used)
+                       vpis_cleared = true;
+               spin_unlock_irq(&phba->hbalock);
        } else {
                if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
                        lpfc_unblock_mgmt_io(phba);
@@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba)
                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-                       if (phba->sli_rev == LPFC_SLI_REV4)
+                       if (phba->sli_rev == LPFC_SLI_REV4) {
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+                               if ((vpis_cleared) &&
+                                   (vports[i]->port_type !=
+                                       LPFC_PHYSICAL_PORT))
+                                       vports[i]->vpi = 0;
+                       }
                        spin_unlock_irq(shost->host_lock);
                }
                lpfc_destroy_vport_work_array(phba, vports);
@@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba)
        struct lpfc_iocbq *io, *io_next;
 
        spin_lock_irq(&phba->hbalock);
+
        /* Release all the lpfc_scsi_bufs maintained by this host. */
-       spin_lock(&phba->scsi_buf_list_lock);
-       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+
+       spin_lock(&phba->scsi_buf_list_put_lock);
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
+                                list) {
                list_del(&sb->list);
                pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
                              sb->dma_handle);
                kfree(sb);
                phba->total_scsi_bufs--;
        }
-       spin_unlock(&phba->scsi_buf_list_lock);
+       spin_unlock(&phba->scsi_buf_list_put_lock);
+
+       spin_lock(&phba->scsi_buf_list_get_lock);
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
+                                list) {
+               list_del(&sb->list);
+               pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+                             sb->dma_handle);
+               kfree(sb);
+               phba->total_scsi_bufs--;
+       }
+       spin_unlock(&phba->scsi_buf_list_get_lock);
 
        /* Release all the lpfc_iocbq entries maintained by this host. */
        list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
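
The lpfc_scsi_free and lpfc_hba_down_post_s4 hunks split the single scsi buffer list into a "get" list and a "put" list, each under its own lock; allocations come from the get list and completed buffers are returned to the put list, so the two paths mostly avoid contending on one lock. A minimal userspace sketch of that pattern, with assumed names and pthread mutexes standing in for the driver's spinlocks:

/* Two-list buffer pool: allocate from get_list, free to put_list, and splice
 * put_list over only when get_list runs dry. */
#include <pthread.h>
#include <stdio.h>

struct buf {
        struct buf *next;
        int id;
};

static struct buf *get_list, *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

static struct buf *pool_get(void)
{
        struct buf *b;

        pthread_mutex_lock(&get_lock);
        if (!get_list) {
                /* Refill by stealing the whole put list in one splice. */
                pthread_mutex_lock(&put_lock);
                get_list = put_list;
                put_list = NULL;
                pthread_mutex_unlock(&put_lock);
        }
        b = get_list;
        if (b)
                get_list = b->next;
        pthread_mutex_unlock(&get_lock);
        return b;
}

static void pool_put(struct buf *b)
{
        pthread_mutex_lock(&put_lock);
        b->next = put_list;
        put_list = b;
        pthread_mutex_unlock(&put_lock);
}

int main(void)
{
        struct buf bufs[4];
        int i;

        for (i = 0; i < 4; i++) {          /* seed the pool */
                bufs[i].id = i;
                pool_put(&bufs[i]);
        }
        for (i = 0; i < 6; i++) {
                struct buf *b = pool_get();

                printf("get -> %d\n", b ? b->id : -1);
                if (b)
                        pool_put(b);
        }
        return 0;
}
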
@@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                        phba->sli4_hba.scsi_xri_cnt,
                        phba->sli4_hba.scsi_xri_max);
 
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
+       list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
                /* max scsi xri shrinked below the allocated scsi buffers */
@@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                                      psb->dma_handle);
                        kfree(psb);
                }
-               spin_lock_irq(&phba->scsi_buf_list_lock);
+               spin_lock_irq(&phba->scsi_buf_list_get_lock);
                phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
-               spin_unlock_irq(&phba->scsi_buf_list_lock);
+               spin_unlock_irq(&phba->scsi_buf_list_get_lock);
        }
 
        /* update xris associated to remaining allocated scsi buffers */
@@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                psb->cur_iocbq.sli4_lxritag = lxri;
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
        }
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        return 0;
 
@@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                stat = 1;
                goto finished;
        }
-       if (time >= 30 * HZ) {
+       if (time >= msecs_to_jiffies(30 * 1000)) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0461 Scanning longer than 30 "
                                "seconds.  Continuing initialization\n");
                stat = 1;
                goto finished;
        }
-       if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
+       if (time >= msecs_to_jiffies(15 * 1000) &&
+           phba->link_state <= LPFC_LINK_DOWN) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0465 Link down longer than 15 "
                                "seconds.  Continuing initialization\n");
@@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                goto finished;
        if (vport->num_disc_nodes || vport->fc_prli_sent)
                goto finished;
-       if (vport->fc_map_cnt == 0 && time < 2 * HZ)
+       if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
                goto finished;
        if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
                goto finished;
@@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                         * If there are other active VLinks present,
                         * re-instantiate the Vlink using FDISC.
                         */
-                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       mod_timer(&ndlp->nlp_delayfunc,
+                                 jiffies + msecs_to_jiffies(1000));
                        shost = lpfc_shost_from_vport(vport);
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
                return -ENOMEM;
 
        /*
-        * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+        * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
-        * 2 segments are added since the IOCB needs a command and response bde.
         */
-       phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-               sizeof(struct fcp_rsp) +
-                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
 
+       /* Initialize the host templates with the configured values. */
+       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+       /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
        if (phba->cfg_enable_bg) {
-               phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-               phba->cfg_sg_dma_buf_size +=
-                       phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
+               /*
+                * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+                * the FCP rsp, and a BDE for each. Since we have no control
+                * over how many protection data segments the SCSI Layer
+                * will hand us (i.e. there could be one for every block
+                * in the IO), we just allocate enough BDEs to accommodate
+                * our max amount and we need to limit lpfc_sg_seg_cnt to
+                * minimize the risk of running out.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+
+               if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
+                       phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
+
+               /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
+               phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+       } else {
+               /*
+                * The scsi_buf for a regular I/O will hold the FCP cmnd,
+                * the FCP rsp, a BDE for each, and a BDE for up to
+                * cfg_sg_seg_cnt data segments.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+               /* Total BDEs in BPL for scsi_sg_list */
+               phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
        }
 
-       /* Also reinitialize the host templates with new values. */
-       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+                       "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+                       phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+                       phba->cfg_total_seg_cnt);
 
        phba->max_vpi = LPFC_MAX_VPI;
        /* This will be set to correct value after config_port mbox */
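
The hunk above sizes the per-command DMA buffer as the FCP command, the FCP response, and one BDE per segment, with two BDEs reserved for the command and response themselves. A small sketch of that arithmetic; the structure sizes below are assumptions for illustration, the real ones come from the lpfc headers:

/* Reproduce the sg_dma_buf_size arithmetic with placeholder sizes. */
#include <stdio.h>

#define FCP_CMND_SZ 32                       /* assumed size */
#define FCP_RSP_SZ  24                       /* assumed size */
#define BDE64_SZ    12                       /* assumed size */

int main(void)
{
        int sg_seg_cnt = 64;                 /* sample module-parameter value */
        int total_seg_cnt = sg_seg_cnt + 2;  /* +1 FCP cmnd BDE, +1 FCP rsp BDE */
        int dma_buf_size = FCP_CMND_SZ + FCP_RSP_SZ + total_seg_cnt * BDE64_SZ;

        printf("sg_seg_cnt=%d total_bde=%d dma_buf_size=%d\n",
               sg_seg_cnt, total_seg_cnt, dma_buf_size);
        return 0;
}
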
@@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
 static int
 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
+       struct lpfc_vector_map_info *cpup;
        struct lpfc_sli *psli;
        LPFC_MBOXQ_t *mboxq;
-       int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
+       int rc, i, hbq_count, max_buf_size;
        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
        struct lpfc_mqe *mqe;
-       int longs, sli_family;
-       int sges_per_segment;
+       int longs;
 
        /* Before proceed, wait for POST done and device ready */
        rc = lpfc_sli4_post_status_check(phba);
@@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
 
-       /* With BlockGuard we can have multiple SGEs per Data Segemnt */
-       sges_per_segment = 1;
-       if (phba->cfg_enable_bg)
-               sges_per_segment = 2;
-
        /*
         * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
         * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                        sizeof(struct lpfc_sli_ring), GFP_KERNEL);
        if (!phba->sli.ring)
                return -ENOMEM;
+
        /*
-        * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+        * No matter what family our adapter is in, we are limited to
+        * 2 pages (512 SGEs) for our SGL.
+        * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+        */
+       max_buf_size = (2 * SLI4_PAGE_SIZE);
+       if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
+               phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+
+       /*
+        * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
-        * 2 segments are added since the IOCB needs a command and response bde.
-        * To insure that the scsi sgl does not cross a 4k page boundary only
-        * sgl sizes of must be a power of 2.
         */
-       buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
-                   (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
-                   sizeof(struct sli4_sge)));
-
-       sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
-       max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
-       switch (sli_family) {
-       case LPFC_SLI_INTF_FAMILY_BE2:
-       case LPFC_SLI_INTF_FAMILY_BE3:
-               /* There is a single hint for BE - 2 pages per BPL. */
-               if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
-                   LPFC_SLI_INTF_SLI_HINT1_1)
-                       max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
-               break;
-       case LPFC_SLI_INTF_FAMILY_LNCR_A0:
-       case LPFC_SLI_INTF_FAMILY_LNCR_B0:
-       default:
-               break;
+
+       if (phba->cfg_enable_bg) {
+               /*
+                * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+                * the FCP rsp, and a SGE for each. Since we have no control
+                * over how many protection data segments the SCSI Layer
+                * will hand us (i.e. there could be one for every block
+                * in the IO), we just allocate enough SGEs to accommodate
+                * our max amount and we need to limit lpfc_sg_seg_cnt to
+                * minimize the risk of running out.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) + max_buf_size;
+
+               /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+               phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+               if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
+                       phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+       } else {
+               /*
+                * The scsi_buf for a regular I/O will hold the FCP cmnd,
+                * the FCP rsp, a SGE for each, and a SGE for up to
+                * cfg_sg_seg_cnt data segments.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+
+               /* Total SGEs for scsi_sg_list */
+               phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+               /*
+                * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
+                * to post 1 page for the SGL.
+                */
        }
 
-       for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
-            dma_buf_size < max_buf_size && buf_size > dma_buf_size;
-            dma_buf_size = dma_buf_size << 1)
-               ;
-       if (dma_buf_size == max_buf_size)
-               phba->cfg_sg_seg_cnt = (dma_buf_size -
-                       sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
-                       (2 * sizeof(struct sli4_sge))) /
-                               sizeof(struct sli4_sge);
-       phba->cfg_sg_dma_buf_size = dma_buf_size;
+       /* Initialize the host templates with the updated values. */
+       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+       if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
+               phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+       else
+               phba->cfg_sg_dma_buf_size =
+                       SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+                       "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+                       phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+                       phba->cfg_total_seg_cnt);
 
        /* Initialize buffer queue management fields */
        hbq_count = lpfc_sli_hbq_count();
@@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_free_fcp_eq_hdl;
        }
 
+       phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
+                                        phba->sli4_hba.num_present_cpu),
+                                        GFP_KERNEL);
+       if (!phba->sli4_hba.cpu_map) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3327 Failed allocate memory for msi-x "
+                               "interrupt vector mapping\n");
+               rc = -ENOMEM;
+               goto out_free_msix;
+       }
+       /* Initialize io channels for round robin */
+       cpup = phba->sli4_hba.cpu_map;
+       rc = 0;
+       for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+               cpup->channel_id = rc;
+               rc++;
+               if (rc >= phba->cfg_fcp_io_channel)
+                       rc = 0;
+       }
+
        /*
         * Enable sr-iov virtual functions if supported and configured
         * through the module parameter.
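
The hunk above seeds the new cpu_map with a default round-robin assignment of IO channels to CPUs; lpfc_sli4_set_affinity later in this file refines the mapping when MSI-X affinity is in use. A trivial userspace sketch of the default assignment, with assumed CPU and channel counts:

/* Round-robin IO channel ids across the present CPUs. */
#include <stdio.h>

#define NUM_PRESENT_CPU 8                    /* assumed for the example */
#define FCP_IO_CHANNEL  4                    /* assumed for the example */

int main(void)
{
        int channel_id[NUM_PRESENT_CPU];
        int cpu, chan = 0;

        for (cpu = 0; cpu < NUM_PRESENT_CPU; cpu++) {
                channel_id[cpu] = chan;
                if (++chan >= FCP_IO_CHANNEL)
                        chan = 0;
        }
        for (cpu = 0; cpu < NUM_PRESENT_CPU; cpu++)
                printf("cpu %d -> io channel %d\n", cpu, channel_id[cpu]);
        return 0;
}
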
@@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
        return 0;
 
+out_free_msix:
+       kfree(phba->sli4_hba.msix_entries);
 out_free_fcp_eq_hdl:
        kfree(phba->sli4_hba.fcp_eq_hdl);
 out_free_fcf_rr_bmask:
@@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 {
        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
 
+       /* Free memory allocated for msi-x interrupt vector to CPU mapping */
+       kfree(phba->sli4_hba.cpu_map);
+       phba->sli4_hba.num_present_cpu = 0;
+       phba->sli4_hba.num_online_cpu = 0;
+
        /* Free memory allocated for msi-x interrupt vector entries */
        kfree(phba->sli4_hba.msix_entries);
 
@@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
        init_waitqueue_head(&phba->work_waitq);
 
        /* Initialize the scsi buffer list used by driver for scsi IO */
-       spin_lock_init(&phba->scsi_buf_list_lock);
-       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+       spin_lock_init(&phba->scsi_buf_list_get_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+       spin_lock_init(&phba->scsi_buf_list_put_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 
        /* Initialize the fabric iocb list */
        INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
        int cfg_fcp_io_channel;
        uint32_t cpu;
        uint32_t i = 0;
+       uint32_t j = 0;
 
 
        /*
@@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
        /* Sanity check on HBA EQ parameters */
        cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
 
-       /* It doesn't make sense to have more io channels then CPUs */
-       for_each_online_cpu(cpu) {
-               i++;
+       /* It doesn't make sense to have more io channels than online CPUs */
+       for_each_present_cpu(cpu) {
+               if (cpu_online(cpu))
+                       i++;
+               j++;
        }
+       phba->sli4_hba.num_online_cpu = i;
+       phba->sli4_hba.num_present_cpu = j;
+
        if (i < cfg_fcp_io_channel) {
                lpfc_printf_log(phba,
                                KERN_ERR, LOG_INIT,
                                "3188 Reducing IO channels to match number of "
-                               "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
+                               "online CPUs: from %d to %d\n",
+                               cfg_fcp_io_channel, i);
                cfg_fcp_io_channel = i;
        }
 
@@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 
 out:
        /* Catch the not-ready port failure after a port reset. */
-       if (num_resets >= MAX_IF_TYPE_2_RESETS)
+       if (num_resets >= MAX_IF_TYPE_2_RESETS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3317 HBA not functional: IP Reset Failed "
+                               "after (%d) retries, try: "
+                               "echo fw_reset > board_mode\n", num_resets);
                rc = -ENODEV;
+       }
 
        return rc;
 }
@@ -8208,6 +8348,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
        return;
 }
 
+/**
+ * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
+ * @phba: pointer to lpfc hba data structure.
+ * @phys_id: physical package id (socket) the returned CPU must belong to.
+ *
+ * Find next available CPU to use for IRQ to CPU affinity.
+ */
+static int
+lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
+{
+       struct lpfc_vector_map_info *cpup;
+       int cpu;
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               /* CPU must be online */
+               if (cpu_online(cpu)) {
+                       if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+                           (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
+                           (cpup->phys_id == phys_id)) {
+                               return cpu;
+                       }
+               }
+               cpup++;
+       }
+
+       /*
+        * If we get here, we have used ALL CPUs for the specific
+        * phys_id. Now we need to clear out lpfc_used_cpu and start
+        * reusing CPUs.
+        */
+
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               if (lpfc_used_cpu[cpu] == phys_id)
+                       lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+       }
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               /* CPU must be online */
+               if (cpu_online(cpu)) {
+                       if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+                           (cpup->phys_id == phys_id)) {
+                               return cpu;
+                       }
+               }
+               cpup++;
+       }
+       return LPFC_VECTOR_MAP_EMPTY;
+}
+
+/**
+ * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
+ * @phba:      pointer to lpfc hba data structure.
+ * @vectors:   number of HBA vectors
+ *
+ * Affinitize MSIX IRQ vectors to CPUs. Try to spread the vector
+ * affinity equally across multiple physical CPUs (NUMA nodes).
+ * In addition, this routine will assign an IO channel for each CPU
+ * to use when issuing I/Os.
+ */
+static int
+lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
+{
+       int i, idx, saved_chann, used_chann, cpu, phys_id;
+       int max_phys_id, num_io_channel, first_cpu;
+       struct lpfc_vector_map_info *cpup;
+#ifdef CONFIG_X86
+       struct cpuinfo_x86 *cpuinfo;
+#endif
+       struct cpumask *mask;
+       uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
+
+       /* If there is no mapping, just return */
+       if (!phba->cfg_fcp_cpu_map)
+               return 1;
+
+       /* Init cpu_map array */
+       memset(phba->sli4_hba.cpu_map, 0xff,
+              (sizeof(struct lpfc_vector_map_info) *
+               phba->sli4_hba.num_present_cpu));
+
+       max_phys_id = 0;
+       phys_id = 0;
+       num_io_channel = 0;
+       first_cpu = LPFC_VECTOR_MAP_EMPTY;
+
+       /* Update CPU map with physical id and core id of each CPU */
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+#ifdef CONFIG_X86
+               cpuinfo = &cpu_data(cpu);
+               cpup->phys_id = cpuinfo->phys_proc_id;
+               cpup->core_id = cpuinfo->cpu_core_id;
+#else
+               /* No distinction between CPUs for other platforms */
+               cpup->phys_id = 0;
+               cpup->core_id = 0;
+#endif
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3328 CPU physid %d coreid %d\n",
+                               cpup->phys_id, cpup->core_id);
+
+               if (cpup->phys_id > max_phys_id)
+                       max_phys_id = cpup->phys_id;
+               cpup++;
+       }
+
+       /* Now associate the HBA vectors with specific CPUs */
+       for (idx = 0; idx < vectors; idx++) {
+               cpup = phba->sli4_hba.cpu_map;
+               cpu = lpfc_find_next_cpu(phba, phys_id);
+               if (cpu == LPFC_VECTOR_MAP_EMPTY) {
+
+                       /* Try for all phys_id's */
+                       for (i = 1; i < max_phys_id; i++) {
+                               phys_id++;
+                               if (phys_id > max_phys_id)
+                                       phys_id = 0;
+                               cpu = lpfc_find_next_cpu(phba, phys_id);
+                               if (cpu == LPFC_VECTOR_MAP_EMPTY)
+                                       continue;
+                               goto found;
+                       }
+
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3329 Cannot set affinity: "
+                                       "Error mapping vector %d (%d)\n",
+                                       idx, vectors);
+                       return 0;
+               }
+found:
+               cpup += cpu;
+               if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
+                       lpfc_used_cpu[cpu] = phys_id;
+
+               /* Associate vector with selected CPU */
+               cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
+
+               /* Associate IO channel with selected CPU */
+               cpup->channel_id = idx;
+               num_io_channel++;
+
+               if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
+                       first_cpu = cpu;
+
+               /* Now affinitize to the selected CPU */
+               mask = &cpup->maskbits;
+               cpumask_clear(mask);
+               cpumask_set_cpu(cpu, mask);
+               i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
+                                         vector, mask);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3330 Set Affinity: CPU %d channel %d "
+                               "irq %d (%x)\n",
+                               cpu, cpup->channel_id,
+                               phba->sli4_hba.msix_entries[idx].vector, i);
+
+               /* Spread vector mapping across multple physical CPU nodes */
+               phys_id++;
+               if (phys_id > max_phys_id)
+                       phys_id = 0;
+       }
+
+       /*
+        * Finally fill in the IO channel for any remaining CPUs.
+        * At this point, all IO channels have been assigned to a specific
+        * MSIx vector, mapped to a specific CPU.
+        * Base the remaining IO channel assignments on the IO channels
+        * already assigned to other CPUs on the same phys_id.
+        */
+       for (i = 0; i <= max_phys_id; i++) {
+               /*
+                * If there are no io channels already mapped to
+                * this phys_id, just round-robin through the io_channels.
+                * Setup chann[] for round robin.
+                */
+               for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+                       chann[idx] = idx;
+
+               saved_chann = 0;
+               used_chann = 0;
+
+               /*
+                * First build a list of IO channels already assigned
+                * to this phys_id before reassigning the same IO
+                * channels to the remaining CPUs.
+                */
+               cpup = phba->sli4_hba.cpu_map;
+               cpu = first_cpu;
+               cpup += cpu;
+               for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
+                    idx++) {
+                       if (cpup->phys_id == i) {
+                               /*
+                                * Save any IO channels that are
+                                * already mapped to this phys_id.
+                                */
+                               if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
+                                       chann[saved_chann] =
+                                               cpup->channel_id;
+                                       saved_chann++;
+                                       goto out;
+                               }
+
+                               /* See if we are using round-robin */
+                               if (saved_chann == 0)
+                                       saved_chann =
+                                               phba->cfg_fcp_io_channel;
+
+                               /* Associate next IO channel with CPU */
+                               cpup->channel_id = chann[used_chann];
+                               num_io_channel++;
+                               used_chann++;
+                               if (used_chann == saved_chann)
+                                       used_chann = 0;
+
+                               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                               "3331 Set IO_CHANN "
+                                               "CPU %d channel %d\n",
+                                               idx, cpup->channel_id);
+                       }
+out:
+                       cpu++;
+                       if (cpu >= phba->sli4_hba.num_present_cpu) {
+                               cpup = phba->sli4_hba.cpu_map;
+                               cpu = 0;
+                       } else {
+                               cpup++;
+                       }
+               }
+       }
+
+       if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
+               cpup = phba->sli4_hba.cpu_map;
+               for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+                       if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
+                               cpup->channel_id = 0;
+                               num_io_channel++;
+
+                               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                               "3332 Assign IO_CHANN "
+                                               "CPU %d channel %d\n",
+                                               idx, cpup->channel_id);
+                       }
+                       cpup++;
+               }
+       }
+
+       /* Sanity check */
+       if (num_io_channel != phba->sli4_hba.num_present_cpu)
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3333 Set affinity mismatch: "
+                               "%d chann != %d cpus: %d vectors\n",
+                               num_io_channel, phba->sli4_hba.num_present_cpu,
+                               vectors);
+
+       phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
+       return 1;
+}
+
+
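The mapping loop above assigns each MSI-X vector to the next idle CPU on the current physical package and then advances the package id, so consecutive vectors land on different sockets when possible. The standalone sketch below models just that round-robin selection with a made-up cpu_phys_id[] table and counts; it is illustrative only, not driver code.

#include <stdio.h>

#define NUM_CPUS     8
#define NUM_VECTORS  4
#define MAP_EMPTY    0xffff

/* hypothetical: physical package id of each logical CPU (two sockets) */
static const int cpu_phys_id[NUM_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* find an unassigned CPU that sits on the requested package */
static int find_next_cpu(const int *assigned, int phys_id)
{
	for (int cpu = 0; cpu < NUM_CPUS; cpu++)
		if (assigned[cpu] == MAP_EMPTY && cpu_phys_id[cpu] == phys_id)
			return cpu;
	return -1;
}

int main(void)
{
	int assigned[NUM_CPUS];
	int phys_id = 0, max_phys_id = 1;

	for (int i = 0; i < NUM_CPUS; i++)
		assigned[i] = MAP_EMPTY;

	for (int vec = 0; vec < NUM_VECTORS; vec++) {
		int cpu = find_next_cpu(assigned, phys_id);

		/* fall back to any other package that still has a free CPU */
		for (int tries = 0; cpu < 0 && tries < max_phys_id; tries++) {
			phys_id = (phys_id + 1) > max_phys_id ? 0 : phys_id + 1;
			cpu = find_next_cpu(assigned, phys_id);
		}
		if (cpu < 0)
			break;

		assigned[cpu] = vec;
		printf("vector %d -> cpu %d (phys_id %d)\n",
		       vec, cpu, cpu_phys_id[cpu]);

		/* advance to the next package for the next vector */
		phys_id = (phys_id + 1) > max_phys_id ? 0 : phys_id + 1;
	}
	return 0;
}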
 /**
  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
  * @phba: pointer to lpfc hba data structure.
@@ -8259,9 +8662,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
                                phba->sli4_hba.msix_entries[index].vector,
                                phba->sli4_hba.msix_entries[index].entry);
 
-       /*
-        * Assign MSI-X vectors to interrupt handlers
-        */
+       /* Assign MSI-X vectors to interrupt handlers */
        for (index = 0; index < vectors; index++) {
                memset(&phba->sli4_hba.handler_name[index], 0, 16);
                sprintf((char *)&phba->sli4_hba.handler_name[index],
@@ -8289,6 +8690,8 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
                                phba->cfg_fcp_io_channel, vectors);
                phba->cfg_fcp_io_channel = vectors;
        }
+
+       lpfc_sli4_set_affinity(phba, vectors);
        return rc;
 
 cfg_fail_out:
@@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
 
+       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+       lpfc_sli_flush_fcp_rings(phba);
+
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
        /* Disable interrupt and pci device */
        lpfc_sli_disable_intr(phba);
        pci_disable_device(phba->pcidev);
-
-       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
-       lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
@@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
 
+       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+       lpfc_sli_flush_fcp_rings(phba);
+
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
@@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
        lpfc_sli4_disable_intr(phba);
        lpfc_sli4_queue_destroy(phba);
        pci_disable_device(phba->pcidev);
-
-       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
-       lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
@@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = {
 static int __init
 lpfc_init(void)
 {
+       int cpu;
        int error = 0;
 
        printk(LPFC_MODULE_DESC "\n");
@@ -10561,6 +10965,11 @@ lpfc_init(void)
                        return -ENOMEM;
                }
        }
+
+       /* Initialize in case vector mapping is needed */
+       for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++)
+               lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+
        error = pci_register_driver(&lpfc_driver);
        if (error) {
                fc_release_transport(lpfc_transport_template);
index baf53e6c2bd15bfbbf8c25af5face63f1c9fe006..2a4e5d21eab2ad3c10f45dd7bece5c5a7ae02e06 100644 (file)
@@ -37,6 +37,7 @@
 #define LOG_EVENT      0x00010000      /* CT,TEMP,DUMP, logging */
 #define LOG_FIP                0x00020000      /* FIP events */
 #define LOG_FCP_UNDER  0x00040000      /* FCP underruns errors */
+#define LOG_SCSI_CMD   0x00080000      /* ALL SCSI commands */
 #define LOG_ALL_MSG    0xffffffff      /* LOG all messages */
 
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
index a7a9fa468308bda8ddaa97d196fa23ce75133824..41363db7d42628b9af9ec5e1a1ab7f927ab6aff9 100644 (file)
@@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 
        /* Only FC supports upd bit */
        if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
-           (vport->fc_flag & FC_VFI_REGISTERED)) {
+           (vport->fc_flag & FC_VFI_REGISTERED) &&
+           (!phba->fc_topology_changed)) {
                bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
                bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
        }
        lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
                        "3134 Register VFI, mydid:x%x, fcfi:%d, "
-                       " vfi:%d, vpi:%d, fc_pname:%x%x\n",
+                       " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
+                       " port_state:x%x topology chg:%d\n",
                        vport->fc_myDID,
                        phba->fcf.fcfi,
                        phba->sli4_hba.vfi_ids[vport->vfi],
                        phba->vpi_ids[vport->vpi],
-                       reg_vfi->wwn[0], reg_vfi->wwn[1]);
+                       reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
+                       vport->port_state, phba->fc_topology_changed);
 }
 
 /**
index cd86069a0ba82a2954238139e8626283fac56d07..812d0cd7c86dcc8c9c3fe86f3ff6b8079ba850ab 100644 (file)
@@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
        int i;
 
-       if (phba->sli_rev == LPFC_SLI_REV4)
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               /* Calculate alignment */
+               if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+                       i = phba->cfg_sg_dma_buf_size;
+               else
+                       i = SLI4_PAGE_SIZE;
+
                phba->lpfc_scsi_dma_buf_pool =
                        pci_pool_create("lpfc_scsi_dma_buf_pool",
                                phba->pcidev,
                                phba->cfg_sg_dma_buf_size,
-                               phba->cfg_sg_dma_buf_size,
+                               i,
                                0);
-       else
+       } else {
                phba->lpfc_scsi_dma_buf_pool =
                        pci_pool_create("lpfc_scsi_dma_buf_pool",
                                phba->pcidev, phba->cfg_sg_dma_buf_size,
                                align, 0);
+       }
+
        if (!phba->lpfc_scsi_dma_buf_pool)
                goto fail;
 
index 82f4d3542289f6a8fab4cc4f99a5b5d3c2604cf1..31e9b92f5a9bebc181e0fffec874d95c94dac6e9 100644 (file)
@@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        /* PLOGI chkparm OK */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+                        "0114 PLOGI chkparm OK Data: x%x x%x x%x "
+                        "x%x x%x x%x\n",
                         ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
-                        ndlp->nlp_rpi);
+                        ndlp->nlp_rpi, vport->port_state,
+                        vport->fc_flag);
 
        if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
                ndlp->nlp_fcp_info |= CLASS2;
@@ -574,7 +576,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 
        /* 1 sec timeout */
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                         * If there are other active VLinks present,
                         * re-instantiate the Vlink using FDISC.
                         */
-                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       mod_timer(&ndlp->nlp_delayfunc,
+                                 jiffies + msecs_to_jiffies(1000));
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_DELAY_TMO;
                        spin_unlock_irq(shost->host_lock);
@@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
                (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
                /* Only try to re-login if this is NOT a Fabric Node */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        }
 
        /* Put ndlp in npr state set plogi timer for 1 sec */
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
        if ((irsp->ulpStatus) ||
            (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
                /* 1 sec timeout */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
                }
 
                /* Put ndlp in npr state set plogi timer for 1 sec */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 
        if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
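The timer updates in the hunks above replace open-coded "jiffies + HZ" arithmetic with msecs_to_jiffies(1000), which is equivalent for a whole second but keeps the units explicit and stays correct if the delay is later changed to something that is not a multiple of HZ. Below is a rough userspace model of the conversion; the HZ value is assumed, and the real kernel helper additionally handles overflow and tick rates that do not divide 1000 evenly.

#include <stdio.h>

#define HZ 250 /* assumed tick rate for the example */

/* models only the round-up behaviour of the kernel helper */
static unsigned long msecs_to_jiffies_sketch(unsigned int msecs)
{
	/* round up so the timer never fires early */
	return ((unsigned long)msecs * HZ + 999) / 1000;
}

int main(void)
{
	printf("1000 ms -> %lu jiffies (HZ=%d)\n",
	       msecs_to_jiffies_sketch(1000), HZ);
	printf("1500 ms -> %lu jiffies\n", msecs_to_jiffies_sketch(1500));
	return 0;
}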
index 74b8710e1e90499eadcbfc72580b12a4c223c18e..8523b278ec9daf1c2d37ffedf08335b32701cb53 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/export.h>
 #include <linux/delay.h>
 #include <asm/unaligned.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -48,7 +50,7 @@
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
-int _dump_buf_done;
+int _dump_buf_done = 1;
 
 static char *dif_op_str[] = {
        "PROT_NORMAL",
@@ -66,6 +68,10 @@ struct scsi_dif_tuple {
        __be32 ref_tag;         /* Target LBA or indirect LBA */
 };
 
+#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK)
+#define scsi_prot_flagged(sc, flg)     sc
+#endif
+
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 static void
@@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_bpl;
        uint16_t iotag;
-       int bcnt;
+       int bcnt, bpl_size;
+
+       bpl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                        "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+                        num_to_alloc, phba->cfg_sg_dma_buf_size,
+                        (int)sizeof(struct fcp_cmnd),
+                        (int)sizeof(struct fcp_rsp), bpl_size);
 
        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
                             struct list_head *post_sblist, int sb_count)
 {
        struct lpfc_scsi_buf *psb, *psb_next;
-       int status;
+       int status, sgl_size;
        int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
        dma_addr_t pdma_phys_bpl1;
        int last_xritag = NO_XRI;
@@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
        if (sb_count <= 0)
                return -EINVAL;
 
+       sgl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
        list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
                list_del_init(&psb->list);
                block_cnt++;
@@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
                                post_cnt = block_cnt;
                        } else if (block_cnt == 1) {
                                /* last single sgl with non-contiguous xri */
-                               if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+                               if (sgl_size > SGL_PAGE_SIZE)
                                        pdma_phys_bpl1 = psb->dma_phys_bpl +
                                                                SGL_PAGE_SIZE;
                                else
@@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
        int num_posted, rc = 0;
 
        /* get all SCSI buffers need to repost to a local list */
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
+       list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        /* post the list of scsi buffer sgls to port if available */
        if (!list_empty(&post_sblist)) {
@@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
        IOCB_t *iocb;
        dma_addr_t pdma_phys_fcp_cmd;
        dma_addr_t pdma_phys_fcp_rsp;
-       dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
+       dma_addr_t pdma_phys_bpl;
        uint16_t iotag, lxri = 0;
-       int bcnt, num_posted;
+       int bcnt, num_posted, sgl_size;
        LIST_HEAD(prep_sblist);
        LIST_HEAD(post_sblist);
        LIST_HEAD(scsi_sblist);
 
+       sgl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                        "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+                        num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
+                        (int)sizeof(struct fcp_cmnd),
+                        (int)sizeof(struct fcp_rsp));
+
        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
                if (!psb)
@@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                }
                memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
+               /* Page alignment is CRITICAL, double check to be sure */
+               if (((unsigned long)(psb->data) &
+                   (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+                       pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+                                     psb->data, psb->dma_handle);
+                       kfree(psb);
+                       break;
+               }
+
                /* Allocate iotag for psb->cur_iocbq. */
                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
                if (iotag == 0) {
@@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
                psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
                psb->fcp_bpl = psb->data;
-               psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
-                       - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+               psb->fcp_cmnd = (psb->data + sgl_size);
                psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
                                        sizeof(struct fcp_cmnd));
 
                /* Initialize local short-hand pointers. */
                sgl = (struct sli4_sge *)psb->fcp_bpl;
                pdma_phys_bpl = psb->dma_handle;
-               pdma_phys_fcp_cmd =
-                       (psb->dma_handle + phba->cfg_sg_dma_buf_size)
-                        - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+               pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
                pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
 
                /*
@@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                iocb->ulpLe = 1;
                iocb->ulpClass = CLASS3;
                psb->cur_iocbq.context1 = psb;
-               if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-                       pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
-               else
-                       pdma_phys_bpl1 = 0;
                psb->dma_phys_bpl = pdma_phys_bpl;
 
                /* add the scsi buffer to a post list */
                list_add_tail(&psb->list, &post_sblist);
-               spin_lock_irq(&phba->scsi_buf_list_lock);
+               spin_lock_irq(&phba->scsi_buf_list_get_lock);
                phba->sli4_hba.scsi_xri_cnt++;
-               spin_unlock_irq(&phba->scsi_buf_list_lock);
+               spin_unlock_irq(&phba->scsi_buf_list_get_lock);
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_BG,
                        "3021 Allocate %d out of %d requested new SCSI "
@@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        struct  lpfc_scsi_buf * lpfc_cmd = NULL;
-       struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
-       unsigned long iflag = 0;
-
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
-       if (lpfc_cmd) {
-               lpfc_cmd->seg_cnt = 0;
-               lpfc_cmd->nonsg_phys = 0;
-               lpfc_cmd->prot_seg_cnt = 0;
+       struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
+       unsigned long gflag = 0;
+       unsigned long pflag = 0;
+
+       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+       list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
+                        list);
+       if (!lpfc_cmd) {
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+               list_splice(&phba->lpfc_scsi_buf_list_put,
+                           &phba->lpfc_scsi_buf_list_get);
+               INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+               list_remove_head(scsi_buf_list_get, lpfc_cmd,
+                                struct lpfc_scsi_buf, list);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
        }
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
        return  lpfc_cmd;
 }
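The buffer-pool changes in this file split the old single lpfc_scsi_buf_list, guarded by one lock, into a "get" list used when commands are allocated and a "put" list used when they complete; the put list is only spliced back into the get list when the latter runs dry, so the two hot paths rarely contend on the same lock. A userspace sketch of that two-list idea, using a plain singly-linked free list and illustrative names (not the driver's):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	struct buf *next;
	int id;
};

static struct buf *get_list, *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

static struct buf *buf_get(void)
{
	struct buf *b;

	pthread_mutex_lock(&get_lock);
	if (!get_list) {
		/* slow path: refill the get list from the put list */
		pthread_mutex_lock(&put_lock);
		get_list = put_list;
		put_list = NULL;
		pthread_mutex_unlock(&put_lock);
	}
	b = get_list;
	if (b)
		get_list = b->next;
	pthread_mutex_unlock(&get_lock);
	return b;
}

static void buf_put(struct buf *b)
{
	/* fast path: completion only ever touches the put lock */
	pthread_mutex_lock(&put_lock);
	b->next = put_list;
	put_list = b;
	pthread_mutex_unlock(&put_lock);
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	b->id = 1;
	buf_put(b);            /* release goes to the put list      */
	b = buf_get();         /* empty get list triggers the splice */
	printf("got buffer %d\n", b ? b->id : -1);
	free(b);
	return 0;
}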
 /**
@@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        struct lpfc_scsi_buf *lpfc_cmd ;
-       unsigned long iflag = 0;
+       unsigned long gflag = 0;
+       unsigned long pflag = 0;
        int found = 0;
 
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
-                                                       list) {
+       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+       list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
                if (lpfc_test_rrq_active(phba, ndlp,
                                         lpfc_cmd->cur_iocbq.sli4_lxritag))
                        continue;
                list_del(&lpfc_cmd->list);
                found = 1;
-               lpfc_cmd->seg_cnt = 0;
-               lpfc_cmd->nonsg_phys = 0;
-               lpfc_cmd->prot_seg_cnt = 0;
                break;
        }
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
-                                                iflag);
+       if (!found) {
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+               list_splice(&phba->lpfc_scsi_buf_list_put,
+                           &phba->lpfc_scsi_buf_list_get);
+               INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+               list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
+                                   list) {
+                       if (lpfc_test_rrq_active(
+                               phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
+                               continue;
+                       list_del(&lpfc_cmd->list);
+                       found = 1;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
        if (!found)
                return NULL;
-       else
-               return  lpfc_cmd;
+       return  lpfc_cmd;
 }
 /**
  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        unsigned long iflag = 0;
 
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+       psb->seg_cnt = 0;
+       psb->nonsg_phys = 0;
+       psb->prot_seg_cnt = 0;
+
+       spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
        psb->pCmd = NULL;
-       list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+       list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 }
 
 /**
@@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        unsigned long iflag = 0;
 
+       psb->seg_cnt = 0;
+       psb->nonsg_phys = 0;
+       psb->prot_seg_cnt = 0;
+
        if (psb->exch_busy) {
                spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
@@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
                spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
        } else {
-
-               spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
                psb->pCmd = NULL;
-               list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-               spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+               psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+               list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
        }
 }
 
@@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
                               "dma_map_sg.  Config %d, seg_cnt %d\n",
                               __func__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
+                       lpfc_cmd->seg_cnt = 0;
                        scsi_dma_unmap(scsi_cmnd);
                        return 1;
                }
@@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
        bf_set(pde6_optx, pde6, txop);
        bf_set(pde6_oprx, pde6, rxop);
+
+       /*
+        * We only need to check the data on READs, for WRITEs
+        * protection data is automatically generated, not checked.
+        */
        if (datadir == DMA_FROM_DEVICE) {
-               bf_set(pde6_ce, pde6, checking);
-               bf_set(pde6_re, pde6, checking);
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(pde6_ce, pde6, checking);
+               else
+                       bf_set(pde6_ce, pde6, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(pde6_re, pde6, checking);
+               else
+                       bf_set(pde6_re, pde6, 0);
        }
        bf_set(pde6_ai, pde6, 1);
        bf_set(pde6_ae, pde6, 0);
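As the comment in the hunk above notes, the guard and reference check bits are only honoured for reads (DMA_FROM_DEVICE); on writes the HBA generates the protection data itself, so nothing is checked inbound. A tiny standalone model of that decision, with made-up names:

#include <stdbool.h>
#include <stdio.h>

enum dir { TO_DEVICE, FROM_DEVICE };

/* reads honour the requested checks, writes never check inbound data */
static void set_check_bits(enum dir d, bool guard_req, bool ref_req,
			   bool *check_guard, bool *check_ref)
{
	*check_guard = (d == FROM_DEVICE) && guard_req;
	*check_ref   = (d == FROM_DEVICE) && ref_req;
}

int main(void)
{
	bool cg, cr;

	set_check_bits(FROM_DEVICE, true, true, &cg, &cr);
	printf("read:  guard=%d ref=%d\n", cg, cr);
	set_check_bits(TO_DEVICE, true, true, &cg, &cr);
	printf("write: guard=%d ref=%d\n", cg, cr);
	return 0;
}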
@@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        split_offset = 0;
        do {
+               /* Check to see if we ran out of space */
+               if (num_bde >= (phba->cfg_total_seg_cnt - 2))
+                       return num_bde + 3;
+
                /* setup PDE5 with what we have */
                pde5 = (struct lpfc_pde5 *) bpl;
                memset(pde5, 0, sizeof(struct lpfc_pde5));
@@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
                bf_set(pde6_optx, pde6, txop);
                bf_set(pde6_oprx, pde6, rxop);
-               bf_set(pde6_ce, pde6, checking);
-               bf_set(pde6_re, pde6, checking);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(pde6_ce, pde6, checking);
+               else
+                       bf_set(pde6_ce, pde6, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(pde6_re, pde6, checking);
+               else
+                       bf_set(pde6_re, pde6, 0);
+
                bf_set(pde6_ai, pde6, 1);
                bf_set(pde6_ae, pde6, 0);
                bf_set(pde6_apptagval, pde6, 0);
@@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                pgdone = 0;
                subtotal = 0; /* total bytes processed for current prot grp */
                while (!pgdone) {
+                       /* Check to see if we ran out of space */
+                       if (num_bde >= phba->cfg_total_seg_cnt)
+                               return num_bde + 1;
+
                        if (!sgde) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                                        "9065 BLKGRD:%s Invalid data segment\n",
@@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        struct sli4_sge_diseed *diseed = NULL;
        dma_addr_t physaddr;
        int i = 0, num_sge = 0, status;
-       int datadir = sc->sc_data_direction;
        uint32_t reftag;
        unsigned blksize;
        uint8_t txop, rxop;
@@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        diseed->ref_tag = cpu_to_le32(reftag);
        diseed->ref_tag_tran = diseed->ref_tag;
 
+       /*
+        * We only need to check the data on READs, for WRITEs
+        * protection data is automatically generated, not checked.
+        */
+       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+       }
+
        /* setup DISEED with the rest of the info */
        bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
        bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
-       if (datadir == DMA_FROM_DEVICE) {
-               bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-               bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
-       }
+
        bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
        bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
 
@@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        split_offset = 0;
        do {
+               /* Check to see if we ran out of space */
+               if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+                       return num_sge + 3;
+
                /* setup DISEED with what we have */
                diseed = (struct sli4_sge_diseed *) sgl;
                memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                diseed->ref_tag = cpu_to_le32(reftag);
                diseed->ref_tag_tran = diseed->ref_tag;
 
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) {
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+
+               } else {
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+                       /*
+                        * When in this mode, the hardware will replace
+                        * the guard tag from the host with a
+                        * newly generated good CRC for the wire.
+                        * Switch to raw mode here to avoid this
+                        * behavior. What the host sends gets put on the wire.
+                        */
+                       if (txop == BG_OP_IN_CRC_OUT_CRC) {
+                               txop = BG_OP_RAW_MODE;
+                               rxop = BG_OP_RAW_MODE;
+                       }
+               }
+
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+
                /* setup DISEED with the rest of the info */
                bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
                bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
-               bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-               bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+
                bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
                bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
 
@@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                pgdone = 0;
                subtotal = 0; /* total bytes processed for current prot grp */
                while (!pgdone) {
+                       /* Check to see if we ran out of space */
+                       if (num_sge >= phba->cfg_total_seg_cnt)
+                               return num_sge + 1;
+
                        if (!sgde) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                                        "9086 BLKGRD:%s Invalid data segment\n",
@@ -2669,6 +2800,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
        return ret;
 }
 
+/**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
+ *
+ * returns the adjusted data length
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+                      struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+       int fcpdl;
+
+       fcpdl = scsi_bufflen(sc);
+
+       /* Check if there is protection data on the wire */
+       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+               /* Read */
+               if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
+                       return fcpdl;
+
+       } else {
+               /* Write */
+               if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
+                       return fcpdl;
+       }
+
+       /*
+        * If we are in DIF Type 1 mode, every data block has an 8 byte
+        * DIF (trailer) attached to it. Must adjust the FCP data length.
+        */
+       if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI))
+               fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
+
+       return fcpdl;
+}
+
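For the Type 1 case the helper above adds one 8-byte DIF tuple per logical block to the FCP data length. A worked example with assumed numbers (a 32 KiB transfer of 512-byte blocks):

#include <stdio.h>

int main(void)
{
	int blksize = 512;
	int fcpdl = 32 * 1024;                      /* scsi_bufflen()      */

	fcpdl += (fcpdl / blksize) * 8;             /* 64 blocks * 8 bytes */
	printf("wire length = %d bytes\n", fcpdl);  /* prints 33280        */
	return 0;
}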
 /**
  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
  * @phba: The Hba for which this call is being executed.
@@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
        uint32_t num_bde = 0;
        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
        int prot_group_type = 0;
-       int diflen, fcpdl;
-       unsigned blksize;
+       int fcpdl;
 
        /*
         * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
                        return 1;
 
                lpfc_cmd->seg_cnt = datasegcnt;
-               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9067 BLKGRD: %s: Too many sg segments"
-                                       " from dma_map_sg.  Config %d, seg_cnt"
-                                       " %d\n",
-                                       __func__, phba->cfg_sg_seg_cnt,
-                                       lpfc_cmd->seg_cnt);
-                       scsi_dma_unmap(scsi_cmnd);
-                       return 1;
-               }
+
+               /* First check if data segment count from SCSI Layer is good */
+               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+                       goto err;
 
                prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
                switch (prot_group_type) {
                case LPFC_PG_TYPE_NO_DIF:
+
+                       /* Here we need to add a PDE5 and PDE6 to the count */
+                       if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+                               goto err;
+
                        num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
                                        datasegcnt);
                        /* we should have 2 or more entries in buffer list */
                        if (num_bde < 2)
                                goto err;
                        break;
-               case LPFC_PG_TYPE_DIF_BUF:{
+
+               case LPFC_PG_TYPE_DIF_BUF:
                        /*
                         * This type indicates that protection buffers are
                         * passed to the driver, so that needs to be prepared
@@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
                        }
 
                        lpfc_cmd->prot_seg_cnt = protsegcnt;
-                       if (lpfc_cmd->prot_seg_cnt
-                           > phba->cfg_prot_sg_seg_cnt) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9068 BLKGRD: %s: Too many prot sg "
-                                       "segments from dma_map_sg.  Config %d,"
-                                               "prot_seg_cnt %d\n", __func__,
-                                               phba->cfg_prot_sg_seg_cnt,
-                                               lpfc_cmd->prot_seg_cnt);
-                               dma_unmap_sg(&phba->pcidev->dev,
-                                            scsi_prot_sglist(scsi_cmnd),
-                                            scsi_prot_sg_count(scsi_cmnd),
-                                            datadir);
-                               scsi_dma_unmap(scsi_cmnd);
-                               return 1;
-                       }
+
+                       /*
+                        * There is a minimum of 4 BPLs used for every
+                        * protection data segment.
+                        */
+                       if ((lpfc_cmd->prot_seg_cnt * 4) >
+                           (phba->cfg_total_seg_cnt - 2))
+                               goto err;
 
                        num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
                                        datasegcnt, protsegcnt);
                        /* we should have 3 or more entries in buffer list */
-                       if (num_bde < 3)
+                       if ((num_bde < 3) ||
+                           (num_bde > phba->cfg_total_seg_cnt))
                                goto err;
                        break;
-               }
+
                case LPFC_PG_TYPE_INVALID:
                default:
+                       scsi_dma_unmap(scsi_cmnd);
+                       lpfc_cmd->seg_cnt = 0;
+
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "9022 Unexpected protection group %i\n",
                                        prot_group_type);
@@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;
 
-       fcpdl = scsi_bufflen(scsi_cmnd);
-
-       if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
-               /*
-                * We are in DIF Type 1 mode
-                * Every data block has a 8 byte DIF (trailer)
-                * attached to it.  Must ajust FCP data length
-                */
-               blksize = lpfc_cmd_blksize(scsi_cmnd);
-               diflen = (fcpdl / blksize) * 8;
-               fcpdl += diflen;
-       }
+       fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
 
        /*
@@ -2812,13 +2969,233 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
 
        return 0;
 err:
+       if (lpfc_cmd->seg_cnt)
+               scsi_dma_unmap(scsi_cmnd);
+       if (lpfc_cmd->prot_seg_cnt)
+               dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+                            scsi_prot_sg_count(scsi_cmnd),
+                            scsi_cmnd->sc_data_direction);
+
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "9023 Could not setup all needed BDE's"
-                       "prot_group_type=%d, num_bde=%d\n",
+                       "9023 Cannot setup S/G List for HBA "
+                       "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
+                       lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+                       phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
                        prot_group_type, num_bde);
+
+       lpfc_cmd->seg_cnt = 0;
+       lpfc_cmd->prot_seg_cnt = 0;
        return 1;
 }
 
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using the CRC algorithm
+ * provided by crc_t10dif.
+ */
+uint16_t
+lpfc_bg_crc(uint8_t *data, int count)
+{
+       uint16_t crc = 0;
+       uint16_t x;
+
+       crc = crc_t10dif(data, count);
+       x = cpu_to_be16(crc);
+       return x;
+}
+
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using the checksum algorithm
+ * provided by ip_compute_csum.
+ */
+uint16_t
+lpfc_bg_csum(uint8_t *data, int count)
+{
+       uint16_t ret;
+
+       ret = ip_compute_csum(data, count);
+       return ret;
+}
+
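lpfc_bg_crc() defers to the kernel's table-driven crc_t10dif(); for reference, the sketch below is a minimal bit-at-a-time userspace implementation of the same CRC-16 (polynomial 0x8bb7, zero initial value, no bit reflection), checked against the commonly quoted test value 0xd0db for the ASCII string "123456789". lpfc_bg_csum() similarly wraps ip_compute_csum(), the 16-bit Internet checksum used for the DIX IP guard type, which is not reproduced here.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* bit-by-bit CRC16 with polynomial 0x8bb7, init 0, no reflection/xorout */
static uint16_t crc_t10dif_sketch(const uint8_t *data, size_t len)
{
	uint16_t crc = 0;

	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)data[i] << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	const char *msg = "123456789";

	/* the reference check value for this input is 0xd0db */
	printf("crc_t10dif(\"%s\") = 0x%04x\n", msg,
	       crc_t10dif_sketch((const uint8_t *)msg, strlen(msg)));
	return 0;
}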
+/*
+ * This function examines the protection data to try to determine
+ * what type of T10-DIF error occurred.
+ */
+void
+lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scatterlist *sgpe; /* s/g prot entry */
+       struct scatterlist *sgde; /* s/g data entry */
+       struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+       struct scsi_dif_tuple *src = NULL;
+       uint8_t *data_src = NULL;
+       uint16_t guard_tag, guard_type;
+       uint16_t start_app_tag, app_tag;
+       uint32_t start_ref_tag, ref_tag;
+       int prot, protsegcnt;
+       int err_type, len, data_len;
+       int chk_ref, chk_app, chk_guard;
+       uint16_t sum;
+       unsigned blksize;
+
+       err_type = BGS_GUARD_ERR_MASK;
+       sum = 0;
+       guard_tag = 0;
+
+       /* First check to see if there is protection data to examine */
+       prot = scsi_get_prot_op(cmd);
+       if ((prot == SCSI_PROT_READ_STRIP) ||
+           (prot == SCSI_PROT_WRITE_INSERT) ||
+           (prot == SCSI_PROT_NORMAL))
+               goto out;
+
+       /* Currently the driver just supports ref_tag and guard_tag checking */
+       chk_ref = 1;
+       chk_app = 0;
+       chk_guard = 0;
+
+       /* Setup a ptr to the protection data provided by the SCSI host */
+       sgpe = scsi_prot_sglist(cmd);
+       protsegcnt = lpfc_cmd->prot_seg_cnt;
+
+       if (sgpe && protsegcnt) {
+
+               /*
+                * We will only try to verify guard tag if the segment
+                * data length is a multiple of the blksize.
+                */
+               sgde = scsi_sglist(cmd);
+               blksize = lpfc_cmd_blksize(cmd);
+               data_src = (uint8_t *)sg_virt(sgde);
+               data_len = sgde->length;
+               if ((data_len & (blksize - 1)) == 0)
+                       chk_guard = 1;
+               guard_type = scsi_host_get_guard(cmd->device->host);
+
+               start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
+               src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+               start_app_tag = src->app_tag;
+               len = sgpe->length;
+               while (src && protsegcnt) {
+                       while (len) {
+
+                               /*
+                                * First check to see if a protection data
+                                * check is valid
+                                */
+                               if ((src->ref_tag == 0xffffffff) ||
+                                   (src->app_tag == 0xffff)) {
+                                       start_ref_tag++;
+                                       goto skipit;
+                               }
+
+                               /* App Tag checking */
+                               app_tag = src->app_tag;
+                               if (chk_app && (app_tag != start_app_tag)) {
+                                       err_type = BGS_APPTAG_ERR_MASK;
+                                       goto out;
+                               }
+
+                               /* Reference Tag checking */
+                               ref_tag = be32_to_cpu(src->ref_tag);
+                               if (chk_ref && (ref_tag != start_ref_tag)) {
+                                       err_type = BGS_REFTAG_ERR_MASK;
+                                       goto out;
+                               }
+                               start_ref_tag++;
+
+                               /* Guard Tag checking */
+                               if (chk_guard) {
+                                       guard_tag = src->guard_tag;
+                                       if (guard_type == SHOST_DIX_GUARD_IP)
+                                               sum = lpfc_bg_csum(data_src,
+                                                                  blksize);
+                                       else
+                                               sum = lpfc_bg_crc(data_src,
+                                                                 blksize);
+                                       if ((guard_tag != sum)) {
+                                               err_type = BGS_GUARD_ERR_MASK;
+                                               goto out;
+                                       }
+                               }
+skipit:
+                               len -= sizeof(struct scsi_dif_tuple);
+                               if (len < 0)
+                                       len = 0;
+                               src++;
+
+                               data_src += blksize;
+                               data_len -= blksize;
+
+                               /*
+                                * Are we at the end of the Data segment?
+                                * The data segment is only used for Guard
+                                * tag checking.
+                                */
+                               if (chk_guard && (data_len == 0)) {
+                                       chk_guard = 0;
+                                       sgde = sg_next(sgde);
+                                       if (!sgde)
+                                               goto out;
+
+                                       data_src = (uint8_t *)sg_virt(sgde);
+                                       data_len = sgde->length;
+                                       if ((data_len & (blksize - 1)) == 0)
+                                               chk_guard = 1;
+                               }
+                       }
+
+                       /* Goto the next Protection data segment */
+                       sgpe = sg_next(sgpe);
+                       if (sgpe) {
+                               src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+                               len = sgpe->length;
+                       } else {
+                               src = NULL;
+                       }
+                       protsegcnt--;
+               }
+       }
+out:
+       if (err_type == BGS_GUARD_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x1);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+               phba->bg_guard_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               sum, guard_tag);
+
+       } else if (err_type == BGS_REFTAG_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x3);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+               phba->bg_reftag_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               ref_tag, start_ref_tag);
+
+       } else if (err_type == BGS_APPTAG_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x2);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+               phba->bg_apptag_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               app_tag, start_app_tag);
+       }
+}
+
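Stripped of the scatterlist walking and guard-tag recomputation, the reference-tag part of lpfc_calc_bg_err() reduces to verifying that the 8-byte DIF tuples count up from the command's starting LBA, skipping tuples whose tags are all ones. A simplified host-order sketch of that check (the real tuple fields are big-endian on the wire; names here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* 8-byte T10 DIF tuple trailing each block (host-order for simplicity) */
struct dif_tuple {
	uint16_t guard_tag;
	uint16_t app_tag;
	uint32_t ref_tag;
};

/* hypothetical checker: ref tags must count up from the starting LBA */
static int check_ref_tags(const struct dif_tuple *t, int nblocks,
			  uint32_t start_ref)
{
	for (int i = 0; i < nblocks; i++) {
		/* 0xffffffff means "do not check this block" */
		if (t[i].ref_tag == 0xffffffff)
			continue;
		if (t[i].ref_tag != start_ref + i)
			return i;       /* first failing block */
	}
	return -1;                      /* all blocks consistent */
}

int main(void)
{
	struct dif_tuple t[3] = {
		{ 0, 0, 100 }, { 0, 0, 101 }, { 0, 0, 103 } /* gap at blk 2 */
	};

	printf("first bad block: %d\n", check_ref_tags(t, 3, 100));
	return 0;
}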
+
 /*
  * This function checks for BlockGuard errors detected by
  * the HBA.  In case of errors, the ASC/ASCQ fields in the
@@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
        uint32_t bgstat = bgf->bgstat;
        uint64_t failing_sector = 0;
 
-       lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
-                       " 0x%x lba 0x%llx blk cnt 0x%x "
-                       "bgstat=0x%x bghm=0x%x\n",
-                       cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
-                       blk_rq_sectors(cmd->request), bgstat, bghm);
-
        spin_lock(&_dump_buf_lock);
        if (!_dump_buf_done) {
                lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
@@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 
        if (lpfc_bgs_get_invalid_prof(bgstat)) {
                cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
-                       " BlockGuard profile. bgstat:0x%x\n",
-                       bgstat);
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9072 BLKGRD: Invalid BG Profile in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
                ret = (-1);
                goto out;
        }
 
        if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
                cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
-                               "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
-                               bgstat);
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9073 BLKGRD: Invalid BG PDIF Block in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
                ret = (-1);
                goto out;
        }
@@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                cmd->result = DRIVER_SENSE << 24
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
                phba->bg_guard_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9055 BLKGRD: guard_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9055 BLKGRD: Guard Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
                phba->bg_reftag_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9056 BLKGRD: ref_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9056 BLKGRD: Ref Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
                phba->bg_apptag_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9061 BLKGRD: app_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9061 BLKGRD: App Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 
        if (!ret) {
                /* No error was reported - problem in FW? */
-               cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9057 BLKGRD: Unknown error reported!\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9057 BLKGRD: Unknown error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
+
+               /* Calculate what type of error it was */
+               lpfc_calc_bg_err(phba, lpfc_cmd);
        }
-
 out:
        return ret;
 }
@@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
                                "dma_map_sg.  Config %d, seg_cnt %d\n",
                                __func__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
+                       lpfc_cmd->seg_cnt = 0;
                        scsi_dma_unmap(scsi_cmnd);
                        return 1;
                }
@@ -3093,45 +3488,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
        return 0;
 }
 
-/**
- * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
- * @phba: The Hba for which this call is being executed.
- * @lpfc_cmd: The scsi buffer which is going to be adjusted.
- *
- * Adjust the data length to account for how much data
- * is actually on the wire.
- *
- * returns the adjusted data length
- **/
-static int
-lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
-               struct lpfc_scsi_buf *lpfc_cmd)
-{
-       struct scsi_cmnd *sc = lpfc_cmd->pCmd;
-       int diflen, fcpdl;
-       unsigned blksize;
-
-       fcpdl = scsi_bufflen(sc);
-
-       /* Check if there is protection data on the wire */
-       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
-               /* Read */
-               if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
-                       return fcpdl;
-
-       } else {
-               /* Write */
-               if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
-                       return fcpdl;
-       }
-
-       /* If protection data on the wire, adjust the count accordingly */
-       blksize = lpfc_cmd_blksize(sc);
-       diflen = (fcpdl / blksize) * 8;
-       fcpdl += diflen;
-       return fcpdl;
-}
-
 /**
  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
  * @phba: The Hba for which this call is being executed.
@@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
-       uint32_t num_bde = 0;
+       uint32_t num_sge = 0;
        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
        int prot_group_type = 0;
        int fcpdl;
 
        /*
         * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
-        *  fcp_rsp regions to the first data bde entry
+        *  fcp_rsp regions to the first data sge entry
         */
        if (scsi_sg_count(scsi_cmnd)) {
                /*
@@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 
                sgl += 1;
                lpfc_cmd->seg_cnt = datasegcnt;
-               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9087 BLKGRD: %s: Too many sg segments"
-                                       " from dma_map_sg.  Config %d, seg_cnt"
-                                       " %d\n",
-                                       __func__, phba->cfg_sg_seg_cnt,
-                                       lpfc_cmd->seg_cnt);
-                       scsi_dma_unmap(scsi_cmnd);
-                       return 1;
-               }
+
+               /* First check if data segment count from SCSI Layer is good */
+               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+                       goto err;
 
                prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
                switch (prot_group_type) {
                case LPFC_PG_TYPE_NO_DIF:
-                       num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+                       /* Here we need to add a DISEED to the count */
+                       if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+                               goto err;
+
+                       num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
                                        datasegcnt);
+
                        /* we should have 2 or more entries in buffer list */
-                       if (num_bde < 2)
+                       if (num_sge < 2)
                                goto err;
                        break;
-               case LPFC_PG_TYPE_DIF_BUF:{
+
+               case LPFC_PG_TYPE_DIF_BUF:
                        /*
                         * This type indicates that protection buffers are
                         * passed to the driver, so that needs to be prepared
@@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
                        }
 
                        lpfc_cmd->prot_seg_cnt = protsegcnt;
-                       if (lpfc_cmd->prot_seg_cnt
-                           > phba->cfg_prot_sg_seg_cnt) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9088 BLKGRD: %s: Too many prot sg "
-                                       "segments from dma_map_sg.  Config %d,"
-                                               "prot_seg_cnt %d\n", __func__,
-                                               phba->cfg_prot_sg_seg_cnt,
-                                               lpfc_cmd->prot_seg_cnt);
-                               dma_unmap_sg(&phba->pcidev->dev,
-                                            scsi_prot_sglist(scsi_cmnd),
-                                            scsi_prot_sg_count(scsi_cmnd),
-                                            datadir);
-                               scsi_dma_unmap(scsi_cmnd);
-                               return 1;
-                       }
+                       /*
+                        * There is a minimum of 3 SGEs used for every
+                        * protection data segment.
+                        */
+                       if ((lpfc_cmd->prot_seg_cnt * 3) >
+                           (phba->cfg_total_seg_cnt - 2))
+                               goto err;
 
-                       num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+                       num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
                                        datasegcnt, protsegcnt);
+
                        /* we should have 3 or more entries in buffer list */
-                       if (num_bde < 3)
+                       if ((num_sge < 3) ||
+                           (num_sge > phba->cfg_total_seg_cnt))
                                goto err;
                        break;
-               }
+
                case LPFC_PG_TYPE_INVALID:
                default:
+                       scsi_dma_unmap(scsi_cmnd);
+                       lpfc_cmd->seg_cnt = 0;
+
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "9083 Unexpected protection group %i\n",
                                        prot_group_type);
@@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
        }
 
        fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
-
        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
 
        /*
@@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 
        return 0;
 err:
+       if (lpfc_cmd->seg_cnt)
+               scsi_dma_unmap(scsi_cmnd);
+       if (lpfc_cmd->prot_seg_cnt)
+               dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+                            scsi_prot_sg_count(scsi_cmnd),
+                            scsi_cmnd->sc_data_direction);
+
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "9084 Could not setup all needed BDE's"
-                       "prot_group_type=%d, num_bde=%d\n",
-                       prot_group_type, num_bde);
+                       "9084 Cannot setup S/G List for HBA "
+                       "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
+                       lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+                       phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
+                       prot_group_type, num_sge);
+
+       lpfc_cmd->seg_cnt = 0;
+       lpfc_cmd->prot_seg_cnt = 0;
        return 1;
 }
 
@@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 
        if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
                if (vport->phba->cfg_enable_bg) {
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+                       lpfc_printf_vlog(vport,
+                                        KERN_INFO, LOG_SCSI_CMD,
                                         "9033 BLKGRD: rcvd %s cmd:x%x "
                                         "sector x%llx cnt %u pt %x\n",
                                         dif_op_str[scsi_get_prot_op(cmnd)],
@@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
        } else {
                if (vport->phba->cfg_enable_bg) {
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+                       lpfc_printf_vlog(vport,
+                                        KERN_INFO, LOG_SCSI_CMD,
                                         "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
                                         "x%x sector x%llx cnt %u pt %x\n",
                                         cmnd->cmnd[0],
@@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        /* Wait for abort to complete */
        wait_event_timeout(waitq,
                          (lpfc_cmd->pCmd != cmnd),
-                          (2*vport->cfg_devloss_tmo*HZ));
+                          msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
        lpfc_cmd->waitq = NULL;
 
        if (lpfc_cmd->pCmd == cmnd) {
@@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
        struct lpfc_hba *phba = vport->phba;
        int rc, ret = SUCCESS;
 
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                        "3172 SCSI layer issued Host Reset Data:\n");
+
        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
        lpfc_offline(phba);
        rc = lpfc_sli_brdrestart(phba);
        if (rc)
                ret = FAILED;
-       lpfc_online(phba);
+       rc = lpfc_online(phba);
+       if (rc)
+               ret = FAILED;
        lpfc_unblock_mgmt_io(phba);
 
-       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "3172 SCSI layer issued Host Reset Data: x%x\n", ret);
+       if (ret == FAILED) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                                "3323 Failed host reset, bring it offline\n");
+               lpfc_sli4_offline_eratt(phba);
+       }
        return ret;
 }
 
@@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev)
        }
        num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
        if (num_to_alloc != num_allocated) {
-                       lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-                                "0708 Allocation request of %d "
-                                "command buffers did not succeed.  "
-                                "Allocated %d buffers.\n",
-                                num_to_alloc, num_allocated);
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                                        "0708 Allocation request of %d "
+                                        "command buffers did not succeed.  "
+                                        "Allocated %d buffers.\n",
+                                        num_to_alloc, num_allocated);
        }
        if (num_allocated > 0)
                phba->total_scsi_bufs += num_allocated;
index 35dd17eb0f27907f67f4c50968a95452498b93be..572579f87de4fb1c3f74268da15ff8d226384103 100644 (file)
@@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
 
        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-       next_time = jiffies + HZ * (phba->fc_ratov + 1);
+       next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
@@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
                return;
        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-       next_time = jiffies + HZ * (phba->fc_ratov * 2);
+       next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
        list_splice_init(&phba->active_rrq_list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
 
@@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
-       rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+       rrq->rrq_stop_time = jiffies +
+                               msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
@@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
        } else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                        !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
                ndlp = piocbq->context_un.ndlp;
-       else  if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
-                       (piocbq->iocb_flag & LPFC_IO_LIBDFC))
+       else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
                ndlp = piocbq->context_un.ndlp;
        else
                ndlp = piocbq->context1;
@@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        BUG();
                else
                        mod_timer(&piocb->vport->els_tmofunc,
-                                 jiffies + HZ * (phba->fc_ratov << 1));
+                               jiffies +
+                               msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
        }
 
 
@@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                /* Mailbox cmd <cmd> Cmpl <cmpl> */
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                                "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
-                               "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
+                               "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+                               "x%x x%x x%x\n",
                                pmb->vport ? pmb->vport->vpi : 0,
                                pmbox->mbxCommand,
                                lpfc_sli_config_mbox_subsys_get(phba, pmb),
@@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                                pmbox->un.varWords[4],
                                pmbox->un.varWords[5],
                                pmbox->un.varWords[6],
-                               pmbox->un.varWords[7]);
+                               pmbox->un.varWords[7],
+                               pmbox->un.varWords[8],
+                               pmbox->un.varWords[9],
+                               pmbox->un.varWords[10]);
 
                if (pmb->mbox_cmpl)
                        pmb->mbox_cmpl(phba,pmb);
@@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr)
                lpfc_worker_wake_up(phba);
        else
                /* Restart the timer for next eratt poll */
-               mod_timer(&phba->eratt_poll, jiffies +
-                                       HZ * LPFC_ERATT_POLL_INTERVAL);
+               mod_timer(&phba->eratt_poll,
+                         jiffies +
+                         msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
        return;
 }
 
@@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
                        list_del_init(&rsrc_blk->list);
                        kfree(rsrc_blk);
                }
+               phba->sli4_hba.max_cfg_param.vpi_used = 0;
                break;
        case LPFC_RSC_TYPE_FCOE_XRI:
                kfree(phba->sli4_hba.xri_bmask);
@@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
        } else {
                kfree(phba->vpi_bmask);
+               phba->sli4_hba.max_cfg_param.vpi_used = 0;
                kfree(phba->vpi_ids);
                bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
                kfree(phba->sli4_hba.xri_bmask);
@@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
        struct lpfc_sglq *sglq_entry = NULL;
        struct lpfc_sglq *sglq_entry_next = NULL;
        struct lpfc_sglq *sglq_entry_first = NULL;
-       int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
+       int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
        int last_xritag = NO_XRI;
        LIST_HEAD(prep_sgl_list);
        LIST_HEAD(blck_sgl_list);
@@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
        list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
        spin_unlock_irq(&phba->hbalock);
 
+       total_cnt = phba->sli4_hba.els_xri_cnt;
        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
                                 &allc_sgl_list, list) {
                list_del_init(&sglq_entry->list);
@@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                                                sglq_entry->sli4_xritag);
                                        list_add_tail(&sglq_entry->list,
                                                      &free_sgl_list);
-                                       spin_lock_irq(&phba->hbalock);
-                                       phba->sli4_hba.els_xri_cnt--;
-                                       spin_unlock_irq(&phba->hbalock);
+                                       total_cnt--;
                                }
                        }
                }
@@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                                        (sglq_entry_first->sli4_xritag +
                                         post_cnt - 1));
                        list_splice_init(&blck_sgl_list, &free_sgl_list);
-                       spin_lock_irq(&phba->hbalock);
-                       phba->sli4_hba.els_xri_cnt -= post_cnt;
-                       spin_unlock_irq(&phba->hbalock);
+                       total_cnt -= post_cnt;
                }
 
                /* don't reset xritag due to hole in xri block */
@@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                /* reset els sgl post count for next round of posting */
                post_cnt = 0;
        }
+       /* update the number of XRIs posted for ELS */
+       phba->sli4_hba.els_xri_cnt = total_cnt;
 
        /* free the els sgls failed to post */
        lpfc_free_sgl_list(phba, &free_sgl_list);
@@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 
        /* Start the ELS watchdog timer */
        mod_timer(&vport->els_tmofunc,
-                 jiffies + HZ * (phba->fc_ratov * 2));
+                 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
 
        /* Start heart beat timer */
        mod_timer(&phba->hb_tmofunc,
-                 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
 
        /* Start error attention (ERATT) polling timer */
-       mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+       mod_timer(&phba->eratt_poll,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
        /* Enable PCIe device Advanced Error Reporting (AER) if configured */
        if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                        goto out_not_finished;
                }
                /* timeout active mbox command */
-               mod_timer(&psli->mbox_tmo, (jiffies +
-                              (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
+               timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+                                          1000);
+               mod_timer(&psli->mbox_tmo, jiffies + timeout);
        }
 
        /* Mailbox cmd <cmd> issue */
@@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
 
        /* Start timer for the mbox_tmo and log some mailbox post messages */
        mod_timer(&psli->mbox_tmo, (jiffies +
-                 (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
+                 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
 
        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                        "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 static inline uint32_t
 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
 {
-       int i;
-
-       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
-               i = smp_processor_id();
-       else
-               i = atomic_add_return(1, &phba->fcp_qidx);
+       struct lpfc_vector_map_info *cpup;
+       int chann, cpu;
 
-       i = (i % phba->cfg_fcp_io_channel);
-       return i;
+       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
+               cpu = smp_processor_id();
+               if (cpu < phba->sli4_hba.num_present_cpu) {
+                       cpup = phba->sli4_hba.cpu_map;
+                       cpup += cpu;
+                       return cpup->channel_id;
+               }
+               chann = cpu;
+       }
+       chann = atomic_add_return(1, &phba->fcp_qidx);
+       chann = (chann % phba->cfg_fcp_io_channel);
+       return chann;
 }
 
 /**
@@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 
        if ((piocb->iocb_flag & LPFC_IO_FCP) ||
                (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+               if (unlikely(!phba->sli4_hba.fcp_wq))
+                       return IOCB_ERROR;
                if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
                                     &wqe))
                        return IOCB_ERROR;
        } else {
+               if (unlikely(!phba->sli4_hba.els_wq))
+                       return IOCB_ERROR;
                if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
                        return IOCB_ERROR;
        }
@@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
        retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
                                     SLI_IOCB_RET_IOCB);
        if (retval == IOCB_SUCCESS) {
-               timeout_req = timeout * HZ;
+               timeout_req = msecs_to_jiffies(timeout * 1000);
                timeleft = wait_event_timeout(done_q,
                                lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
                                timeout_req);
@@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
        if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
                wait_event_interruptible_timeout(done_q,
                                pmboxq->mbox_flag & LPFC_MBX_WAKE,
-                               timeout * HZ);
+                               msecs_to_jiffies(timeout * 1000));
 
                spin_lock_irqsave(&phba->hbalock, flag);
                pmboxq->context1 = NULL;
@@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
                }
                wq->db_regaddr = bar_memmap_p + db_offset;
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "3264 WQ[%d]: barset:x%x, offset:x%x\n",
-                               wq->queue_id, pci_barset, db_offset);
+                               "3264 WQ[%d]: barset:x%x, offset:x%x, "
+                               "format:x%x\n", wq->queue_id, pci_barset,
+                               db_offset, wq->db_format);
        } else {
                wq->db_format = LPFC_DB_LIST_FORMAT;
                wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
@@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                }
                hrq->db_regaddr = bar_memmap_p + db_offset;
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
-                               hrq->queue_id, pci_barset, db_offset);
+                               "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
+                               "format:x%x\n", hrq->queue_id, pci_barset,
+                               db_offset, hrq->db_format);
        } else {
                hrq->db_format = LPFC_DB_RING_FORMAT;
                hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        }
 
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                       "2538 Received frame rctl:%s type:%s "
-                       "Frame Data:%08x %08x %08x %08x %08x %08x\n",
-                       rctl_names[fc_hdr->fh_r_ctl],
-                       type_names[fc_hdr->fh_type],
+                       "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
+                       "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+                       rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
+                       type_names[fc_hdr->fh_type], fc_hdr->fh_type,
                        be32_to_cpu(header[0]), be32_to_cpu(header[1]),
                        be32_to_cpu(header[2]), be32_to_cpu(header[3]),
-                       be32_to_cpu(header[4]), be32_to_cpu(header[5]));
+                       be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+                       be32_to_cpu(header[6]));
        return 0;
 drop:
        lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
index be02b59ea2797a53972a1795dcd4b0b1c156a846..67af460184badc9a2104cd477e099aeb8112a573 100644 (file)
@@ -346,11 +346,6 @@ struct lpfc_bmbx {
 #define SLI4_CT_VFI 2
 #define SLI4_CT_FCFI 3
 
-#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
-#define LPFC_SLI4_FL1_MAX_BUF_SIZE     0X2000
-#define LPFC_SLI4_MIN_BUF_SIZE         0x400
-#define LPFC_SLI4_MAX_BUF_SIZE         0x20000
-
 /*
  * SLI4 specific data structures
  */
@@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info {
 
 #define LPFC_SLI4_HANDLER_NAME_SZ      16
 
+/* Used for IRQ vector to CPU mapping */
+struct lpfc_vector_map_info {
+       uint16_t        phys_id;
+       uint16_t        core_id;
+       uint16_t        irq;
+       uint16_t        channel_id;
+       struct cpumask  maskbits;
+};
+#define LPFC_VECTOR_MAP_EMPTY  0xffff
+#define LPFC_MAX_CPU           256
+
 /* SLI4 HBA data structure entries */
 struct lpfc_sli4_hba {
        void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -573,6 +579,11 @@ struct lpfc_sli4_hba {
        struct lpfc_iov iov;
        spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
        spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+
+       /* CPU to vector mapping information */
+       struct lpfc_vector_map_info *cpu_map;
+       uint16_t num_online_cpu;
+       uint16_t num_present_cpu;
 };
 
 enum lpfc_sge_type {
index 664cd04f7cd8091dcaf0396f889b0d0852a4e6f8..a38dc3b169697ee2915cee01158334a240eb51b1 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.38"
+#define LPFC_DRIVER_VERSION "8.3.39"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 0fe188e66000da063c01feec76b557a075545258..e28e431564b08f5b90693c3dc49c5d218e67a262 100644 (file)
@@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
        }
 }
 
-static int
+int
 lpfc_alloc_vpi(struct lpfc_hba *phba)
 {
        unsigned long vpi;
@@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba   *phba = vport->phba;
        long timeout;
+       bool ns_ndlp_referenced = false;
 
        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 
        lpfc_debugfs_terminate(vport);
 
+       /*
+        * The call to fc_remove_host might release the NameServer ndlp. Since
+        * we might need to use the ndlp to send the DA_ID CT command,
+        * increment the reference for the NameServer ndlp to prevent it from
+        * being released.
+        */
+       ndlp = lpfc_findnode_did(vport, NameServer_DID);
+       if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+               lpfc_nlp_get(ndlp);
+               ns_ndlp_referenced = true;
+       }
+
        /* Remove FC host and then SCSI host with the vport */
        fc_remove_host(lpfc_shost_from_vport(vport));
        scsi_remove_host(lpfc_shost_from_vport(vport));
@@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
                lpfc_discovery_wait(vport);
 
 skip_logo:
+
+       /*
+        * If the NameServer ndlp has been incremented to allow the DA_ID CT
+        * command to be sent, decrement the ndlp now.
+        */
+       if (ns_ndlp_referenced) {
+               ndlp = lpfc_findnode_did(vport, NameServer_DID);
+               lpfc_nlp_put(ndlp);
+       }
+
        lpfc_cleanup(vport);
        lpfc_sli_host_down(vport);
 
index 90828340aceadbd275c7184b8ad465582e894e2a..6b2c94eb8134300e72b4d06571994c0c2fe5d32b 100644 (file)
@@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
 int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
 struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
 void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
+int lpfc_alloc_vpi(struct lpfc_hba *phba);
 
 /*
  *  queuecommand  VPORT-specific return codes. Specified in  the host byte code.
index 7c90d57b867e2c332e1a953a3c01892f971bd779..3a9ddae86f1f8e2d5e33474a7f0f3510ec413c65 100644 (file)
@@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
                printk(KERN_ERR "megaraid_sas: timed out while"
                        "waiting for HBA to recover\n");
                error = -ENODEV;
-               goto out_kfree_ioc;
+               goto out_up;
        }
        spin_unlock_irqrestore(&instance->hba_lock, flags);
 
        error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+      out_up:
        up(&instance->ioctl_sem);
 
       out_kfree_ioc:
index 74550922ad55e418eb7e37753e0983023728943b..7b7381d7671fdc9cfeecdf3c256984b0261d3116 100644 (file)
@@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
        }
        for (i = 0; i < MVS_MAX_DEVICES; i++) {
                mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
-               mvi->devices[i].dev_type = NO_DEVICE;
+               mvi->devices[i].dev_type = SAS_PHY_UNUSED;
                mvi->devices[i].device_id = i;
                mvi->devices[i].dev_status = MVS_DEV_NORMAL;
                init_timer(&mvi->devices[i].timer);
index 532110f4562af58b7517b7b72bfaf0d0f5e25653..c9e244984e30ec1dfed39c7b574b71caffeb0794 100644 (file)
@@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
        return 0;
 }
 
-#define        DEV_IS_GONE(mvi_dev)    ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
+#define        DEV_IS_GONE(mvi_dev)    ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
 static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
                                struct mvs_tmf_task *tmf, int *pass)
 {
@@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
                 * libsas will use dev->port, should
                 * not call task_done for sata
                 */
-               if (dev->dev_type != SATA_DEV)
+               if (dev->dev_type != SAS_SATA_DEV)
                        task->task_done(task);
                return rc;
        }
@@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
                        phy->identify.device_type =
                                phy->att_dev_info & PORT_DEV_TYPE_MASK;
 
-                       if (phy->identify.device_type == SAS_END_DEV)
+                       if (phy->identify.device_type == SAS_END_DEVICE)
                                phy->identify.target_port_protocols =
                                                        SAS_PROTOCOL_SSP;
-                       else if (phy->identify.device_type != NO_DEVICE)
+                       else if (phy->identify.device_type != SAS_PHY_UNUSED)
                                phy->identify.target_port_protocols =
                                                        SAS_PROTOCOL_SMP;
                        if (oob_done)
@@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
 {
        u32 dev;
        for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
-               if (mvi->devices[dev].dev_type == NO_DEVICE) {
+               if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
                        mvi->devices[dev].device_id = dev;
                        return &mvi->devices[dev];
                }
@@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev)
        u32 id = mvi_dev->device_id;
        memset(mvi_dev, 0, sizeof(*mvi_dev));
        mvi_dev->device_id = id;
-       mvi_dev->dev_type = NO_DEVICE;
+       mvi_dev->dev_type = SAS_PHY_UNUSED;
        mvi_dev->dev_status = MVS_DEV_NORMAL;
        mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
 }
@@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
 {
        int rc;
        struct sas_phy *phy = sas_get_local_phy(dev);
-       int reset_type = (dev->dev_type == SATA_DEV ||
+       int reset_type = (dev->dev_type == SAS_SATA_DEV ||
                        (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
        rc = sas_phy_reset(phy, reset_type);
        sas_put_local_phy(phy);
@@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
 
        } else if (task->task_proto & SAS_PROTOCOL_SATA ||
                task->task_proto & SAS_PROTOCOL_STP) {
-               if (SATA_DEV == dev->dev_type) {
+               if (SAS_SATA_DEV == dev->dev_type) {
                        struct mvs_slot_info *slot = task->lldd_task;
                        u32 slot_idx = (u32)(slot - mvi->slot_info);
                        mv_dprintk("mvs_abort_task() mvi=%p task=%p "
index 9f3cc13a5ce7ef70ca9c59c29530a8481fd679fc..60e2fb7f2dca7e37512b128bea9b59fd1c9cd579 100644 (file)
@@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch;
 extern struct kmem_cache *mvs_task_list_cache;
 
 #define DEV_IS_EXPANDER(type)  \
-       ((type == EDGE_DEV) || (type == FANOUT_DEV))
+       ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
 
 #define bit(n) ((u64)1 << n)
 
@@ -241,7 +241,7 @@ struct mvs_phy {
 
 struct mvs_device {
        struct list_head                dev_entry;
-       enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
        struct mvs_info *mvi_info;
        struct domain_device *sas_device;
        struct timer_list timer;
index 52f04296171c3e1c3dc0963f6e5e8a6b3037f92f..ce4cd87c7c662a6de92f71331e1e49052f4c8e4f 100644 (file)
@@ -4,9 +4,10 @@
 # Copyright (C) 2008-2009  USI Co., Ltd.
 
 
-obj-$(CONFIG_SCSI_PM8001) += pm8001.o
-pm8001-y += pm8001_init.o \
+obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
+pm80xx-y += pm8001_init.o \
                pm8001_sas.o  \
                pm8001_ctl.o  \
-               pm8001_hwi.o
+               pm8001_hwi.o  \
+               pm80xx_hwi.o
 
index 45bc197bc22f2a45c46f5176cabefddc69e077f2..d99f41c2ca13b927c66d34036937d3443b29f862 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-               pm8001_ha->main_cfg_tbl.interface_rev);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
+       }
 }
 static
 DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
@@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev));
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+       }
 }
 static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
 /**
@@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-                       pm8001_ha->main_cfg_tbl.max_out_io);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
+       }
 }
 static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
 /**
@@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%04d\n",
-                       (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16));
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
+                       );
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
+                       );
+       }
 }
 static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
 /**
@@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%04d\n",
-                       pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
+                       );
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
+                       );
+       }
 }
 static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
 
@@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
-       mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25;
+       /* fe000000 means supports SAS2.1 */
+       if (pm8001_ha->chip_id == chip_8001)
+               mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag &
+                                                       0xfe000000)>>25;
+       else
+               /* fe000000 means supports SAS2.1 */
+               mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag &
+                                                       0xfe000000)>>25;
        return show_sas_spec_support_status(mode, buf);
 }
 static DEVICE_ATTR(sas_spec_support, S_IRUGO,
@@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
                goto out;
        }
        payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
-       memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
+       memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
                                pm8001_ha->fw_image->size);
        payload->length = pm8001_ha->fw_image->size;
        payload->id = 0;
+       payload->minor_function = 0x1;
        pm8001_ha->nvmd_completion = &completion;
        ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
        wait_for_completion(&completion);
@@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
                        payload->length = 1024*16;
                        payload->id = 0;
                        fwControl =
-                             (struct fw_control_info *)payload->func_specific;
+                             (struct fw_control_info *)&payload->func_specific;
                        fwControl->len = IOCTL_BUF_SIZE;   /* IN */
                        fwControl->size = partitionSize + HEADER_LEN;/* IN */
                        fwControl->retcode = 0;/* OUT */
index c3d20c8d4abe3724308182697a558dac7440a07a..479c5a7a863a8aa8fded8e4164b8377d3658089d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
 
 enum chip_flavors {
        chip_8001,
+       chip_8008,
+       chip_8009,
+       chip_8018,
+       chip_8019
 };
-#define USI_MAX_MEMCNT                 9
-#define PM8001_MAX_DMA_SG              SG_ALL
+
 enum phy_speed {
        PHY_SPEED_15 = 0x01,
        PHY_SPEED_30 = 0x02,
@@ -69,23 +72,34 @@ enum port_type {
 #define PM8001_MPI_QUEUE         1024   /* maximum mpi queue entries */
 #define        PM8001_MAX_INB_NUM       1
 #define        PM8001_MAX_OUTB_NUM      1
+#define        PM8001_MAX_SPCV_INB_NUM         1
+#define        PM8001_MAX_SPCV_OUTB_NUM        4
 #define        PM8001_CAN_QUEUE         508    /* SCSI Queue depth */
 
+/* Inbound/Outbound queue size */
+#define IOMB_SIZE_SPC          64
+#define IOMB_SIZE_SPCV         128
+
 /* unchangeable hardware details */
-#define        PM8001_MAX_PHYS          8      /* max. possible phys */
-#define        PM8001_MAX_PORTS         8      /* max. possible ports */
-#define        PM8001_MAX_DEVICES       1024   /* max supported device */
+#define        PM8001_MAX_PHYS          16     /* max. possible phys */
+#define        PM8001_MAX_PORTS         16     /* max. possible ports */
+#define        PM8001_MAX_DEVICES       2048   /* max supported device */
+#define        PM8001_MAX_MSIX_VEC      64     /* max msi-x int for spcv/ve */
 
+#define USI_MAX_MEMCNT_BASE    5
+#define IB                     (USI_MAX_MEMCNT_BASE + 1)
+#define CI                     (IB + PM8001_MAX_SPCV_INB_NUM)
+#define OB                     (CI + PM8001_MAX_SPCV_INB_NUM)
+#define PI                     (OB + PM8001_MAX_SPCV_OUTB_NUM)
+#define USI_MAX_MEMCNT         (PI + PM8001_MAX_SPCV_OUTB_NUM)
+#define PM8001_MAX_DMA_SG      SG_ALL
 enum memory_region_num {
        AAP1 = 0x0, /* application acceleration processor */
        IOP,        /* IO processor */
-       CI,         /* consumer index */
-       PI,         /* producer index */
-       IB,         /* inbound queue */
-       OB,         /* outbound queue */
        NVMD,       /* NVM device */
        DEV_MEM,    /* memory for devices */
        CCB_MEM,    /* memory for command control block */
+       FW_FLASH    /* memory for fw flash update */
 };
 #define        PM8001_EVENT_LOG_SIZE    (128 * 1024)
 
index b8dd05074abb4f61faebbe0af12bd3074fd8b930..69dd49c05f1e1069b2aea0544734f52ea5126abb 100644 (file)
 static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
-       pm8001_ha->main_cfg_tbl.signature       = pm8001_mr32(address, 0x00);
-       pm8001_ha->main_cfg_tbl.interface_rev   = pm8001_mr32(address, 0x04);
-       pm8001_ha->main_cfg_tbl.firmware_rev    = pm8001_mr32(address, 0x08);
-       pm8001_ha->main_cfg_tbl.max_out_io      = pm8001_mr32(address, 0x0C);
-       pm8001_ha->main_cfg_tbl.max_sgl         = pm8001_mr32(address, 0x10);
-       pm8001_ha->main_cfg_tbl.ctrl_cap_flag   = pm8001_mr32(address, 0x14);
-       pm8001_ha->main_cfg_tbl.gst_offset      = pm8001_mr32(address, 0x18);
-       pm8001_ha->main_cfg_tbl.inbound_queue_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.signature    =
+                               pm8001_mr32(address, 0x00);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev =
+                               pm8001_mr32(address, 0x04);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev =
+                               pm8001_mr32(address, 0x08);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io   =
+                               pm8001_mr32(address, 0x0C);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl      =
+                               pm8001_mr32(address, 0x10);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag =
+                               pm8001_mr32(address, 0x14);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset   =
+                               pm8001_mr32(address, 0x18);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset =
                pm8001_mr32(address, MAIN_IBQ_OFFSET);
-       pm8001_ha->main_cfg_tbl.outbound_queue_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset =
                pm8001_mr32(address, MAIN_OBQ_OFFSET);
-       pm8001_ha->main_cfg_tbl.hda_mode_flag   =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag        =
                pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
 
        /* read analog Setting offset from the configuration table */
-       pm8001_ha->main_cfg_tbl.anolog_setup_table_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset =
                pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
 
        /* read Error Dump Offset and Length */
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
 }
 
@@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->general_stat_tbl_addr;
-       pm8001_ha->gs_tbl.gst_len_mpistate      = pm8001_mr32(address, 0x00);
-       pm8001_ha->gs_tbl.iq_freeze_state0      = pm8001_mr32(address, 0x04);
-       pm8001_ha->gs_tbl.iq_freeze_state1      = pm8001_mr32(address, 0x08);
-       pm8001_ha->gs_tbl.msgu_tcnt             = pm8001_mr32(address, 0x0C);
-       pm8001_ha->gs_tbl.iop_tcnt              = pm8001_mr32(address, 0x10);
-       pm8001_ha->gs_tbl.reserved              = pm8001_mr32(address, 0x14);
-       pm8001_ha->gs_tbl.phy_state[0]  = pm8001_mr32(address, 0x18);
-       pm8001_ha->gs_tbl.phy_state[1]  = pm8001_mr32(address, 0x1C);
-       pm8001_ha->gs_tbl.phy_state[2]  = pm8001_mr32(address, 0x20);
-       pm8001_ha->gs_tbl.phy_state[3]  = pm8001_mr32(address, 0x24);
-       pm8001_ha->gs_tbl.phy_state[4]  = pm8001_mr32(address, 0x28);
-       pm8001_ha->gs_tbl.phy_state[5]  = pm8001_mr32(address, 0x2C);
-       pm8001_ha->gs_tbl.phy_state[6]  = pm8001_mr32(address, 0x30);
-       pm8001_ha->gs_tbl.phy_state[7]  = pm8001_mr32(address, 0x34);
-       pm8001_ha->gs_tbl.reserved1             = pm8001_mr32(address, 0x38);
-       pm8001_ha->gs_tbl.reserved2             = pm8001_mr32(address, 0x3C);
-       pm8001_ha->gs_tbl.reserved3             = pm8001_mr32(address, 0x40);
-       pm8001_ha->gs_tbl.recover_err_info[0]   = pm8001_mr32(address, 0x44);
-       pm8001_ha->gs_tbl.recover_err_info[1]   = pm8001_mr32(address, 0x48);
-       pm8001_ha->gs_tbl.recover_err_info[2]   = pm8001_mr32(address, 0x4C);
-       pm8001_ha->gs_tbl.recover_err_info[3]   = pm8001_mr32(address, 0x50);
-       pm8001_ha->gs_tbl.recover_err_info[4]   = pm8001_mr32(address, 0x54);
-       pm8001_ha->gs_tbl.recover_err_info[5]   = pm8001_mr32(address, 0x58);
-       pm8001_ha->gs_tbl.recover_err_info[6]   = pm8001_mr32(address, 0x5C);
-       pm8001_ha->gs_tbl.recover_err_info[7]   = pm8001_mr32(address, 0x60);
+       pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate   =
+                               pm8001_mr32(address, 0x00);
+       pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0   =
+                               pm8001_mr32(address, 0x04);
+       pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1   =
+                               pm8001_mr32(address, 0x08);
+       pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt          =
+                               pm8001_mr32(address, 0x0C);
+       pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt           =
+                               pm8001_mr32(address, 0x10);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd               =
+                               pm8001_mr32(address, 0x14);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0]       =
+                               pm8001_mr32(address, 0x18);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1]       =
+                               pm8001_mr32(address, 0x1C);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2]       =
+                               pm8001_mr32(address, 0x20);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3]       =
+                               pm8001_mr32(address, 0x24);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4]       =
+                               pm8001_mr32(address, 0x28);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5]       =
+                               pm8001_mr32(address, 0x2C);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6]       =
+                               pm8001_mr32(address, 0x30);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7]       =
+                               pm8001_mr32(address, 0x34);
+       pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val     =
+                               pm8001_mr32(address, 0x38);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0]           =
+                               pm8001_mr32(address, 0x3C);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1]           =
+                               pm8001_mr32(address, 0x40);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0]        =
+                               pm8001_mr32(address, 0x44);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1]        =
+                               pm8001_mr32(address, 0x48);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2]        =
+                               pm8001_mr32(address, 0x4C);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3]        =
+                               pm8001_mr32(address, 0x50);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4]        =
+                               pm8001_mr32(address, 0x54);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5]        =
+                               pm8001_mr32(address, 0x58);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6]        =
+                               pm8001_mr32(address, 0x5C);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7]        =
+                               pm8001_mr32(address, 0x60);
 }
 
 /**
@@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
 {
-       int inbQ_num = 1;
        int i;
        void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
-       for (i = 0; i < inbQ_num; i++) {
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
                u32 offset = i * 0x20;
                pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
                      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
 {
-       int outbQ_num = 1;
        int i;
        void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
-       for (i = 0; i < outbQ_num; i++) {
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
                u32 offset = i * 0x24;
                pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
                      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 {
-       int qn = 1;
        int i;
        u32 offsetib, offsetob;
        void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
        void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
 
-       pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd                     = 0;
-       pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3                = 0;
-       pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7                = 0;
-       pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3               = 0;
-       pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7               = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3       = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7       = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7   = 0;
-
-       pm8001_ha->main_cfg_tbl.upper_event_log_addr            =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd          = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3     = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7     = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3    = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7    = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 =
+                                                                        0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 =
+                                                                        0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0;
+
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr         =
                pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
-       pm8001_ha->main_cfg_tbl.lower_event_log_addr            =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr         =
                pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
-       pm8001_ha->main_cfg_tbl.event_log_size  = PM8001_EVENT_LOG_SIZE;
-       pm8001_ha->main_cfg_tbl.event_log_option                = 0x01;
-       pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr        =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size               =
+               PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option             = 0x01;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr     =
                pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
-       pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr        =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr     =
                pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
-       pm8001_ha->main_cfg_tbl.iop_event_log_size      = PM8001_EVENT_LOG_SIZE;
-       pm8001_ha->main_cfg_tbl.iop_event_log_option            = 0x01;
-       pm8001_ha->main_cfg_tbl.fatal_err_interrupt             = 0x01;
-       for (i = 0; i < qn; i++) {
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size           =
+               PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option         = 0x01;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt          = 0x01;
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
                pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
                        PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
                pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
-                       pm8001_ha->memoryMap.region[IB].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
                pm8001_ha->inbnd_q_tbl[i].lower_base_addr       =
-               pm8001_ha->memoryMap.region[IB].phys_addr_lo;
+               pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
                pm8001_ha->inbnd_q_tbl[i].base_virt             =
-                       (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr;
+                       (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
                pm8001_ha->inbnd_q_tbl[i].total_length          =
-                       pm8001_ha->memoryMap.region[IB].total_len;
+                       pm8001_ha->memoryMap.region[IB + i].total_len;
                pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr    =
-                       pm8001_ha->memoryMap.region[CI].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
                pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr    =
-                       pm8001_ha->memoryMap.region[CI].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
                pm8001_ha->inbnd_q_tbl[i].ci_virt               =
-                       pm8001_ha->memoryMap.region[CI].virt_ptr;
+                       pm8001_ha->memoryMap.region[CI + i].virt_ptr;
                offsetib = i * 0x20;
                pm8001_ha->inbnd_q_tbl[i].pi_pci_bar            =
                        get_pci_bar_index(pm8001_mr32(addressib,
@@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
                pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
                pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
        }
-       for (i = 0; i < qn; i++) {
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
                pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
                        PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
                pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
-                       pm8001_ha->memoryMap.region[OB].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
                pm8001_ha->outbnd_q_tbl[i].lower_base_addr      =
-                       pm8001_ha->memoryMap.region[OB].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
                pm8001_ha->outbnd_q_tbl[i].base_virt            =
-                       (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr;
+                       (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
                pm8001_ha->outbnd_q_tbl[i].total_length         =
-                       pm8001_ha->memoryMap.region[OB].total_len;
+                       pm8001_ha->memoryMap.region[OB + i].total_len;
                pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr   =
-                       pm8001_ha->memoryMap.region[PI].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
                pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr   =
-                       pm8001_ha->memoryMap.region[PI].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
                pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay       =
-                       0 | (10 << 16) | (0 << 24);
+                       0 | (10 << 16) | (i << 24);
                pm8001_ha->outbnd_q_tbl[i].pi_virt              =
-                       pm8001_ha->memoryMap.region[PI].virt_ptr;
+                       pm8001_ha->memoryMap.region[PI + i].virt_ptr;
                offsetob = i * 0x24;
                pm8001_ha->outbnd_q_tbl[i].ci_pci_bar           =
                        get_pci_bar_index(pm8001_mr32(addressob,
@@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
        pm8001_mw32(address, 0x24,
-               pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd);
        pm8001_mw32(address, 0x28,
-               pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3);
        pm8001_mw32(address, 0x2C,
-               pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7);
        pm8001_mw32(address, 0x30,
-               pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3);
        pm8001_mw32(address, 0x34,
-               pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7);
        pm8001_mw32(address, 0x38,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ITNexus_event_pid0_3);
        pm8001_mw32(address, 0x3C,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ITNexus_event_pid4_7);
        pm8001_mw32(address, 0x40,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ssp_event_pid0_3);
        pm8001_mw32(address, 0x44,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ssp_event_pid4_7);
        pm8001_mw32(address, 0x48,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_smp_event_pid0_3);
        pm8001_mw32(address, 0x4C,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_smp_event_pid4_7);
        pm8001_mw32(address, 0x50,
-               pm8001_ha->main_cfg_tbl.upper_event_log_addr);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr);
        pm8001_mw32(address, 0x54,
-               pm8001_ha->main_cfg_tbl.lower_event_log_addr);
-       pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size);
-       pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr);
+       pm8001_mw32(address, 0x58,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
+       pm8001_mw32(address, 0x5C,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option);
        pm8001_mw32(address, 0x60,
-               pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr);
        pm8001_mw32(address, 0x64,
-               pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr);
-       pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr);
+       pm8001_mw32(address, 0x68,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size);
        pm8001_mw32(address, 0x6C,
-               pm8001_ha->main_cfg_tbl.iop_event_log_option);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option);
        pm8001_mw32(address, 0x70,
-               pm8001_ha->main_cfg_tbl.fatal_err_interrupt);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt);
 }
 
 /**
@@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
  */
 static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
 {
+       u8 i = 0;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+       /* 8081 controllers need BAR shift to access MPI space
+        * as this is shared with BIOS data */
+       if (deviceid == 0x8081) {
+               if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("Shift Bar4 to 0x%x failed\n",
+                                       GSM_SM_BASE));
+                       return -1;
+               }
+       }
        /* check the firmware status */
        if (-1 == check_fw_ready(pm8001_ha)) {
                PM8001_FAIL_DBG(pm8001_ha,
@@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
        read_outbnd_queue_table(pm8001_ha);
        /* update main config table, inbound table and outbound table */
        update_main_config_table(pm8001_ha);
-       update_inbnd_queue_table(pm8001_ha, 0);
-       update_outbnd_queue_table(pm8001_ha, 0);
-       mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
-       /* 7->130ms, 34->500ms, 119->1.5s */
-       mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+               update_inbnd_queue_table(pm8001_ha, i);
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+               update_outbnd_queue_table(pm8001_ha, i);
+       /* 8081 controllers do not require these operations */
+       if (deviceid != 0x8081) {
+               mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
+               /* 7->130ms, 34->500ms, 119->1.5s */
+               mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+       }
        /* notify firmware update finished and check initialization status */
        if (0 == mpi_init_check(pm8001_ha)) {
                PM8001_INIT_DBG(pm8001_ha,
@@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
        u32 max_wait_count;
        u32 value;
        u32 gst_len_mpistate;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+       if (deviceid == 0x8081) {
+               if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("Shift Bar4 to 0x%x failed\n",
+                                       GSM_SM_BASE));
+                       return -1;
+               }
+       }
        init_pci_device_addresses(pm8001_ha);
        /* Write bit1=1 to the Inbound DoorBell Register to tell the SPC FW
        that the table is stopped */
@@ -740,14 +810,14 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
  * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that all the FW
  * register status is cleared back to its original state.
  * @pm8001_ha: our hba card information
- * @signature: signature in host scratch pad0 register.
  */
 static int
-pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
+pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
 {
        u32     regVal, toggleVal;
        u32     max_wait_count;
        u32     regVal1, regVal2, regVal3;
+       u32     signature = 0x252acbcd; /* for host scratch pad0 */
        unsigned long flags;
 
        /* step1: Check FW is ready for soft reset */
@@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
  * pm8001_chip_iounmap - unmap regions which were mapped when initialized.
  * @pm8001_ha: our hba card information
  */
-static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
 {
        s8 bar, logical = 0;
        for (bar = 0; bar < 6; bar++) {
@@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
  * @pm8001_ha: our hba card information
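+ * @vec: interrupt vector number (the SPC path here still enables vector 0 only)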
  */
 static void
-pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
        pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
@@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
  * @pm8001_ha: our hba card information
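+ * @vec: interrupt vector number (the SPC path here still disables vector 0 only)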
  */
 static void
-pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
        pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
@@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * mpi_msg_free_get- get the free message buffer for transfer inbound queue.
+ * pm8001_mpi_msg_free_get - get the free message buffer for transfer
+ * inbound queue.
  * @circularQ: the inbound queue  we want to transfer to HBA.
  * @messageSize: the message size of this transfer, normally it is 64 bytes
  * @messagePtr: the pointer to message.
  */
-static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
                            u16 messageSize, void **messagePtr)
 {
        u32 offset, consumer_index;
@@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
        u8 bcCount = 1; /* only support single buffer */
 
        /* Check if the requested message size can be allocated in this queue */
-       if (messageSize > 64) {
+       if (messageSize > IOMB_SIZE_SPCV) {
                *messagePtr = NULL;
                return -1;
        }
@@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
                return -1;
        }
        /* get memory IOMB buffer address */
-       offset = circularQ->producer_idx * 64;
+       offset = circularQ->producer_idx * messageSize;
        /* increment to next bcCount element */
        circularQ->producer_idx = (circularQ->producer_idx + bcCount)
                                % PM8001_MPI_QUEUE;
@@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
 }
 
 /**
- * mpi_build_cmd- build the message queue for transfer, update the PI to FW
- * to tell the fw to get this message from IOMB.
+ * pm8001_mpi_build_cmd - build the message queue for transfer, update the PI
+ * to tell the FW to get this message from the IOMB.
  * @pm8001_ha: our hba card information
  * @circularQ: the inbound queue we want to transfer to HBA.
  * @opCode: the operation code represents commands which LLDD and fw recognized.
  * @payload: the command payload of each operation command.
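+ * @responseQueue: number of the outbound queue the response is directed to.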
  */
-static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
                         struct inbound_queue_table *circularQ,
-                        u32 opCode, void *payload)
+                        u32 opCode, void *payload, u32 responseQueue)
 {
        u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
-       u32 responseQueue = 0;
        void *pMessage;
 
-       if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
+       if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
+               &pMessage) < 0) {
                PM8001_IO_DBG(pm8001_ha,
                        pm8001_printk("No free mpi buffer\n"));
                return -1;
        }
        BUG_ON(!payload);
        /*Copy to the payload*/
-       memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr)));
+       memcpy(pMessage, payload, (pm8001_ha->iomb_size -
+                               sizeof(struct mpi_msg_hdr)));
 
        /*Build the header*/
        Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
@@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
        pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
                circularQ->pi_offset, circularQ->producer_idx);
        PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx,
-               circularQ->consumer_index));
+               pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
+                       responseQueue, opCode, circularQ->producer_idx,
+                       circularQ->consumer_index));
        return 0;
 }
 
-static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
                            struct outbound_queue_table *circularQ, u8 bc)
 {
        u32 producer_index;
@@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
 
        msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
        pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
-                               circularQ->consumer_idx * 64);
+                               circularQ->consumer_idx * pm8001_ha->iomb_size);
        if (pOutBoundMsgHeader != msgHeader) {
                PM8001_FAIL_DBG(pm8001_ha,
                        pm8001_printk("consumer_idx = %d msgHeader = %p\n",
@@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
 }
 
 /**
- * mpi_msg_consume- get the MPI message from  outbound queue message table.
+ * pm8001_mpi_msg_consume - get the MPI message from outbound queue
+ * message table.
  * @pm8001_ha: our hba card information
  * @circularQ: the outbound queue  table.
  * @messagePtr1: the message contents of this outbound message.
  * @pBC: the message size.
  */
-static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
                           struct outbound_queue_table *circularQ,
                           void **messagePtr1, u8 *pBC)
 {
@@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
                        /*Get the pointer to the circular queue buffer element*/
                        msgHeader = (struct mpi_msg_hdr *)
                                (circularQ->base_virt +
-                               circularQ->consumer_idx * 64);
+                               circularQ->consumer_idx * pm8001_ha->iomb_size);
                        /* read header */
                        header_tmp = pm8001_read_32(msgHeader);
                        msgHeader_tmp = cpu_to_le32(header_tmp);
@@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
        return MPI_IO_STATUS_BUSY;
 }
 
-static void pm8001_work_fn(struct work_struct *work)
+void pm8001_work_fn(struct work_struct *work)
 {
        struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
        struct pm8001_device *pm8001_dev;
@@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work)
        pm8001_dev = pw->data; /* Most stash device structure */
        if ((pm8001_dev == NULL)
         || ((pw->handler != IO_XFER_ERROR_BREAK)
-         && (pm8001_dev->dev_type == NO_DEVICE))) {
+         && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
                kfree(pw);
                return;
        }
@@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work)
        }       break;
        case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
                dev = pm8001_dev->sas_device;
-               pm8001_I_T_nexus_reset(dev);
+               pm8001_I_T_nexus_event_handler(dev);
                break;
        case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
                dev = pm8001_dev->sas_device;
@@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work)
        kfree(pw);
 }
 
-static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
                               int handler)
 {
        struct pm8001_work *pw;
@@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
        return ret;
 }
 
+static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct task_abort_req task_abort;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_ABORT;
+       int ret;
+
+       if (!pm8001_ha_dev) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+               return;
+       }
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+                                               "allocate task\n"));
+               return;
+       }
+
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res)
+               return;
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&task_abort, 0, sizeof(task_abort));
+       task_abort.abort_all = cpu_to_le32(1);
+       task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       task_abort.tag = cpu_to_le32(ccb_tag);
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+}
+
+static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       struct sata_start_req sata_cmd;
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct host_to_dev_fis fis;
+       struct domain_device *dev;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate task !!!\n"));
+               return;
+       }
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate tag !!!\n"));
+               return;
+       }
+
+       /* allocate domain device by ourselves as libsas
+        * is not going to provide any
+        */
+       dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+       if (!dev) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Domain device cannot be allocated\n"));
+               sas_free_task(task);
+               return;
+       } else {
+               task->dev = dev;
+               task->dev->lldd_dev = pm8001_ha_dev;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
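+       /* mark device: READ LOG EXT issued for NCQ error recovery */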
+       pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+       pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* construct read log FIS */
+       memset(&fis, 0, sizeof(struct host_to_dev_fis));
+       fis.fis_type = 0x27;
+       fis.flags = 0x80;
+       fis.command = ATA_CMD_READ_LOG_EXT;
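+       /* log page 10h (NCQ Command Error log), one 512-byte page */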
+       fis.lbal = 0x10;
+       fis.sector_count = 0x1;
+
+       sata_cmd.tag = cpu_to_le32(ccb_tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
+       memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+       res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+}
+
 /**
  * mpi_ssp_completion- process the event that FW response to the SSP request.
  * @pm8001_ha: our hba card information
@@ -1867,7 +2058,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
                break;
        }
        PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("scsi_status = %x \n ",
+               pm8001_printk("scsi_status = %x\n ",
                psspPayload->ssp_resp_iu.status));
        spin_lock_irqsave(&t->task_state_lock, flags);
        t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
@@ -2096,16 +2287,44 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        status = le32_to_cpu(psataPayload->status);
        tag = le32_to_cpu(psataPayload->tag);
 
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("tag null\n"));
+               return;
+       }
        ccb = &pm8001_ha->ccb_info[tag];
        param = le32_to_cpu(psataPayload->param);
-       t = ccb->task;
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ccb null\n"));
+               return;
+       }
+
+       if (t) {
+               if (t->dev && (t->dev->lldd_dev))
+                       pm8001_dev = t->dev->lldd_dev;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task null\n"));
+               return;
+       }
+
+       if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+               && unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
        ts = &t->task_status;
-       pm8001_dev = ccb->device;
-       if (status)
+       if (!ts) {
                PM8001_FAIL_DBG(pm8001_ha,
-                       pm8001_printk("sata IO status 0x%x\n", status));
-       if (unlikely(!t || !t->lldd_task || !t->dev))
+                       pm8001_printk("ts null\n"));
                return;
+       }
 
        switch (status) {
        case IO_SUCCESS:
@@ -2113,6 +2332,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                if (param == 0) {
                        ts->resp = SAS_TASK_COMPLETE;
                        ts->stat = SAM_STAT_GOOD;
+                       /* check if response is for SEND READ LOG */
+                       if (pm8001_dev &&
+                               (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+                               /* set new bit for abort_all */
+                               pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+                               /* clear bit for read log */
+                               pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+                               pm8001_send_abort_all(pm8001_ha, pm8001_dev);
+                               /* Free the tag */
+                               pm8001_tag_free(pm8001_ha, tag);
+                               sas_free_task(t);
+                               return;
+                       }
                } else {
                        u8 len;
                        ts->resp = SAS_TASK_COMPLETE;
@@ -2423,6 +2655,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        u32 dev_id = le32_to_cpu(psataPayload->device_id);
        unsigned long flags;
 
+       ccb = &pm8001_ha->ccb_info[tag];
+
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("No CCB !!!. returning\n"));
+       }
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("SATA EVENT 0x%x\n", event));
+
+       /* Check if this is NCQ error */
+       if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+               /* find device using device id */
+               pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+               /* send read log extension */
+               if (pm8001_dev)
+                       pm8001_send_read_log(pm8001_ha, pm8001_dev);
+               return;
+       }
+
        ccb = &pm8001_ha->ccb_info[tag];
        t = ccb->task;
        pm8001_dev = ccb->device;
@@ -2432,9 +2687,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        if (unlikely(!t || !t->lldd_task || !t->dev))
                return;
        ts = &t->task_status;
-       PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("port_id = %x,device_id = %x\n",
-               port_id, dev_id));
+       PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+               "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
+               port_id, dev_id, tag, event));
        switch (event) {
        case IO_OVERFLOW:
                PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
@@ -2822,8 +3077,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        }
 }
 
-static void
-mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+               void *piomb)
 {
        struct set_dev_state_resp *pPayload =
                (struct set_dev_state_resp *)(piomb + 4);
@@ -2843,8 +3098,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static void
-mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct get_nvm_data_resp *pPayload =
                (struct get_nvm_data_resp *)(piomb + 4);
@@ -2863,8 +3117,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static void
-mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void
+pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct fw_control_ex    *fw_control_context;
        struct get_nvm_data_resp *pPayload =
@@ -2925,7 +3179,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct local_phy_ctl_resp *pPayload =
                (struct local_phy_ctl_resp *)(piomb + 4);
@@ -2954,7 +3208,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
  * while receiving a broadcast (change) primitive, just tell the sas
  * layer to discover the changed domain rather than the whole domain.
  */
-static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
 {
        struct pm8001_phy *phy = &pm8001_ha->phy[i];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -2988,7 +3242,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
 }
 
 /* Get the link rate speed  */
-static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
 {
        struct sas_phy *sas_phy = phy->sas_phy.phy;
 
@@ -3025,7 +3279,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
  * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
  * buffer.
  */
-static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
        u8 *sas_addr)
 {
        if (phy->sas_phy.frame_rcvd[0] == 0x34
@@ -3067,7 +3321,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
                ((phyId & 0x0F) << 4) | (port_id & 0x0F));
        payload.param0 = cpu_to_le32(param0);
        payload.param1 = cpu_to_le32(param1);
-       mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 }
 
 static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3112,19 +3366,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
                        PHY_NOTIFY_ENABLE_SPINUP);
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        case SAS_EDGE_EXPANDER_DEVICE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("expander device.\n"));
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        case SAS_FANOUT_EXPANDER_DEVICE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("fanout expander device.\n"));
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        default:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3179,7 +3433,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                " phy id = %d\n", port_id, phy_id));
        port->port_state =  portstate;
        port->port_attached = 1;
-       get_lrate_mode(phy, link_rate);
+       pm8001_get_lrate_mode(phy, link_rate);
        phy->phy_type |= PORT_TYPE_SATA;
        phy->phy_attached = 1;
        phy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -3189,7 +3443,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                sizeof(struct dev_to_host_fis));
        phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
        phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
-       phy->identify.device_type = SATA_DEV;
+       phy->identify.device_type = SAS_SATA_DEV;
        pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
        spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
        pm8001_bytes_dmaed(pm8001_ha, phy_id);
@@ -3260,7 +3514,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 }
 
 /**
- * mpi_reg_resp -process register device ID response.
+ * pm8001_mpi_reg_resp - process register device ID response.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  *
@@ -3269,7 +3523,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
  * has assigned; from now on, inter-communication with the FW no longer uses
  * the SAS address, but the device ID which the FW assigned.
  */
-static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        u32 status;
        u32 device_id;
@@ -3331,7 +3585,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        u32 status;
        u32 device_id;
@@ -3347,8 +3601,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int
-mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+/**
+ * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+               void *piomb)
 {
        u32 status;
        struct fw_control_ex    fw_control_context;
@@ -3403,10 +3662,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
                break;
        }
        ccb->fw_control_context->fw_control->retcode = status;
-       pci_free_consistent(pm8001_ha->pdev,
-                       fw_control_context.len,
-                       fw_control_context.virtAddr,
-                       fw_control_context.phys_addr);
        complete(pm8001_ha->nvmd_completion);
        ccb->task = NULL;
        ccb->ccb_tag = 0xFFFFFFFF;
@@ -3414,8 +3669,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int
-mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        u32 status;
        int i;
@@ -3431,8 +3685,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        return 0;
 }
 
-static int
-mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct sas_task *t;
        struct pm8001_ccb_info *ccb;
@@ -3440,19 +3693,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        u32 status ;
        u32 tag, scp;
        struct task_status_struct *ts;
+       struct pm8001_device *pm8001_dev;
 
        struct task_abort_resp *pPayload =
                (struct task_abort_resp *)(piomb + 4);
 
        status = le32_to_cpu(pPayload->status);
        tag = le32_to_cpu(pPayload->tag);
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TAG NULL. RETURNING !!!"));
+               return -1;
+       }
+
        scp = le32_to_cpu(pPayload->scp);
        ccb = &pm8001_ha->ccb_info[tag];
        t = ccb->task;
-       PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk(" status = 0x%x\n", status));
-       if (t == NULL)
+       pm8001_dev = ccb->device; /* retrieve device */
+
+       if (!t) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TASK NULL. RETURNING !!!"));
                return -1;
+       }
        ts = &t->task_status;
        if (status != 0)
                PM8001_FAIL_DBG(pm8001_ha,
@@ -3476,7 +3739,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        spin_unlock_irqrestore(&t->task_state_lock, flags);
        pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
        mb();
-       t->task_done(t);
+
+       if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t) {
+               pm8001_tag_free(pm8001_ha, tag);
+               sas_free_task(t);
+               /* clear the NCQ_ABORT_ALL_FLAG */
+               pm8001_dev->id &= 0xBFFFFFFF;
+       } else
+               t->task_done(t);
+
        return 0;
 }
 
@@ -3727,17 +3998,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_LOCAL_PHY_CNTRL:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
-               mpi_local_phy_ctl(pm8001_ha, piomb);
+               pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEV_REGIST:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_DEV_REGIST\n"));
-               mpi_reg_resp(pm8001_ha, piomb);
+               pm8001_mpi_reg_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEREG_DEV:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("unregister the device\n"));
-               mpi_dereg_resp(pm8001_ha, piomb);
+               pm8001_mpi_dereg_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_DEV_HANDLE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3775,7 +4046,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_FW_FLASH_UPDATE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
-               mpi_fw_flash_update_resp(pm8001_ha, piomb);
+               pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GPIO_RESPONSE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3788,17 +4059,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_GENERAL_EVENT:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
-               mpi_general_event(pm8001_ha, piomb);
+               pm8001_mpi_general_event(pm8001_ha, piomb);
                break;
        case OPC_OUB_SSP_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SATA_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SAS_DIAG_MODE_START_END:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3823,17 +4094,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_SMP_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_NVMD_DATA:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
-               mpi_get_nvmd_resp(pm8001_ha, piomb);
+               pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SET_NVMD_DATA:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
-               mpi_set_nvmd_resp(pm8001_ha, piomb);
+               pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEVICE_HANDLE_REMOVAL:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3842,7 +4113,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_SET_DEVICE_STATE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
-               mpi_set_dev_state_resp(pm8001_ha, piomb);
+               pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_DEVICE_STATE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3864,7 +4135,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        }
 }
 
-static int process_oq(struct pm8001_hba_info *pm8001_ha)
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
        struct outbound_queue_table *circularQ;
        void *pMsg1 = NULL;
@@ -3873,14 +4144,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
        unsigned long flags;
 
        spin_lock_irqsave(&pm8001_ha->lock, flags);
-       circularQ = &pm8001_ha->outbnd_q_tbl[0];
+       circularQ = &pm8001_ha->outbnd_q_tbl[vec];
        do {
-               ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+               ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
                if (MPI_IO_STATUS_SUCCESS == ret) {
                        /* process the outbound message */
                        process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
                        /* free the message from the outbound circular buffer */
-                       mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
+                       pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+                                                       circularQ, bc);
                }
                if (MPI_IO_STATUS_BUSY == ret) {
                        /* Update the producer index from SPC */
@@ -3903,7 +4175,7 @@ static const u8 data_dir_flags[] = {
        [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN,/* INBOUND */
        [PCI_DMA_NONE]          = DATA_DIR_NONE,/* NO TRANSFER */
 };
-static void
+void
 pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
 {
        int i;
@@ -3978,7 +4250,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
        smp_cmd.long_smp_req.long_resp_size =
                cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
        build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
-       mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
        return 0;
 
 err_out_2:
@@ -4042,7 +4314,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
                ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
                ssp_cmd.esgl = 0;
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
        return ret;
 }
 
@@ -4060,6 +4332,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
        u32 ATAP = 0x0;
        u32 dir;
        struct inbound_queue_table *circularQ;
+       unsigned long flags;
        u32  opc = OPC_INB_SATA_HOST_OPSTART;
        memset(&sata_cmd, 0, sizeof(sata_cmd));
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -4080,8 +4353,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                        PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
                }
        }
-       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
+       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
                ncg_tag = hdr_tag;
+       }
        dir = data_dir_flags[task->data_dir] << 8;
        sata_cmd.tag = cpu_to_le32(tag);
        sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -4112,7 +4387,55 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                sata_cmd.len = cpu_to_le32(task->total_xfer_len);
                sata_cmd.esgl = 0;
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd);
+
+       /* Check for read log for failed drive and return */
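+       /* 0x2f is ATA_CMD_READ_LOG_EXT */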
+       if (sata_cmd.sata_fis.command == 0x2f) {
+               if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+                       struct task_status_struct *ts;
+
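+                       /* clear bit 29 (presumably NCQ_2ND_RLE_FLAG) */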
+                       pm8001_ha_dev->id &= 0xDFFFFFFF;
+                       ts = &task->task_status;
+
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+                       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+                       task->task_state_flags |= SAS_TASK_STATE_DONE;
+                       if (unlikely((task->task_state_flags &
+                                       SAS_TASK_STATE_ABORTED))) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("task 0x%p resp 0x%x "
+                                       " stat 0x%x but aborted by upper layer "
+                                       "\n", task, ts->resp, ts->stat));
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                       } else if (task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/* ditto */
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       } else if (!task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/*ditto*/
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       }
+               }
+       }
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
        return ret;
 }
 
@@ -4142,12 +4465,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
        payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
                LINKMODE_AUTO | LINKRATE_15 |
                LINKRATE_30 | LINKRATE_60 | phy_id);
-       payload.sas_identify.dev_type = SAS_END_DEV;
+       payload.sas_identify.dev_type = SAS_END_DEVICE;
        payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
        memcpy(payload.sas_identify.sas_addr,
                pm8001_ha->sas_addr, SAS_ADDR_SIZE);
        payload.sas_identify.phy_id = phy_id;
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
        return ret;
 }
 
@@ -4157,7 +4480,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
  * @num: the inbound queue number
  * @phy_id: the phy id which we wanted to start up.
  */
-static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
        u8 phy_id)
 {
        struct phy_stop_req payload;
@@ -4169,12 +4492,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
        memset(&payload, 0, sizeof(payload));
        payload.tag = cpu_to_le32(tag);
        payload.phy_id = cpu_to_le32(phy_id);
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
        return ret;
 }
 
 /**
- * see comments on mpi_reg_resp.
+ * see comments on pm8001_mpi_reg_resp.
  */
 static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u32 flag)
@@ -4204,11 +4527,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
        if (flag == 1)
                stp_sspsmp_sata = 0x02; /*direct attached sata */
        else {
-               if (pm8001_dev->dev_type == SATA_DEV)
+               if (pm8001_dev->dev_type == SAS_SATA_DEV)
                        stp_sspsmp_sata = 0x00; /* stp*/
-               else if (pm8001_dev->dev_type == SAS_END_DEV ||
-                       pm8001_dev->dev_type == EDGE_DEV ||
-                       pm8001_dev->dev_type == FANOUT_DEV)
+               else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+                       pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                       pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                        stp_sspsmp_sata = 0x01; /*ssp or smp*/
        }
        if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
@@ -4228,14 +4551,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
                cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
        memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
                SAS_ADDR_SIZE);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 }
 
 /**
- * see comments on mpi_reg_resp.
+ * see comments on pm8001_mpi_reg_resp.
  */
-static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
        u32 device_id)
 {
        struct dereg_dev_req payload;
@@ -4249,7 +4572,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
        payload.device_id = cpu_to_le32(device_id);
        PM8001_MSG_DBG(pm8001_ha,
                pm8001_printk("unregister device device_id = %d\n", device_id));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
@@ -4272,7 +4595,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
        payload.tag = cpu_to_le32(1);
        payload.phyop_phyid =
                cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
@@ -4296,11 +4619,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
  * @vec: the interrupt vector (outbound queue) to service.
  */
 static irqreturn_t
-pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
-       pm8001_chip_interrupt_disable(pm8001_ha);
-       process_oq(pm8001_ha);
-       pm8001_chip_interrupt_enable(pm8001_ha);
+       pm8001_chip_interrupt_disable(pm8001_ha, vec);
+       process_oq(pm8001_ha, vec);
+       pm8001_chip_interrupt_enable(pm8001_ha, vec);
        return IRQ_HANDLED;
 }
 
@@ -4322,7 +4645,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
                task_abort.device_id = cpu_to_le32(dev_id);
                task_abort.tag = cpu_to_le32(cmd_tag);
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
        return ret;
 }
 
@@ -4331,16 +4654,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
  * @task: the task we want to abort.
  * @flag: the abort flag.
  */
-static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
 {
        u32 opc, device_id;
        int rc = TMF_RESP_FUNC_FAILED;
-       PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag"
-               " = %x", cmd_tag, task_tag));
-       if (pm8001_dev->dev_type == SAS_END_DEV)
+       PM8001_EH_DBG(pm8001_ha,
+               pm8001_printk("cmd_tag = %x, abort task tag = 0x%x",
+                       cmd_tag, task_tag));
+       if (pm8001_dev->dev_type == SAS_END_DEVICE)
                opc = OPC_INB_SSP_ABORT;
-       else if (pm8001_dev->dev_type == SATA_DEV)
+       else if (pm8001_dev->dev_type == SAS_SATA_DEV)
                opc = OPC_INB_SATA_ABORT;
        else
                opc = OPC_INB_SMP_ABORT;/* SMP */
@@ -4358,7 +4682,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
  * @ccb: the ccb information.
  * @tmf: task management function.
  */
-static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
 {
        struct sas_task *task = ccb->task;
@@ -4376,11 +4700,11 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
        memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
        sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
        return ret;
 }
 
-static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
        u32 opc = OPC_INB_GET_NVMD_DATA;
@@ -4397,7 +4721,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
        if (!fw_control_context)
                return -ENOMEM;
-       fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
+       fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
        fw_control_context->len = ioctl_payload->length;
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
        memset(&nvmd_req, 0, sizeof(nvmd_req));
@@ -4456,11 +4780,11 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        default:
                break;
        }
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
        return rc;
 }
 
-static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
        u32 opc = OPC_INB_SET_NVMD_DATA;
@@ -4479,7 +4803,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
                return -ENOMEM;
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
        memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
-               ioctl_payload->func_specific,
+               &ioctl_payload->func_specific,
                ioctl_payload->length);
        memset(&nvmd_req, 0, sizeof(nvmd_req));
        rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -4536,7 +4860,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        default:
                break;
        }
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
        return rc;
 }
 
@@ -4545,7 +4869,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
  * @pm8001_ha: our hba card information.
  * @fw_flash_updata_info: firmware flash update param
  */
-static int
+int
 pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
        void *fw_flash_updata_info, u32 tag)
 {
@@ -4567,11 +4891,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
                cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
        payload.sgl_addr_hi =
                cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
-static int
+int
 pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
@@ -4581,29 +4905,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        int rc;
        u32 tag;
        struct pm8001_ccb_info *ccb;
-       void *buffer = NULL;
-       dma_addr_t phys_addr;
-       u32 phys_addr_hi;
-       u32 phys_addr_lo;
+       void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
+       dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
        struct pm8001_ioctl_payload *ioctl_payload = payload;
 
        fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
        if (!fw_control_context)
                return -ENOMEM;
-       fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
-       if (fw_control->len != 0) {
-               if (pm8001_mem_alloc(pm8001_ha->pdev,
-                       (void **)&buffer,
-                       &phys_addr,
-                       &phys_addr_hi,
-                       &phys_addr_lo,
-                       fw_control->len, 0) != 0) {
-                               PM8001_FAIL_DBG(pm8001_ha,
-                                       pm8001_printk("Mem alloc failure\n"));
-                               kfree(fw_control_context);
-                               return -ENOMEM;
-               }
-       }
+       fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
        memcpy(buffer, fw_control->buffer, fw_control->len);
        flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
        flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -4613,6 +4922,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        flash_update_info.total_image_len = fw_control->size;
        fw_control_context->fw_control = fw_control;
        fw_control_context->virtAddr = buffer;
+       fw_control_context->phys_addr = phys_addr;
        fw_control_context->len = fw_control->len;
        rc = pm8001_tag_alloc(pm8001_ha, &tag);
        if (rc) {
@@ -4627,7 +4937,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        return rc;
 }
 
-static int
+int
 pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u32 state)
 {
@@ -4648,7 +4958,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
        payload.tag = cpu_to_le32(tag);
        payload.device_id = cpu_to_le32(pm8001_dev->device_id);
        payload.nds = cpu_to_le32(state);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 
 }
@@ -4673,7 +4983,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
        payload.SSAHOLT = cpu_to_le32(0xd << 25);
        payload.sata_hol_tmo = cpu_to_le32(80);
        payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 
 }
@@ -4706,4 +5016,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
        .set_dev_state_req      = pm8001_chip_set_dev_state_req,
        .sas_re_init_req        = pm8001_chip_sas_re_initialization,
 };
-
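The pm8001_8001_dispatch table above is the SPC-specific instance of the driver's per-chip operations table; common code reaches these hooks through the PM8001_CHIP_DISP dispatch macro, so the same core paths can drive either the original SPC or the newer SPCv/SPCve parts handled by pm8001_80xx_dispatch. A minimal stand-alone sketch of that ops-table pattern, with purely hypothetical names rather than the driver's own definitions:

	#include <stdio.h>

	/* Hypothetical sketch of the ops-table indirection: each chip family
	 * supplies its own function pointers and common code calls through
	 * the table without knowing which chip is present. */
	struct chip_ops {
		int  (*chip_init)(void *ha);
		void (*interrupt_enable)(void *ha, unsigned char vec);
	};

	static int  spc_init(void *ha)                           { printf("SPC init\n");  return 0; }
	static void spc_int_enable(void *ha, unsigned char vec)  { printf("SPC enable vec %u\n", vec); }
	static int  spcv_init(void *ha)                          { printf("SPCv init\n"); return 0; }
	static void spcv_int_enable(void *ha, unsigned char vec) { printf("SPCv enable vec %u\n", vec); }

	static const struct chip_ops spc_ops  = { spc_init,  spc_int_enable };
	static const struct chip_ops spcv_ops = { spcv_init, spcv_int_enable };

	int main(void)
	{
		const struct chip_ops *ops = &spcv_ops;	/* picked from the PCI id at probe time */
		ops->chip_init(NULL);
		ops->interrupt_enable(NULL, 0);
		return 0;
	}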
index d437309cb1e1c30de4f75aaafc7fb8dd0884a724..d7c1e2034226ea7667201a2a1474400f52994b5d 100644 (file)
 #define LINKRATE_30                    (0x02 << 8)
 #define LINKRATE_60                    (0x04 << 8)
 
+/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
+#define GSM_SM_BASE                    0x4F0000
 struct mpi_msg_hdr{
        __le32  header; /* Bits [11:0]  - Message operation code */
        /* Bits [15:12] - Message Category */
@@ -298,7 +300,7 @@ struct local_phy_ctl_resp {
 
 
 #define OP_BITS 0x0000FF00
-#define ID_BITS 0x0000000F
+#define ID_BITS 0x000000FF
 
 /*
  * brief the data structure of PORT Control Command
index 3d5e522e00fc8ae93a4c9344731f960fa42c0006..e4b9bc7f5410fa5e38a9df530cba183652f1962a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
 
 static struct scsi_transport_template *pm8001_stt;
 
+/**
+ * chip info structure identifying key chip functionality:
+ * encryption support, number of ports, hw-specific function dispatch
+ */
 static const struct pm8001_chip_info pm8001_chips[] = {
-       [chip_8001] = {  8, &pm8001_8001_dispatch,},
+       [chip_8001] = {0,  8, &pm8001_8001_dispatch,},
+       [chip_8008] = {0,  8, &pm8001_80xx_dispatch,},
+       [chip_8009] = {1,  8, &pm8001_80xx_dispatch,},
+       [chip_8018] = {0,  16, &pm8001_80xx_dispatch,},
+       [chip_8019] = {1,  16, &pm8001_80xx_dispatch,},
 };
 static int pm8001_id;
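The extended pm8001_chips[] table above records, per chip id, whether an encryption engine is present, the PHY count, and which dispatch table to use; the probe path indexes it with the value stored in the PCI table's driver_data field. A small illustrative sketch of that lookup, with hypothetical names and values:

	#include <stdio.h>

	/* Hypothetical sketch: a driver_data value taken from the PCI id table
	 * indexes a per-chip parameter table at probe time. */
	enum chip_id { CHIP_SPC, CHIP_SPCV };

	struct chip_info {
		unsigned int encrypt;	/* encryption engine present? */
		unsigned int n_phy;	/* number of PHYs */
	};

	static const struct chip_info chips[] = {
		[CHIP_SPC]  = { 0,  8 },
		[CHIP_SPCV] = { 1, 16 },
	};

	int main(void)
	{
		unsigned long driver_data = CHIP_SPCV;	/* would come from the matched PCI entry */

		printf("phys=%u encrypt=%u\n",
		       chips[driver_data].n_phy, chips[driver_data].encrypt);
		return 0;
	}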
 
@@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
 }
 
 #ifdef PM8001_USE_TASKLET
+
+/**
+ * tasklet for 64 msi-x interrupt handler
+ * @opaque: the passed general host adapter struct
+ * Note: pm8001_tasklet is common for pm8001 & pm80xx
+ */
 static void pm8001_tasklet(unsigned long opaque)
 {
        struct pm8001_hba_info *pm8001_ha;
+       u32 vec;
        pm8001_ha = (struct pm8001_hba_info *)opaque;
        if (unlikely(!pm8001_ha))
                BUG_ON(1);
-       PM8001_CHIP_DISP->isr(pm8001_ha);
+       vec = pm8001_ha->int_vector;
+       PM8001_CHIP_DISP->isr(pm8001_ha, vec);
+}
+#endif
+
+static struct  pm8001_hba_info *outq_to_hba(u8 *outq)
+{
+       return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
 }
+
+/**
+ * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
+ * It obtains the vector number and either schedules the bottom
+ * half or services the interrupt directly.
+ * @opaque: the passed outbound queue/vector; the host structure
+ * is retrieved from it.
+ */
+static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
+{
+       struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque);
+       u8 outq = *(u8 *)opaque;
+       irqreturn_t ret = IRQ_HANDLED;
+       if (unlikely(!pm8001_ha))
+               return IRQ_NONE;
+       if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+               return IRQ_NONE;
+       pm8001_ha->int_vector = outq;
+#ifdef PM8001_USE_TASKLET
+       tasklet_schedule(&pm8001_ha->tasklet);
+#else
+       ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq);
 #endif
+       return ret;
+}
 
+/**
+ * pm8001_interrupt_handler_intx - main INTx interrupt handler.
+ * @dev_id: sas_ha structure. The HBA is retrieved from the sas_ha structure.
+ */
 
- /**
-  * pm8001_interrupt - when HBA originate a interrupt,we should invoke this
-  * dispatcher to handle each case.
-  * @irq: irq number.
-  * @opaque: the passed general host adapter struct
-  */
-static irqreturn_t pm8001_interrupt(int irq, void *opaque)
+static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
 {
        struct pm8001_hba_info *pm8001_ha;
        irqreturn_t ret = IRQ_HANDLED;
-       struct sas_ha_struct *sha = opaque;
+       struct sas_ha_struct *sha = dev_id;
        pm8001_ha = sha->lldd_ha;
        if (unlikely(!pm8001_ha))
                return IRQ_NONE;
        if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
                return IRQ_NONE;
+
+       pm8001_ha->int_vector = 0;
 #ifdef PM8001_USE_TASKLET
        tasklet_schedule(&pm8001_ha->tasklet);
 #else
-       ret = PM8001_CHIP_DISP->isr(pm8001_ha);
+       ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
 #endif
        return ret;
 }
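outq_to_hba() above recovers the owning HBA from a pointer into the outq[] array: each slot stores its own index, so subtracting *outq from the element pointer yields &outq[0], and container_of() then steps back to the enclosing pm8001_hba_info. That is what allows &pm8001_ha->outq[i] to serve as the per-vector dev_id. A user-space sketch of the same pointer trick, under hypothetical types:

	#include <stdio.h>
	#include <stddef.h>

	/* Simplified container_of: map a member pointer back to its struct. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct hba {
		int id;
		unsigned char outq[4];	/* outq[i] == i, as in the patch */
	};

	static struct hba *outq_to_hba(unsigned char *outq)
	{
		/* outq points at outq[n] and *outq == n, so outq - *outq is &outq[0] */
		return container_of(outq - *outq, struct hba, outq[0]);
	}

	int main(void)
	{
		struct hba h = { .id = 7, .outq = { 0, 1, 2, 3 } };

		printf("hba id recovered from vector 2: %d\n", outq_to_hba(&h.outq[2])->id);
		return 0;
	}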
@@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque)
  * @pm8001_ha:our hba structure.
  *
  */
-static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
+                       const struct pci_device_id *ent)
 {
        int i;
        spin_lock_init(&pm8001_ha->lock);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("pm8001_alloc: PHY:%x\n",
+                               pm8001_ha->chip->n_phy));
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
                pm8001_phy_init(pm8001_ha, i);
                pm8001_ha->port[i].wide_port_phymap = 0;
@@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
        pm8001_ha->memoryMap.region[IOP].alignment = 32;
 
-       /* MPI Memory region 3 for consumer Index of inbound queues */
-       pm8001_ha->memoryMap.region[CI].num_elements = 1;
-       pm8001_ha->memoryMap.region[CI].element_size = 4;
-       pm8001_ha->memoryMap.region[CI].total_len = 4;
-       pm8001_ha->memoryMap.region[CI].alignment = 4;
-
-       /* MPI Memory region 4 for producer Index of outbound queues */
-       pm8001_ha->memoryMap.region[PI].num_elements = 1;
-       pm8001_ha->memoryMap.region[PI].element_size = 4;
-       pm8001_ha->memoryMap.region[PI].total_len = 4;
-       pm8001_ha->memoryMap.region[PI].alignment = 4;
-
-       /* MPI Memory region 5 inbound queues */
-       pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
-       pm8001_ha->memoryMap.region[IB].element_size = 64;
-       pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
-       pm8001_ha->memoryMap.region[IB].alignment = 64;
-
-       /* MPI Memory region 6 outbound queues */
-       pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
-       pm8001_ha->memoryMap.region[OB].element_size = 64;
-       pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
-       pm8001_ha->memoryMap.region[OB].alignment = 64;
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               /* MPI Memory region 3 for consumer Index of inbound queues */
+               pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
+               pm8001_ha->memoryMap.region[CI+i].element_size = 4;
+               pm8001_ha->memoryMap.region[CI+i].total_len = 4;
+               pm8001_ha->memoryMap.region[CI+i].alignment = 4;
+
+               if ((ent->driver_data) != chip_8001) {
+                       /* MPI Memory region 5 inbound queues */
+                       pm8001_ha->memoryMap.region[IB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[IB+i].element_size = 128;
+                       pm8001_ha->memoryMap.region[IB+i].total_len =
+                                               PM8001_MPI_QUEUE * 128;
+                       pm8001_ha->memoryMap.region[IB+i].alignment = 128;
+               } else {
+                       pm8001_ha->memoryMap.region[IB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[IB+i].element_size = 64;
+                       pm8001_ha->memoryMap.region[IB+i].total_len =
+                                               PM8001_MPI_QUEUE * 64;
+                       pm8001_ha->memoryMap.region[IB+i].alignment = 64;
+               }
+       }
+
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               /* MPI Memory region 4 for producer Index of outbound queues */
+               pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
+               pm8001_ha->memoryMap.region[PI+i].element_size = 4;
+               pm8001_ha->memoryMap.region[PI+i].total_len = 4;
+               pm8001_ha->memoryMap.region[PI+i].alignment = 4;
+
+               if (ent->driver_data != chip_8001) {
+                       /* MPI Memory region 6 Outbound queues */
+                       pm8001_ha->memoryMap.region[OB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[OB+i].element_size = 128;
+                       pm8001_ha->memoryMap.region[OB+i].total_len =
+                                               PM8001_MPI_QUEUE * 128;
+                       pm8001_ha->memoryMap.region[OB+i].alignment = 128;
+               } else {
+                       /* MPI Memory region 6 Outbound queues */
+                       pm8001_ha->memoryMap.region[OB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[OB+i].element_size = 64;
+                       pm8001_ha->memoryMap.region[OB+i].total_len =
+                                               PM8001_MPI_QUEUE * 64;
+                       pm8001_ha->memoryMap.region[OB+i].alignment = 64;
+               }
 
+       }
        /* Memory region write DMA*/
        pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
        pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
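The queue regions are now laid out per queue (CI+i, PI+i, IB+i, OB+i), and the element size follows the controller family: the original SPC uses 64-byte IOMBs while the SPCv/SPCve parts use 128-byte IOMBs, which is why total_len doubles on the newer chips. A trivial illustrative helper, with a made-up queue depth standing in for PM8001_MPI_QUEUE, expressing that sizing rule:

	#include <stdio.h>

	/* Hypothetical sizing rule only: one queue's buffer length is the
	 * queue depth times the per-family IOMB size (made-up depth here). */
	#define QUEUE_DEPTH 1024

	static unsigned int iomb_region_len(int is_spc)
	{
		return QUEUE_DEPTH * (is_spc ? 64u : 128u);
	}

	int main(void)
	{
		printf("SPC queue: %u bytes, SPCv queue: %u bytes\n",
		       iomb_region_len(1), iomb_region_len(0));
		return 0;
	}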
@@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
                sizeof(struct pm8001_ccb_info);
 
+       /* Memory region for fw flash */
+       pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
+
        for (i = 0; i < USI_MAX_MEMCNT; i++) {
                if (pm8001_mem_alloc(pm8001_ha->pdev,
                        &pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
 
        pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
        for (i = 0; i < PM8001_MAX_DEVICES; i++) {
-               pm8001_ha->devices[i].dev_type = NO_DEVICE;
+               pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
                pm8001_ha->devices[i].id = i;
                pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
                pm8001_ha->devices[i].running_req = 0;
@@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
                                ioremap(pm8001_ha->io_mem[logicalBar].membase,
                                pm8001_ha->io_mem[logicalBar].memsize);
                        PM8001_INIT_DBG(pm8001_ha,
-                               pm8001_printk("PCI: bar %d, logicalBar %d "
-                               "virt_addr=%lx,len=%d\n", bar, logicalBar,
-                               (unsigned long)
-                               pm8001_ha->io_mem[logicalBar].memvirtaddr,
+                               pm8001_printk("PCI: bar %d, logicalBar %d ",
+                               bar, logicalBar));
+                       PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                               "base addr %llx virt_addr=%llx len=%d\n",
+                               (u64)pm8001_ha->io_mem[logicalBar].membase,
+                               (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
                                pm8001_ha->io_mem[logicalBar].memsize));
                } else {
                        pm8001_ha->io_mem[logicalBar].membase   = 0;
@@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
  * @shost: scsi host struct which has been initialized before.
  */
 static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
-                                               u32 chip_id,
-                                               struct Scsi_Host *shost)
+                                const struct pci_device_id *ent,
+                               struct Scsi_Host *shost)
+
 {
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
 
        pm8001_ha->pdev = pdev;
        pm8001_ha->dev = &pdev->dev;
-       pm8001_ha->chip_id = chip_id;
+       pm8001_ha->chip_id = ent->driver_data;
        pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
        pm8001_ha->irq = pdev->irq;
        pm8001_ha->sas = sha;
@@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
        pm8001_ha->id = pm8001_id++;
        pm8001_ha->logging_level = 0x01;
        sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
+       /* IOMB size is 128 for 8088/89 controllers */
+       if (pm8001_ha->chip_id != chip_8001)
+               pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
+       else
+               pm8001_ha->iomb_size = IOMB_SIZE_SPC;
+
 #ifdef PM8001_USE_TASKLET
+       /**
+       * default tasklet for non msi-x interrupt handler/first msi-x
+       * interrupt handler
+       */
        tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-               (unsigned long)pm8001_ha);
+                       (unsigned long)pm8001_ha);
 #endif
        pm8001_ioremap(pm8001_ha);
-       if (!pm8001_alloc(pm8001_ha))
+       if (!pm8001_alloc(pm8001_ha, ent))
                return pm8001_ha;
        pm8001_free(pm8001_ha);
        return NULL;
@@ -512,21 +605,50 @@ static void  pm8001_post_sas_ha_init(struct Scsi_Host *shost,
  */
 static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
 {
-       u8 i;
+       u8 i, j;
 #ifdef PM8001_READ_VPD
+       /* For new SPC controllers WWN is stored in flash vpd
+       *  For SPC/SPCve controllers WWN is stored in EEPROM
+       *  For Older SPC WWN is stored in NVMD
+       */
        DECLARE_COMPLETION_ONSTACK(completion);
        struct pm8001_ioctl_payload payload;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
        pm8001_ha->nvmd_completion = &completion;
-       payload.minor_function = 0;
-       payload.length = 128;
-       payload.func_specific = kzalloc(128, GFP_KERNEL);
+
+       if (pm8001_ha->chip_id == chip_8001) {
+               if (deviceid == 0x8081) {
+                       payload.minor_function = 4;
+                       payload.length = 4096;
+               } else {
+                       payload.minor_function = 0;
+                       payload.length = 128;
+               }
+       } else {
+               payload.minor_function = 1;
+               payload.length = 4096;
+       }
+       payload.offset = 0;
+       payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
        PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
        wait_for_completion(&completion);
+
+       for (i = 0, j = 0; i <= 7; i++, j++) {
+               if (pm8001_ha->chip_id == chip_8001) {
+                       if (deviceid == 0x8081)
+                               pm8001_ha->sas_addr[j] =
+                                       payload.func_specific[0x704 + i];
+               } else
+                       pm8001_ha->sas_addr[j] =
+                                       payload.func_specific[0x804 + i];
+       }
+
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
-               memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
-                       SAS_ADDR_SIZE);
+               memcpy(&pm8001_ha->phy[i].dev_sas_addr,
+                       pm8001_ha->sas_addr, SAS_ADDR_SIZE);
                PM8001_INIT_DBG(pm8001_ha,
-                       pm8001_printk("phy %d sas_addr = %016llx \n", i,
+                       pm8001_printk("phy %d sas_addr = %016llx\n", i,
                        pm8001_ha->phy[i].dev_sas_addr));
        }
 #else
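In the hunk above the SAS address is pulled out of the NVMD/VPD response buffer at a controller-specific offset (0x704 for the 0x8081 flavour of the SPC, 0x804 for the SPCv family) before being copied to every PHY. A hedged, stand-alone sketch of assembling such an 8-byte address from a buffer; the helper name and the example bytes are made up:

	#include <stdio.h>

	/* Hypothetical helper: assemble the 8-byte SAS address found in an
	 * NVMD/VPD response buffer at a family-specific offset into a
	 * printable 64-bit value. */
	static unsigned long long sas_addr_at(const unsigned char *buf,
					      unsigned int offset)
	{
		unsigned long long addr = 0;
		int i;

		for (i = 0; i < 8; i++)
			addr = (addr << 8) | buf[offset + i];
		return addr;
	}

	int main(void)
	{
		unsigned char vpd[0x1000] = { 0 };

		vpd[0x804] = 0x50;	/* fabricated example bytes */
		vpd[0x805] = 0x01;
		printf("sas_addr = %016llx\n", sas_addr_at(vpd, 0x804));
		return 0;
	}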
@@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
  * @chip_info: our ha struct.
  * @irq_handler: irq_handler
  */
-static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
-       irq_handler_t irq_handler)
+static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
 {
        u32 i = 0, j = 0;
-       u32 number_of_intr = 1;
+       u32 number_of_intr;
        int flag = 0;
        u32 max_entry;
        int rc;
+       static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
+
+       /* SPCv controllers support 64 msi-x */
+       if (pm8001_ha->chip_id == chip_8001) {
+               number_of_intr = 1;
+               flag |= IRQF_DISABLED;
+       } else {
+               number_of_intr = PM8001_MAX_MSIX_VEC;
+               flag &= ~IRQF_SHARED;
+               flag |= IRQF_DISABLED;
+       }
+
        max_entry = sizeof(pm8001_ha->msix_entries) /
                sizeof(pm8001_ha->msix_entries[0]);
-       flag |= IRQF_DISABLED;
        for (i = 0; i < max_entry ; i++)
                pm8001_ha->msix_entries[i].entry = i;
        rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
                number_of_intr);
        pm8001_ha->number_of_intr = number_of_intr;
        if (!rc) {
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "pci_enable_msix request ret:%d no of intr %d\n",
+                                       rc, pm8001_ha->number_of_intr));
+
+               for (i = 0; i < number_of_intr; i++)
+                       pm8001_ha->outq[i] = i;
+
                for (i = 0; i < number_of_intr; i++) {
+                       snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
+                                       DRV_NAME"%d", i);
                        if (request_irq(pm8001_ha->msix_entries[i].vector,
-                               irq_handler, flag, DRV_NAME,
-                               SHOST_TO_SAS_HA(pm8001_ha->shost))) {
+                               pm8001_interrupt_handler_msix, flag,
+                               intr_drvname[i], &pm8001_ha->outq[i])) {
                                for (j = 0; j < i; j++)
                                        free_irq(
                                        pm8001_ha->msix_entries[j].vector,
-                                       SHOST_TO_SAS_HA(pm8001_ha->shost));
+                                       &pm8001_ha->outq[j]);
                                pci_disable_msix(pm8001_ha->pdev);
                                break;
                        }
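Each MSI-X vector is now registered with its own name ("pm80xx0", "pm80xx1", ...) built in intr_drvname[], and with &pm8001_ha->outq[i] as dev_id; the remove and suspend hunks further down must hand free_irq() that same pointer. A small stand-alone sketch of how such per-vector names are built, with the buffer sized as in the patch:

	#include <stdio.h>

	#define DRV_NAME "pm80xx"
	#define MAX_VEC  64

	int main(void)
	{
		/* sizeof(DRV_NAME) + 3 leaves room for "pm80xx", up to two
		 * digits and the terminating NUL, as in intr_drvname[] above */
		static char name[MAX_VEC][sizeof(DRV_NAME) + 3];
		int i;

		for (i = 0; i < 4; i++) {
			snprintf(name[i], sizeof(name[0]), DRV_NAME "%d", i);
			printf("%s\n", name[i]);
		}
		return 0;
	}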
@@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
 {
        struct pci_dev *pdev;
-       irq_handler_t irq_handler = pm8001_interrupt;
        int rc;
 
        pdev = pm8001_ha->pdev;
 
 #ifdef PM8001_USE_MSIX
        if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
-               return pm8001_setup_msix(pm8001_ha, irq_handler);
-       else
+               return pm8001_setup_msix(pm8001_ha);
+       else {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("MSIX not supported!!!\n"));
                goto intx;
+       }
 #endif
 
 intx:
        /* initialize the INT-X interrupt */
-       rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
-               SHOST_TO_SAS_HA(pm8001_ha->shost));
+       rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
+               DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
        return rc;
 }
 
@@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
 {
        unsigned int rc;
        u32     pci_reg;
+       u8      i = 0;
        struct pm8001_hba_info *pm8001_ha;
        struct Scsi_Host *shost = NULL;
        const struct pm8001_chip_info *chip;
 
        dev_printk(KERN_INFO, &pdev->dev,
-               "pm8001: driver version %s\n", DRV_VERSION);
+               "pm80xx: driver version %s\n", DRV_VERSION);
        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out_enable;
@@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
                goto err_out_free;
        }
        pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
-       pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
+       /* ent->driver_data is used to differentiate between controllers */
+       pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
        if (!pm8001_ha) {
                rc = -ENOMEM;
                goto err_out_free;
        }
        list_add_tail(&pm8001_ha->list, &hba_list);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
        rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
-       if (rc)
+       if (rc) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "chip_init failed [ret: %d]\n", rc));
                goto err_out_ha_free;
+       }
 
        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha_free;
        rc = pm8001_request_irq(pm8001_ha);
-       if (rc)
+       if (rc) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "pm8001_request_irq failed [ret: %d]\n", rc));
                goto err_out_shost;
+       }
+
+       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+       if (pm8001_ha->chip_id != chip_8001) {
+               for (i = 1; i < pm8001_ha->number_of_intr; i++)
+                       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+               /* setup thermal configuration. */
+               pm80xx_set_thermal_config(pm8001_ha);
+       }
 
-       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
        pm8001_init_sas_add(pm8001_ha);
        pm8001_post_sas_ha_init(shost, chip);
        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
@@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
        sas_remove_host(pm8001_ha->shost);
        list_del(&pm8001_ha->list);
        scsi_remove_host(pm8001_ha->shost);
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 
 #ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                synchronize_irq(pm8001_ha->msix_entries[i].vector);
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pm8001_ha->msix_entries[i].vector, sha);
+               free_irq(pm8001_ha->msix_entries[i].vector,
+                               &pm8001_ha->outq[i]);
        pci_disable_msix(pdev);
 #else
        free_irq(pm8001_ha->irq, sha);
@@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                printk(KERN_ERR " PCI PM not supported\n");
                return -ENODEV;
        }
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 #ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                synchronize_irq(pm8001_ha->msix_entries[i].vector);
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pm8001_ha->msix_entries[i].vector, sha);
+               free_irq(pm8001_ha->msix_entries[i].vector,
+                               &pm8001_ha->outq[i]);
        pci_disable_msix(pdev);
 #else
        free_irq(pm8001_ha->irq, sha);
@@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct pm8001_hba_info *pm8001_ha;
        int rc;
+       u8 i = 0;
        u32 device_state;
        pm8001_ha = sha->lldd_ha;
        device_state = pdev->current_state;
@@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
        if (rc)
                goto err_out_disable;
 
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       /* chip soft rst only for spc */
+       if (pm8001_ha->chip_id == chip_8001) {
+               PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("chip soft reset successful\n"));
+       }
        rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
        if (rc)
                goto err_out_disable;
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
+
+       /* disable all the interrupt bits */
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
                goto err_out_disable;
-       #ifdef PM8001_USE_TASKLET
+#ifdef PM8001_USE_TASKLET
+       /* default tasklet for non msi-x interrupt handler/first msi-x
+       * interrupt handler */
        tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-                   (unsigned long)pm8001_ha);
-       #endif
-       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
+                       (unsigned long)pm8001_ha);
+#endif
+       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+       if (pm8001_ha->chip_id != chip_8001) {
+               for (i = 1; i < pm8001_ha->number_of_intr; i++)
+                       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+       }
        scsi_unblock_requests(pm8001_ha->shost);
        return 0;
 
@@ -843,14 +1018,45 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
        return rc;
 }
 
+/* update of pci device, vendor id and driver data with
+ * a unique value for each controller
+ */
 static struct pci_device_id pm8001_pci_table[] = {
-       {
-               PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
-       },
+       { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
        {
                PCI_DEVICE(0x117c, 0x0042),
                .driver_data = chip_8001
        },
+       /* Support for SPC/SPCv/SPCve controllers */
+       { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
        {} /* terminate list */
 };
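The long-form rows above rely on the struct pci_device_id field order: vendor, device, subvendor, subdevice, class, class_mask, driver_data. A hypothetical rendering of one of those rows with designated initializers (kernel context assumed, values copied from the entry above) makes the subsystem matching explicit:

	/* Hypothetical rewrite of one entry above with designated initializers;
	 * assumes the usual struct pci_device_id layout from <linux/mod_devicetable.h>. */
	static const struct pci_device_id example_id = {
		.vendor      = PCI_VENDOR_ID_ADAPTEC2,
		.device      = 0x8089,
		.subvendor   = PCI_VENDOR_ID_ADAPTEC2,
		.subdevice   = 0x1600,
		.class       = 0,
		.class_mask  = 0,
		.driver_data = chip_8019,
	};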
 
@@ -870,7 +1076,7 @@ static int __init pm8001_init(void)
 {
        int rc = -ENOMEM;
 
-       pm8001_wq = alloc_workqueue("pm8001", 0, 0);
+       pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
        if (!pm8001_wq)
                goto err;
 
@@ -902,7 +1108,8 @@ module_init(pm8001_init);
 module_exit(pm8001_exit);
 
 MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
-MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
+MODULE_DESCRIPTION(
+               "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
index b961112395d5e053145ba6f83ed4bf7e5058c5c5..a85d73de7c80c730260362ae56a7688189459ee6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
        clear_bit(tag, bitmap);
 }
 
-static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
 {
        pm8001_tag_clear(pm8001_ha, tag);
 }
@@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                break;
        case PHY_FUNC_GET_EVENTS:
                spin_lock_irqsave(&pm8001_ha->lock, flags);
-               if (-1 == pm8001_bar4_shift(pm8001_ha,
+               if (pm8001_ha->chip_id == chip_8001) {
+                       if (-1 == pm8001_bar4_shift(pm8001_ha,
                                        (phy_id < 4) ? 0x30000 : 0x40000)) {
-                       spin_unlock_irqrestore(&pm8001_ha->lock, flags);
-                       return -EINVAL;
+                               spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+                               return -EINVAL;
+                       }
                }
                {
                        struct sas_phy *phy = sas_phy->phy;
@@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                        phy->loss_of_dword_sync_count = qp[3];
                        phy->phy_reset_problem_count = qp[4];
                }
-               pm8001_bar4_shift(pm8001_ha, 0);
+               if (pm8001_ha->chip_id == chip_8001)
+                       pm8001_bar4_shift(pm8001_ha, 0);
                spin_unlock_irqrestore(&pm8001_ha->lock, flags);
                return 0;
        default:
@@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost)
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        pm8001_ha = sha->lldd_ha;
-       PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
+       /* SAS_RE_INITIALIZATION not available in SPCv/ve */
+       if (pm8001_ha->chip_id == chip_8001)
+               PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
        for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
                PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
 }
@@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev)
   * @tmf: the task management IU
   */
 #define DEV_IS_GONE(pm8001_dev)        \
-       ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)))
+       ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
 static int pm8001_task_exec(struct sas_task *task, const int num,
        gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
 {
@@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
                struct task_status_struct *tsm = &t->task_status;
                tsm->resp = SAS_TASK_UNDELIVERED;
                tsm->stat = SAS_PHY_DOWN;
-               if (dev->dev_type != SATA_DEV)
+               if (dev->dev_type != SAS_SATA_DEV)
                        t->task_done(t);
                return 0;
        }
@@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
 {
        u32 dev;
        for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
-               if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) {
+               if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
                        pm8001_ha->devices[dev].id = dev;
                        return &pm8001_ha->devices[dev];
                }
@@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
        }
        return NULL;
 }
+/**
+  * pm8001_find_dev - find a matching pm8001_device
+  * @pm8001_ha: our hba card information
+  */
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+                                       u32 device_id)
+{
+       u32 dev;
+       for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
+               if (pm8001_ha->devices[dev].device_id == device_id)
+                       return &pm8001_ha->devices[dev];
+       }
+       if (dev == PM8001_MAX_DEVICES) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
+                               "DEVICE FOUND !!!\n"));
+       }
+       return NULL;
+}
 
 static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
 {
        u32 id = pm8001_dev->id;
        memset(pm8001_dev, 0, sizeof(*pm8001_dev));
        pm8001_dev->id = id;
-       pm8001_dev->dev_type = NO_DEVICE;
+       pm8001_dev->dev_type = SAS_PHY_UNUSED;
        pm8001_dev->device_id = PM8001_MAX_DEVICES;
        pm8001_dev->sas_device = NULL;
 }
@@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
                        res = -1;
                }
        } else {
-               if (dev->dev_type == SATA_DEV) {
+               if (dev->dev_type == SAS_SATA_DEV) {
                        pm8001_device->attached_phy =
                                dev->rphy->identify.phy_identifier;
                                flag = 1; /* directly sata*/
@@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
        PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
        wait_for_completion(&completion);
-       if (dev->dev_type == SAS_END_DEV)
+       if (dev->dev_type == SAS_END_DEVICE)
                msleep(50);
        pm8001_ha->flags = PM8001F_RUN_TIME;
        return 0;
@@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev)
        return pm8001_dev_found_notify(dev);
 }
 
-static void pm8001_task_done(struct sas_task *task)
+void pm8001_task_done(struct sas_task *task)
 {
        if (!del_timer(&task->slow_task->timer))
                return;
@@ -904,7 +927,7 @@ void pm8001_open_reject_retry(
                struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
 
                pm8001_dev = ccb->device;
-               if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))
+               if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
                        continue;
                if (!device_to_close) {
                        uintptr_t d = (uintptr_t)pm8001_dev
@@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
        return rc;
 }
 
+/*
+* This function handles the IT_NEXUS_XXX event or completion
+* status code for SSP/SATA/SMP I/O requests.
+*/
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
+{
+       int rc = TMF_RESP_FUNC_FAILED;
+       struct pm8001_device *pm8001_dev;
+       struct pm8001_hba_info *pm8001_ha;
+       struct sas_phy *phy;
+       u32 device_id = 0;
+
+       if (!dev || !dev->lldd_dev)
+               return -1;
+
+       pm8001_dev = dev->lldd_dev;
+       device_id = pm8001_dev->device_id;
+       pm8001_ha = pm8001_find_ha_by_dev(dev);
+
+       PM8001_EH_DBG(pm8001_ha,
+                       pm8001_printk("I_T_Nexus handler invoked !!"));
+
+       phy = sas_get_local_phy(dev);
+
+       if (dev_is_sata(dev)) {
+               DECLARE_COMPLETION_ONSTACK(completion_setstate);
+               if (scsi_is_sas_phy_local(phy)) {
+                       rc = 0;
+                       goto out;
+               }
+               /* send internal ssp/sata/smp abort command to FW */
+               rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+                                                       dev, 1, 0);
+               msleep(100);
+
+               /* deregister the target device */
+               pm8001_dev_gone_notify(dev);
+               msleep(200);
+
+               /*send phy reset to hard reset target */
+               rc = sas_phy_reset(phy, 1);
+               msleep(2000);
+               pm8001_dev->setds_completion = &completion_setstate;
+
+               wait_for_completion(&completion_setstate);
+       } else {
+               /* send internal ssp/sata/smp abort command to FW */
+               rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+                                                       dev, 1, 0);
+               msleep(100);
+
+               /* deregister the target device */
+               pm8001_dev_gone_notify(dev);
+               msleep(200);
+
+               /*send phy reset to hard reset target */
+               rc = sas_phy_reset(phy, 1);
+               msleep(2000);
+       }
+       PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
+               pm8001_dev->device_id, rc));
+out:
+       sas_put_local_phy(phy);
+
+       return rc;
+}
 /* mandatory SAM-3, the task reset the specified LUN*/
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
 {
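pm8001_find_dev(), added above, is a plain linear scan of the devices[] array keyed by the firmware-assigned device_id, so completion paths can translate the id carried in an outbound IOMB back into a pm8001_device. A stand-alone sketch of the same lookup pattern, using hypothetical types rather than the driver's structures:

	#include <stdio.h>

	#define MAX_DEVICES 8

	/* Hypothetical stand-in for the linear device_id lookup: scan the
	 * table and return NULL when nothing matches. */
	struct dev_entry { unsigned int device_id; const char *name; };

	static struct dev_entry devices[MAX_DEVICES] = {
		{ 0x101, "disk0" }, { 0x102, "disk1" },
	};

	static struct dev_entry *find_dev(unsigned int device_id)
	{
		unsigned int i;

		for (i = 0; i < MAX_DEVICES; i++)
			if (devices[i].device_id == device_id)
				return &devices[i];
		return NULL;
	}

	int main(void)
	{
		struct dev_entry *d = find_dev(0x102);

		printf("%s\n", d ? d->name : "not found");
		return 0;
	}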
index 11008205aeb331c9b86be09deb8c26733cd133b2..570819464d90b40568477eac1e4c283ae10c3169 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -57,8 +57,8 @@
 #include <linux/atomic.h>
 #include "pm8001_defs.h"
 
-#define DRV_NAME               "pm8001"
-#define DRV_VERSION            "0.1.36"
+#define DRV_NAME               "pm80xx"
+#define DRV_VERSION            "0.1.37"
 #define PM8001_FAIL_LOGGING    0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING    0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING    0x04 /* discovery layer logging */
@@ -66,8 +66,8 @@
 #define PM8001_EH_LOGGING      0x10 /* libsas EH function logging*/
 #define PM8001_IOCTL_LOGGING   0x20 /* IOCTL message logging */
 #define PM8001_MSG_LOGGING     0x40 /* misc message logging */
-#define pm8001_printk(format, arg...)  printk(KERN_INFO "%s %d:" format,\
-                               __func__, __LINE__, ## arg)
+#define pm8001_printk(format, arg...)  printk(KERN_INFO "pm80xx %s %d:" \
+                       format, __func__, __LINE__, ## arg)
 #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD)  \
 do {                                           \
        if (unlikely(HBA->logging_level & LEVEL))       \
@@ -103,11 +103,12 @@ do {                                              \
 #define PM8001_READ_VPD
 
 
-#define DEV_IS_EXPANDER(type)  ((type == EDGE_DEV) || (type == FANOUT_DEV))
+#define DEV_IS_EXPANDER(type)  ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
 
 #define PM8001_NAME_LENGTH             32/* generic length of strings */
 extern struct list_head hba_list;
 extern const struct pm8001_dispatch pm8001_8001_dispatch;
+extern const struct pm8001_dispatch pm8001_80xx_dispatch;
 
 struct pm8001_hba_info;
 struct pm8001_ccb_info;
@@ -131,15 +132,15 @@ struct pm8001_ioctl_payload {
 struct pm8001_dispatch {
        char *name;
        int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
-       int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature);
+       int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
        void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
        int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
        void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
-       irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha);
+       irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
        u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
-       int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
-       void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
-       void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha);
+       int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+       void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+       void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
        void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
        int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
                struct pm8001_ccb_info *ccb);
@@ -173,6 +174,7 @@ struct pm8001_dispatch {
 };
 
 struct pm8001_chip_info {
+       u32     encrypt;
        u32     n_phy;
        const struct pm8001_dispatch    *dispatch;
 };
@@ -204,7 +206,7 @@ struct pm8001_phy {
 };
 
 struct pm8001_device {
-       enum sas_dev_type       dev_type;
+       enum sas_device_type    dev_type;
        struct domain_device    *sas_device;
        u32                     attached_phy;
        u32                     id;
@@ -256,7 +258,20 @@ struct mpi_mem_req {
        struct mpi_mem          region[USI_MAX_MEMCNT];
 };
 
-struct main_cfg_table {
+struct encrypt {
+       u32     cipher_mode;
+       u32     sec_mode;
+       u32     status;
+       u32     flag;
+};
+
+struct sas_phy_attribute_table {
+       u32     phystart1_16[16];
+       u32     outbound_hw_event_pid1_16[16];
+};
+
+union main_cfg_table {
+       struct {
        u32                     signature;
        u32                     interface_rev;
        u32                     firmware_rev;
@@ -292,19 +307,69 @@ struct main_cfg_table {
        u32                     fatal_err_dump_length1;
        u32                     hda_mode_flag;
        u32                     anolog_setup_table_offset;
+       u32                     rsvd[4];
+       } pm8001_tbl;
+
+       struct {
+       u32                     signature;
+       u32                     interface_rev;
+       u32                     firmware_rev;
+       u32                     max_out_io;
+       u32                     max_sgl;
+       u32                     ctrl_cap_flag;
+       u32                     gst_offset;
+       u32                     inbound_queue_offset;
+       u32                     outbound_queue_offset;
+       u32                     inbound_q_nppd_hppd;
+       u32                     rsvd[8];
+       u32                     crc_core_dump;
+       u32                     rsvd1;
+       u32                     upper_event_log_addr;
+       u32                     lower_event_log_addr;
+       u32                     event_log_size;
+       u32                     event_log_severity;
+       u32                     upper_pcs_event_log_addr;
+       u32                     lower_pcs_event_log_addr;
+       u32                     pcs_event_log_size;
+       u32                     pcs_event_log_severity;
+       u32                     fatal_err_interrupt;
+       u32                     fatal_err_dump_offset0;
+       u32                     fatal_err_dump_length0;
+       u32                     fatal_err_dump_offset1;
+       u32                     fatal_err_dump_length1;
+       u32                     gpio_led_mapping;
+       u32                     analog_setup_table_offset;
+       u32                     int_vec_table_offset;
+       u32                     phy_attr_table_offset;
+       u32                     port_recovery_timer;
+       u32                     interrupt_reassertion_delay;
+       } pm80xx_tbl;
 };
-struct general_status_table {
+
+union general_status_table {
+       struct {
        u32                     gst_len_mpistate;
        u32                     iq_freeze_state0;
        u32                     iq_freeze_state1;
        u32                     msgu_tcnt;
        u32                     iop_tcnt;
-       u32                     reserved;
+       u32                     rsvd;
        u32                     phy_state[8];
-       u32                     reserved1;
-       u32                     reserved2;
-       u32                     reserved3;
+       u32                     gpio_input_val;
+       u32                     rsvd1[2];
+       u32                     recover_err_info[8];
+       } pm8001_tbl;
+       struct {
+       u32                     gst_len_mpistate;
+       u32                     iq_freeze_state0;
+       u32                     iq_freeze_state1;
+       u32                     msgu_tcnt;
+       u32                     iop_tcnt;
+       u32                     rsvd[9];
+       u32                     gpio_input_val;
+       u32                     rsvd1[2];
        u32                     recover_err_info[8];
+       } pm80xx_tbl;
 };
 struct inbound_queue_table {
        u32                     element_pri_size_cnt;
@@ -351,15 +416,21 @@ struct pm8001_hba_info {
        struct device           *dev;
        struct pm8001_hba_memspace io_mem[6];
        struct mpi_mem_req      memoryMap;
+       struct encrypt          encrypt_info; /* support encryption */
        void __iomem    *msg_unit_tbl_addr;/*Message Unit Table Addr*/
        void __iomem    *main_cfg_tbl_addr;/*Main Config Table Addr*/
        void __iomem    *general_stat_tbl_addr;/*General Status Table Addr*/
        void __iomem    *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
        void __iomem    *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
-       struct main_cfg_table   main_cfg_tbl;
-       struct general_status_table     gs_tbl;
-       struct inbound_queue_table      inbnd_q_tbl[PM8001_MAX_INB_NUM];
-       struct outbound_queue_table     outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
+       void __iomem    *pspa_q_tbl_addr;
+                       /*MPI SAS PHY attributes Queue Config Table Addr*/
+       void __iomem    *ivt_tbl_addr; /*MPI IVT Table Addr */
+       union main_cfg_table    main_cfg_tbl;
+       union general_status_table      gs_tbl;
+       struct inbound_queue_table      inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
+       struct outbound_queue_table     outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
+       struct sas_phy_attribute_table  phy_attr_table;
+                                       /* MPI SAS PHY attributes */
        u8                      sas_addr[SAS_ADDR_SIZE];
        struct sas_ha_struct    *sas;/* SCSI/SAS glue */
        struct Scsi_Host        *shost;
@@ -372,10 +443,12 @@ struct pm8001_hba_info {
        struct pm8001_port      port[PM8001_MAX_PHYS];
        u32                     id;
        u32                     irq;
+       u32                     iomb_size; /* SPC and SPCV IOMB size */
        struct pm8001_device    *devices;
        struct pm8001_ccb_info  *ccb_info;
 #ifdef PM8001_USE_MSIX
-       struct msix_entry       msix_entries[16];/*for msi-x interrupt*/
+       struct msix_entry       msix_entries[PM8001_MAX_MSIX_VEC];
+                                       /*for msi-x interrupt*/
        int                     number_of_intr;/*will be used in remove()*/
 #endif
 #ifdef PM8001_USE_TASKLET
@@ -383,7 +456,10 @@ struct pm8001_hba_info {
 #endif
        u32                     logging_level;
        u32                     fw_status;
+       u32                     smp_exp_mode;
+       u32                     int_vector;
        const struct firmware   *fw_image;
+       u8                      outq[PM8001_MAX_MSIX_VEC];
 };
 
 struct pm8001_work {
@@ -419,6 +495,9 @@ struct pm8001_fw_image_header {
 #define FLASH_UPDATE_DNLD_NOT_SUPPORTED                0x10
 #define FLASH_UPDATE_DISABLED                  0x11
 
+#define        NCQ_READ_LOG_FLAG                       0x80000000
+#define        NCQ_ABORT_ALL_FLAG                      0x40000000
+#define        NCQ_2ND_RLE_FLAG                        0x20000000
 /**
  * brief param structure for firmware flash update.
  */
@@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev);
 void pm8001_dev_gone(struct domain_device *dev);
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
 int pm8001_I_T_nexus_reset(struct domain_device *dev);
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
 int pm8001_query_task(struct sas_task *task);
 void pm8001_open_reject_retry(
        struct pm8001_hba_info *pm8001_ha,
@@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
        dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
        u32 mem_size, u32 align);
 
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+                       struct inbound_queue_table *circularQ,
+                       u32 opCode, void *payload, u32 responseQueue);
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
+                               u16 messageSize, void **messagePtr);
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+                       struct outbound_queue_table *circularQ, u8 bc);
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+                       struct outbound_queue_table *circularQ,
+                       void **messagePtr1, u8 *pBC);
+int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+                       struct pm8001_device *pm8001_dev, u32 state);
+int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+                                       void *payload);
+int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
+                                       void *fw_flash_updata_info, u32 tag);
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+                               struct pm8001_ccb_info *ccb,
+                               struct pm8001_tmf_task *tmf);
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+                               struct pm8001_device *pm8001_dev,
+                               u8 flag, u32 task_tag, u32 cmd_tag);
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
+void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
+void pm8001_work_fn(struct work_struct *work);
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha,
+                                       void *data, int handler);
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate);
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr);
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i);
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+struct sas_task *pm8001_alloc_task(void);
+void pm8001_task_done(struct sas_task *task);
+void pm8001_free_task(struct sas_task *task);
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+                                       u32 device_id);
+int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
+
 int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
 
 /* ctl shared API */
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
new file mode 100644 (file)
index 0000000..302514d
--- /dev/null
@@ -0,0 +1,4130 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 PMC-Sierra, Inc.,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+ #include <linux/slab.h>
+ #include "pm8001_sas.h"
+ #include "pm80xx_hwi.h"
+ #include "pm8001_chips.h"
+ #include "pm8001_ctl.h"
+
+#define SMP_DIRECT 1
+#define SMP_INDIRECT 2
+/**
+ * read_main_config_table - read the main configuration table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature    =
+               pm8001_mr32(address, MAIN_SIGNATURE_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev =
+               pm8001_mr32(address, MAIN_INTERFACE_REVISION);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev =
+               pm8001_mr32(address, MAIN_FW_REVISION);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io   =
+               pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl      =
+               pm8001_mr32(address, MAIN_MAX_SGL_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag =
+               pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset   =
+               pm8001_mr32(address, MAIN_GST_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset =
+               pm8001_mr32(address, MAIN_IBQ_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset =
+               pm8001_mr32(address, MAIN_OBQ_OFFSET);
+
+       /* read Error Dump Offset and Length */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
+
+       /* read GPIO LED settings from the configuration table */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping =
+               pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET);
+
+       /* read analog Setting offset from the configuration table */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset =
+               pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset =
+               pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
+               pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
+}
+
+/**
+ * read_general_status_table - read the general status table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->general_stat_tbl_addr;
+       pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate   =
+                       pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0   =
+                       pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1   =
+                       pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt          =
+                       pm8001_mr32(address, GST_MSGUTCNT_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt           =
+                       pm8001_mr32(address, GST_IOPTCNT_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val     =
+                       pm8001_mr32(address, GST_GPIO_INPUT_VAL);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET0);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET1);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET2);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET3);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET4);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET5);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET6);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] =
+                        pm8001_mr32(address, GST_RERRINFO_OFFSET7);
+}
+/**
+ * read_phy_attr_table - read the phy attribute table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->pspa_q_tbl_addr;
+       pm8001_ha->phy_attr_table.phystart1_16[0] =
+                       pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[1] =
+                       pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[2] =
+                       pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[3] =
+                       pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[4] =
+                       pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[5] =
+                       pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[6] =
+                       pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[7] =
+                       pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[8] =
+                       pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[9] =
+                       pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[10] =
+                       pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[11] =
+                       pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[12] =
+                       pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[13] =
+                       pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[14] =
+                       pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[15] =
+                       pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET);
+
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET);
+
+}
+
+/**
+ * read_inbnd_queue_table - read the inbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               u32 offset = i * 0x20;
+               pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+                       get_pci_bar_index(pm8001_mr32(address,
+                               (offset + IB_PIPCI_BAR)));
+               pm8001_ha->inbnd_q_tbl[i].pi_offset =
+                       pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET));
+       }
+}
+
+/**
+ * read_outbnd_queue_table - read the outbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               u32 offset = i * 0x24;
+               pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
+                       get_pci_bar_index(pm8001_mr32(address,
+                               (offset + OB_CIPCI_BAR)));
+               pm8001_ha->outbnd_q_tbl[i].ci_offset =
+                       pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET));
+       }
+}
+
+/**
+ * init_default_table_values - init the default table.
+ * @pm8001_ha: our hba card information
+ */
+static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       u32 offsetib, offsetob;
+       void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
+       void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr         =
+               pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr         =
+               pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size               =
+                                                       PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity           = 0x01;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr     =
+               pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr     =
+               pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size           =
+                                                       PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity       = 0x01;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt          = 0x01;
+
+       /* Disable end to end CRC checking */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
+
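+       /*
+        * Each inbound queue entry below packs its properties into a single
+        * word: queue depth (PM8001_MPI_QUEUE) in the low bits, the 64-byte
+        * IOMB element size in bits 16-29 and the queue priority in bit 30
+        * (0 = normal priority here), as the field name element_pri_size_cnt
+        * suggests.
+        */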
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
+                       PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+               pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
+                       pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+               pm8001_ha->inbnd_q_tbl[i].lower_base_addr       =
+                       pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+               pm8001_ha->inbnd_q_tbl[i].base_virt             =
+                       (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+               pm8001_ha->inbnd_q_tbl[i].total_length          =
+                       pm8001_ha->memoryMap.region[IB + i].total_len;
+               pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr    =
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+               pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr    =
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+               pm8001_ha->inbnd_q_tbl[i].ci_virt               =
+                       pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+               offsetib = i * 0x20;
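+               /* 0x14 / 0x18 are the producer index PCI BAR and offset
+                * fields, matching IB_PIPCI_BAR / IB_PIPCI_BAR_OFFSET as
+                * used in read_inbnd_queue_table() */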
+               pm8001_ha->inbnd_q_tbl[i].pi_pci_bar            =
+                       get_pci_bar_index(pm8001_mr32(addressib,
+                               (offsetib + 0x14)));
+               pm8001_ha->inbnd_q_tbl[i].pi_offset             =
+                       pm8001_mr32(addressib, (offsetib + 0x18));
+               pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
+               pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
+       }
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
+                       PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+               pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+               pm8001_ha->outbnd_q_tbl[i].lower_base_addr      =
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+               pm8001_ha->outbnd_q_tbl[i].base_virt            =
+                       (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+               pm8001_ha->outbnd_q_tbl[i].total_length         =
+                       pm8001_ha->memoryMap.region[OB + i].total_len;
+               pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr   =
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+               pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr   =
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+               /* interrupt vector based on oq */
+               pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
+               pm8001_ha->outbnd_q_tbl[i].pi_virt              =
+                       pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+               offsetob = i * 0x24;
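+               /* 0x14 / 0x18 are the consumer index PCI BAR and offset
+                * fields, matching OB_CIPCI_BAR / OB_CIPCI_BAR_OFFSET as
+                * used in read_outbnd_queue_table() */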
+               pm8001_ha->outbnd_q_tbl[i].ci_pci_bar           =
+                       get_pci_bar_index(pm8001_mr32(addressob,
+                       offsetob + 0x14));
+               pm8001_ha->outbnd_q_tbl[i].ci_offset            =
+                       pm8001_mr32(addressob, (offsetob + 0x18));
+               pm8001_ha->outbnd_q_tbl[i].consumer_idx         = 0;
+               pm8001_ha->outbnd_q_tbl[i].producer_index       = 0;
+       }
+}
+
+/**
+ * update_main_config_table - write the main configuration table back to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+       pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd);
+       pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr);
+       pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr);
+       pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
+       pm8001_mw32(address, MAIN_EVENT_LOG_OPTION,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+       pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+       pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
+
+       /* SPCv specific */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF;
+       /* Set GPIOLED to 0x2 for LED indicator */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
+       pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+
+       pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
+       pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
+}
+
+/**
+ * update_inbnd_queue_table - update the inbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+                                        int number)
+{
+       void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+       u16 offset = number * 0x20;
+       pm8001_mw32(address, offset + IB_PROPERITY_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
+       pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
+       pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
+       pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
+       pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+}
+
+/**
+ * update_outbnd_queue_table - update the outbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+                                                int number)
+{
+       void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+       u16 offset = number * 0x24;
+       pm8001_mw32(address, offset + OB_PROPERITY_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
+       pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
+       pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
+       pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
+       pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
+       pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+}
+
+/**
+ * mpi_init_check - check firmware initialization status.
+ * @pm8001_ha: our hba card information
+ */
+static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 max_wait_count;
+       u32 value;
+       u32 gst_len_mpistate;
+
+       /* Write bit0=1 to the Inbound DoorBell Register to tell the SPCv FW
+        * that the configuration table has been updated */
+       pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+               value &= SPCv_MSGU_CFG_TABLE_UPDATE;
+       } while ((value != 0) && (--max_wait_count));
+
+       if (!max_wait_count)
+               return -1;
+       /* check the MPI-State for initialization, up to 100 ms */
+       max_wait_count = 100 * 1000;/* 100 msec */
+       do {
+               udelay(1);
+               gst_len_mpistate =
+                       pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+                                       GST_GSTLEN_MPIS_OFFSET);
+       } while ((GST_MPI_STATE_INIT !=
+               (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
+       if (!max_wait_count)
+               return -1;
+
+       /* check MPI Initialization error */
+       gst_len_mpistate = gst_len_mpistate >> 16;
+       if (0x0000 != gst_len_mpistate)
+               return -1;
+
+       return 0;
+}
+
+/**
+ * check_fw_ready - the LLDD checks if the FW is ready; if not, return an error.
+ * @pm8001_ha: our hba card information
+ */
+static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 value;
+       u32 max_wait_count;
+       u32 max_wait_time;
+       int ret = 0;
+
+       /* reset / PCIe ready */
+       max_wait_time = max_wait_count = 100 * 1000;    /* 100 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while ((value == 0xFFFFFFFF) && (--max_wait_count));
+
+       /* check ila status */
+       max_wait_time = max_wait_count = 1000 * 1000;   /* 1000 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_ILA_READY) !=
+                       SCRATCH_PAD_ILA_READY) && (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" ila ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+       }
+
+       /* check RAAE status */
+       max_wait_time = max_wait_count = 1800 * 1000;   /* 1800 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_RAAE_READY) !=
+                               SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" raae ready status in %d millisec\n",
+                                       (max_wait_time - max_wait_count)));
+       }
+
+       /* check iop0 status */
+       max_wait_time = max_wait_count = 600 * 1000;    /* 600 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
+                       (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" iop0 ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+       }
+
+       /* check iop1 status only for 16 port controllers */
+       if ((pm8001_ha->chip_id != chip_8008) &&
+                       (pm8001_ha->chip_id != chip_8009)) {
+               /* 200 milli sec */
+               max_wait_time = max_wait_count = 200 * 1000;
+               do {
+                       udelay(1);
+                       value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+               } while (((value & SCRATCH_PAD_IOP1_READY) !=
+                               SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
+               if (!max_wait_count)
+                       ret = -1;
+               else {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "iop1 ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+               }
+       }
+
+       return ret;
+}
+
+static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *base_addr;
+       u32     value;
+       u32     offset;
+       u32     pcibar;
+       u32     pcilogic;
+
+       value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+       offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
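+       /* the low 26 bits of scratch pad 0 hold the table offset, the
+        * upper 6 bits select the PCI BAR it lives in (decoded below) */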
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
+                               offset, value));
+       pcilogic = (value & 0xFC000000) >> 26;
+       pcibar = get_pci_bar_index(pcilogic);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+       pm8001_ha->main_cfg_tbl_addr = base_addr =
+               pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
+       pm8001_ha->general_stat_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
+                                       0xFFFFFF);
+       pm8001_ha->inbnd_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) &
+                                       0xFFFFFF);
+       pm8001_ha->outbnd_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) &
+                                       0xFFFFFF);
+       pm8001_ha->ivt_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) &
+                                       0xFFFFFF);
+       pm8001_ha->pspa_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
+                                       0xFFFFFF);
+
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("GST OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("INBND OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("OBND OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("IVT OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("PSPA OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - main cfg %p general status %p\n",
+                       pm8001_ha->main_cfg_tbl_addr,
+                       pm8001_ha->general_stat_tbl_addr));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - inbnd %p obnd %p\n",
+                       pm8001_ha->inbnd_q_tbl_addr,
+                       pm8001_ha->outbnd_q_tbl_addr));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - pspa %p ivt %p\n",
+                       pm8001_ha->pspa_q_tbl_addr,
+                       pm8001_ha->ivt_tbl_addr));
+}
+
+/**
+ * pm80xx_set_thermal_config - send the thermal configuration page to the controller
+ * @pm8001_ha: our hba card information.
+ */
+int
+pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
+{
+       struct set_ctrl_cfg_req payload;
+       struct inbound_queue_table *circularQ;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+       memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+       payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
+                       (THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
+       payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
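+       /* cfg_pg[0]: thermal op/page code in the low byte plus the enable
+        * (bit 8) and log-enable (bit 9) bits; cfg_pg[1]: what appear to be
+        * the local/remote high temperature thresholds (LTEMPHIL, RTEMPHIL) */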
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+       return rc;
+
+}
+
+/**
+ * pm80xx_set_sas_protocol_timer_config - support the SAS Protocol Timer
+ * configuration page
+ * @pm8001_ha: our hba card information.
+ */
+static int
+pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
+{
+       struct set_ctrl_cfg_req payload;
+       struct inbound_queue_table *circularQ;
+       SASProtocolTimerConfig_t SASConfigPage;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+       memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+       memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
+
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+
+       SASConfigPage.pageCode        =  SAS_PROTOCOL_TIMER_CONFIG_PAGE;
+       SASConfigPage.MST_MSI         =  3 << 15;
+       SASConfigPage.STP_SSP_MCT_TMO =  (STP_MCT_TMO << 16) | SSP_MCT_TMO;
+       SASConfigPage.STP_FRM_TMO     = (SAS_MAX_OPEN_TIME << 24) |
+                               (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
+       SASConfigPage.STP_IDLE_TMO    =  STP_IDLE_TIME;
+
+       if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
+               SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
+
+
+       SASConfigPage.OPNRJT_RTRY_INTVL =         (SAS_MFD << 16) |
+                                               SAS_OPNRJT_RTRY_INTVL;
+       SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO =  (SAS_DOPNRJT_RTRY_TMO << 16)
+                                               | SAS_COPNRJT_RTRY_TMO;
+       SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR =  (SAS_DOPNRJT_RTRY_THR << 16)
+                                               | SAS_COPNRJT_RTRY_THR;
+       SASConfigPage.MAX_AIP =  SAS_MAX_AIP;
+
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.pageCode "
+                       "0x%08x\n", SASConfigPage.pageCode));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.MST_MSI "
+                       " 0x%08x\n", SASConfigPage.MST_MSI));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_FRM_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_FRM_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_IDLE_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_IDLE_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL "
+                       " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO "
+                       " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR "
+                       " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
+       PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP "
+                       " 0x%08x\n", SASConfigPage.MAX_AIP));
+
+       memcpy(&payload.cfg_pg, &SASConfigPage,
+                        sizeof(SASProtocolTimerConfig_t));
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_get_encrypt_info - Check for encryption
+ * @pm8001_ha: our hba card information.
+ */
+static int
+pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 scratch3_value;
+       /* default to error if the scratch pad status cannot be decoded */
+       int ret = -1;
+
+       /* Read encryption status from SCRATCH PAD 3 */
+       scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+
+       if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                                       SCRATCH_PAD3_ENC_READY) {
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+               pm8001_ha->encrypt_info.status = 0;
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X."
+                       "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = 0;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
+                                       SCRATCH_PAD3_ENC_DISABLED) {
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
+                       scratch3_value));
+               pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
+               pm8001_ha->encrypt_info.cipher_mode = 0;
+               pm8001_ha->encrypt_info.sec_mode = 0;
+               return 0;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                               SCRATCH_PAD3_ENC_DIS_ERR) {
+               pm8001_ha->encrypt_info.status =
+                       (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X."
+                       "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = -1;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                                SCRATCH_PAD3_ENC_ENA_ERR) {
+
+               pm8001_ha->encrypt_info.status =
+                       (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X."
+                       "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = -1;
+       }
+       return ret;
+}
+
+/**
+ * pm80xx_encrypt_update - update flash with encryption information
+ * @pm8001_ha: our hba card information.
+ */
+static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
+{
+       struct kek_mgmt_req payload;
+       struct inbound_queue_table *circularQ;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_KEK_MANAGEMENT;
+
+       memset(&payload, 0, sizeof(struct kek_mgmt_req));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+       /* Currently only one key is used. New KEK index is 1.
+        * Current KEK index is 1. Store KEK to NVRAM is 1.
+        */
+       payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
+                                       KEK_MGMT_SUBOP_KEYCARDUPDATE);
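+       /* i.e. (new KEK index << 24) | (current KEK index << 16) |
+        * (store KEK to NVRAM << 8) | sub-operation code */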
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_chip_init - the main init function that initializes the whole PM80xx chip.
+ * @pm8001_ha: our hba card information
+ */
+static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
+{
+       int ret;
+       u8 i = 0;
+
+       /* check the firmware status */
+       if (-1 == check_fw_ready(pm8001_ha)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Firmware is not ready!\n"));
+               return -EBUSY;
+       }
+
+       /* Initialize PCI space addresses, e.g. the MPI offset */
+       init_pci_device_addresses(pm8001_ha);
+       init_default_table_values(pm8001_ha);
+       read_main_config_table(pm8001_ha);
+       read_general_status_table(pm8001_ha);
+       read_inbnd_queue_table(pm8001_ha);
+       read_outbnd_queue_table(pm8001_ha);
+       read_phy_attr_table(pm8001_ha);
+
+       /* update main config table, inbound table and outbound table */
+       update_main_config_table(pm8001_ha);
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
+               update_inbnd_queue_table(pm8001_ha, i);
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
+               update_outbnd_queue_table(pm8001_ha, i);
+
+       /* notify firmware update finished and check initialization status */
+       if (0 == mpi_init_check(pm8001_ha)) {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("MPI initialize successful!\n"));
+       } else
+               return -EBUSY;
+
+       /* send SAS protocol timer configuration page to FW */
+       ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+
+       /* Check for encryption */
+       if (pm8001_ha->chip->encrypt) {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("Checking for encryption\n"));
+               ret = pm80xx_get_encrypt_info(pm8001_ha);
+               if (ret == -1) {
+                       PM8001_INIT_DBG(pm8001_ha,
+                               pm8001_printk("Encryption error !!\n"));
+                       if (pm8001_ha->encrypt_info.status == 0x81) {
+                               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                                       "Encryption enabled with error."
+                                       "Saving encryption key to flash\n"));
+                               pm80xx_encrypt_update(pm8001_ha);
+                       }
+               }
+       }
+       return 0;
+}
+
+static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 max_wait_count;
+       u32 value;
+       u32 gst_len_mpistate;
+       init_pci_device_addresses(pm8001_ha);
+       /* Write bit1=1 to the Inbound DoorBell Register to tell the SPCv FW
+        * that the configuration table is being stopped */
+       pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
+
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;       /* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+               value &= SPCv_MSGU_CFG_TABLE_RESET;
+       } while ((value != 0) && (--max_wait_count));
+
+       if (!max_wait_count) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("TIMEOUT:IBDB value/=%x\n", value));
+               return -1;
+       }
+
+       /* check the MPI-State for termination in progress */
+       /* wait up to 2 sec until the MPI state reports UNINIT */
+       max_wait_count = 2 * 1000 * 1000;       /* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               gst_len_mpistate =
+                       pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+                       GST_GSTLEN_MPIS_OFFSET);
+               if (GST_MPI_STATE_UNINIT ==
+                       (gst_len_mpistate & GST_MPI_STATE_MASK))
+                       break;
+       } while (--max_wait_count);
+       if (!max_wait_count) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TIME OUT MPI State = 0x%x\n",
+                               gst_len_mpistate & GST_MPI_STATE_MASK));
+               return -1;
+       }
+
+       return 0;
+}
+
+/**
+ * pm80xx_chip_soft_rst - soft reset the PM80xx chip, so that all FW register
+ * state is cleared back to its original status.
+ * @pm8001_ha: our hba card information
+ */
+
+static int
+pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 regval;
+       u32 bootloader_state;
+
+       /* Check if MPI is in ready state to reset */
+       if (mpi_uninit_check(pm8001_ha) != 0) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("MPI state is not ready\n"));
+               return -1;
+       }
+
+       /* check for the reset register normal state: 0x0 */
+       regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("reset register before write : 0x%x\n", regval));
+
+       pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
+       mdelay(500);
+
+       regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("reset register after write 0x%x\n", regval));
+
+       if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
+                       SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" soft reset successful [regval: 0x%x]\n",
+                                       regval));
+       } else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" soft reset failed [regval: 0x%x]\n",
+                                       regval));
+
+               /* check whether the bootloader executed successfully or is in HDA mode */
+               bootloader_state =
+                       pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
+                       SCRATCH_PAD1_BOOTSTATE_MASK;
+
+               if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode SEEPROM\n"));
+               } else if (bootloader_state ==
+                               SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode Bootstrap Pin\n"));
+               } else if (bootloader_state ==
+                               SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode soft reset\n"));
+               } else if (bootloader_state ==
+                                       SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state-HDA mode critical error\n"));
+               }
+               return -EBUSY;
+       }
+
+       /* check the firmware status after reset */
+       if (-1 == check_fw_ready(pm8001_ha)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Firmware is not ready!\n"));
+               return -EBUSY;
+       }
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("SPCv soft reset Complete\n"));
+       return 0;
+}
+
+static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
+{
+        u32 i;
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("chip reset start\n"));
+
+       /* do SPCv chip reset. */
+       pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("SPC soft reset Complete\n"));
+
+       /* TODO: check whether this delay is actually required */
+       /* delay 10 usec */
+       udelay(10);
+
+       /* wait for 20 msec until the firmware gets reloaded */
+       i = 20;
+       do {
+               mdelay(1);
+       } while ((--i) != 0);
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("chip reset finished\n"));
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_enable - enable PM80xx chip INT-X interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_disable - disable PM80xx chip INT-X interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
+}
+
+/**
+ * pm80xx_chip_interrupt_enable - enable PM80xx chip interrupt
+ * @pm8001_ha: our hba card information
+ * @vec: MSI-X interrupt vector number
+ */
+static void
+pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+       u32 mask;
+       mask = (u32)(1 << vec);
+
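+       /* clearing a vector's bit in the outbound doorbell mask register
+        * (ODMR) unmasks that MSI-X vector's interrupt */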
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+       return;
+#endif
+       pm80xx_chip_intx_interrupt_enable(pm8001_ha);
+
+}
+
+/**
+ * pm80xx_chip_interrupt_disable - disable PM80xx chip interrupt
+ * @pm8001_ha: our hba card information
+ * @vec: MSI-X interrupt vector number (0xFF disables all vectors)
+ */
+static void
+pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+       u32 mask;
+       if (vec == 0xFF)
+               mask = 0xFFFFFFFF;
+       else
+               mask = (u32)(1 << vec);
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+       return;
+#endif
+       pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+}
+
+static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct task_abort_req task_abort;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_ABORT;
+       int ret;
+
+       if (!pm8001_ha_dev) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+               return;
+       }
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+                                               "allocate task\n"));
+               return;
+       }
+
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               sas_free_task(task);
+               return;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&task_abort, 0, sizeof(task_abort));
+       task_abort.abort_all = cpu_to_le32(1);
+       task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       task_abort.tag = cpu_to_le32(ccb_tag);
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+
+}
+
+static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       struct sata_start_req sata_cmd;
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct host_to_dev_fis fis;
+       struct domain_device *dev;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate task !!!\n"));
+               return;
+       }
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate tag !!!\n"));
+               sas_free_task(task);
+               return;
+       }
+
+       /* allocate domain device by ourselves as libsas
+        * is not going to provide any
+        */
+       dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+       if (!dev) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Domain device cannot be allocated\n"));
+               sas_free_task(task);
+               return;
+       } else {
+               task->dev = dev;
+               task->dev->lldd_dev = pm8001_ha_dev;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+       pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+       pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* construct read log FIS */
+       memset(&fis, 0, sizeof(struct host_to_dev_fis));
+       fis.fis_type = 0x27;
+       fis.flags = 0x80;
+       fis.command = ATA_CMD_READ_LOG_EXT;
+       fis.lbal = 0x10;
+       fis.sector_count = 0x1;
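+       /* log address 0x10 is the SATA NCQ command error log; one sector */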
+
+       sata_cmd.tag = cpu_to_le32(ccb_tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+       memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+       res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+
+}
+
+/**
+ * mpi_ssp_completion - process the FW response to an SSP request
+ * @pm8001_ha: our hba card information
+ * @piomb: the message contents of this outbound message.
+ *
+ * When the FW has completed an SSP request (for example an IO request) and
+ * has filled the SG buffers with the data, it raises this event to indicate
+ * that the job is finished; the corresponding buffer can then be examined.
+ * We notify the caller that may be waiting on the result, so the upper
+ * layer can be told that the task has finished.
+ */
+static void
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       unsigned long flags;
+       u32 status;
+       u32 param;
+       u32 tag;
+       struct ssp_completion_resp *psspPayload;
+       struct task_status_struct *ts;
+       struct ssp_response_iu *iu;
+       struct pm8001_device *pm8001_dev;
+       psspPayload = (struct ssp_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psspPayload->status);
+       tag = le32_to_cpu(psspPayload->tag);
+       ccb = &pm8001_ha->ccb_info[tag];
+       if ((status == IO_ABORTED) && ccb->open_retry) {
+               /* Being completed by another */
+               ccb->open_retry = 0;
+               return;
+       }
+       pm8001_dev = ccb->device;
+       param = le32_to_cpu(psspPayload->param);
+       t = ccb->task;
+
+       if (status && status != IO_UNDERFLOW)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("sas IO status 0x%x\n", status));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+       ts = &t->task_status;
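+       /* map the firmware IO status code onto the libsas resp/stat fields */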
+       switch (status) {
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_SUCCESS, param = 0x%x\n",
+                               param));
+               if (param == 0) {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+               } else {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_PROTO_RESPONSE;
+                       ts->residual = param;
+                       iu = &psspPayload->ssp_resp_iu;
+                       sas_ssp_task_response(pm8001_ha->dev, t, iu);
+               }
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB Tag\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               break;
+       case IO_UNDERFLOW:
+               /* SSP Completion with error */
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_UNDERFLOW, param = 0x%x\n",
+                               param));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               ts->residual = param;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               /* Force the midlayer to retry */
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_DMA:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_XFER_ERROR_DMA\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_DS_NON_OPERATIONAL);
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_TM_TAG_NOT_FOUND:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       }
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("scsi_status = 0x%x\n",
+               psspPayload->ssp_resp_iu.status));
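+       /* mark the task done; if the upper layer has already aborted it,
+        * only free the ccb, otherwise complete the task via task_done()
+        */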
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with io_status 0x%x resp 0x%x "
+                       "stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       unsigned long flags;
+       struct task_status_struct *ts;
+       struct pm8001_ccb_info *ccb;
+       struct pm8001_device *pm8001_dev;
+       struct ssp_event_resp *psspPayload =
+               (struct ssp_event_resp *)(piomb + 4);
+       u32 event = le32_to_cpu(psspPayload->event);
+       u32 tag = le32_to_cpu(psspPayload->tag);
+       u32 port_id = le32_to_cpu(psspPayload->port_id);
+
+       ccb = &pm8001_ha->ccb_info[tag];
+       t = ccb->task;
+       pm8001_dev = ccb->device;
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("sas IO status 0x%x\n", event));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+       ts = &t->task_status;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+                               port_id, tag, event));
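+       /* translate the firmware SSP event code into libsas task status */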
+       switch (event) {
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+               return;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
+               return;
+       case IO_XFER_ERROR_UNEXPECTED_PHASE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_INTERNAL_CRC_ERROR\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_CMD_FRAME_ISSUED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+               return;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", event));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with event 0x%x resp 0x%x "
+                       "stat 0x%x but aborted by upper layer!\n",
+                       t, event, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       u32 param;
+       u32 status;
+       u32 tag;
+       struct sata_completion_resp *psataPayload;
+       struct task_status_struct *ts;
+       struct ata_task_resp *resp;
+       u32 *sata_resp;
+       struct pm8001_device *pm8001_dev;
+       unsigned long flags;
+
+       psataPayload = (struct sata_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psataPayload->status);
+       tag = le32_to_cpu(psataPayload->tag);
+
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("tag null\n"));
+               return;
+       }
+       ccb = &pm8001_ha->ccb_info[tag];
+       param = le32_to_cpu(psataPayload->param);
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ccb null\n"));
+               return;
+       }
+
+       if (t) {
+               if (t->dev && (t->dev->lldd_dev))
+                       pm8001_dev = t->dev->lldd_dev;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task null\n"));
+               return;
+       }
+
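+       /* the internally issued READ LOG EXT command carries no upper-layer
+        * lldd_task, so this check is bypassed while NCQ_READ_LOG_FLAG is set
+        */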
+       if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+               && unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
+       ts = &t->task_status;
+       if (!ts) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ts null\n"));
+               return;
+       }
+
+       switch (status) {
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+               if (param == 0) {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       /* check if response is for SEND READ LOG */
+                       if (pm8001_dev &&
+                               (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+                               /* set new bit for abort_all */
+                               pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+                               /* clear bit for read log */
+                               pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+                               pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
+                               /* Free the tag */
+                               pm8001_tag_free(pm8001_ha, tag);
+                               sas_free_task(t);
+                               return;
+                       }
+               } else {
+                       u8 len;
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_PROTO_RESPONSE;
+                       ts->residual = param;
+                       PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
+                               param));
+                       sata_resp = &psataPayload->sata_resp[0];
+                       resp = (struct ata_task_resp *)ts->buf;
+                       if (t->ata_task.dma_xfer == 0 &&
+                       t->data_dir == PCI_DMA_FROMDEVICE) {
+                               len = sizeof(struct pio_setup_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("PIO read len = %d\n", len));
+                       } else if (t->ata_task.use_ncq) {
+                               len = sizeof(struct set_dev_bits_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                                       pm8001_printk("FPDMA len = %d\n", len));
+                       } else {
+                               len = sizeof(struct dev_to_host_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("other len = %d\n", len));
+                       }
+                       if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+                               resp->frame_len = len;
+                               memcpy(&resp->ending_fis[0], sata_resp, len);
+                               ts->buf_valid_size = sizeof(*resp);
+                       } else
+                               PM8001_IO_DBG(pm8001_ha,
+                                       pm8001_printk("response too large\n"));
+               }
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB Tag\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+               /* the following cases are still to be handled (TODO) */
+       case IO_UNDERFLOW:
+               /* SATA Completion with error */
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               ts->residual = param;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_INTERRUPTED;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*in order to force CPU ordering*/
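+                       /* pm8001_ha->lock is held by the caller; drop it
+                        * across the task_done() callback
+                        */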
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/* ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_DMA:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_DMA\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               break;
+       case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha, pm8001_dev,
+                                       IO_DS_NON_OPERATIONAL);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_IN_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha, pm8001_dev,
+                                       IO_DS_IN_ERROR);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task 0x%p done with io_status 0x%x"
+                       " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else if (t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* ditto */
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/*ditto*/
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct task_status_struct *ts;
+       struct pm8001_ccb_info *ccb;
+       struct pm8001_device *pm8001_dev;
+       struct sata_event_resp *psataPayload =
+               (struct sata_event_resp *)(piomb + 4);
+       u32 event = le32_to_cpu(psataPayload->event);
+       u32 tag = le32_to_cpu(psataPayload->tag);
+       u32 port_id = le32_to_cpu(psataPayload->port_id);
+       u32 dev_id = le32_to_cpu(psataPayload->device_id);
+       unsigned long flags;
+
+       ccb = &pm8001_ha->ccb_info[tag];
+
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("No CCB, returning\n"));
+               return;
+       }
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("SATA EVENT 0x%x\n", event));
+
+       /* Check if this is NCQ error */
+       if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+               /* find device using device id */
+               pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+               /* send read log extension */
+               if (pm8001_dev)
+                       pm80xx_send_read_log(pm8001_ha, pm8001_dev);
+               return;
+       }
+
+       if (unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
+       ts = &t->task_status;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+                               port_id, tag, event));
+       switch (event) {
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_INTERRUPTED;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_PEER_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_UNEXPECTED_PHASE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_CMD_FRAME_ISSUED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+               break;
+       case IO_XFER_PIO_SETUP_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_INTERNAL_CRC_ERROR\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_DMA_ACTIVATE_TIMEOUT:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_DMA_ACTIVATE_TIMEOUT\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", event));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task 0x%p done with io_status 0x%x"
+                       " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+                       t, event, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else if (t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* ditto */
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/*ditto*/
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       }
+}
+
+/*See the comments for mpi_ssp_completion */
+static void
+mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       u32 param, i;
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       unsigned long flags;
+       u32 status;
+       u32 tag;
+       struct smp_completion_resp *psmpPayload;
+       struct task_status_struct *ts;
+       struct pm8001_device *pm8001_dev;
+       char *pdma_respaddr = NULL;
+
+       psmpPayload = (struct smp_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psmpPayload->status);
+       tag = le32_to_cpu(psmpPayload->tag);
+
+       ccb = &pm8001_ha->ccb_info[tag];
+       param = le32_to_cpu(psmpPayload->param);
+       t = ccb->task;
+       ts = &t->task_status;
+       pm8001_dev = ccb->device;
+       if (status)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("smp IO status 0x%x\n", status));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+
+       switch (status) {
+
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_GOOD;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
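+               /* in direct-response mode the SMP reply is returned inline
+                * in the IOMB, so copy it into the libsas response buffer
+                */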
+               if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+                       PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("DIRECT RESPONSE Length:%d\n",
+                                               param));
+                       pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
+                                               ((u64)sg_dma_address
+                                               (&t->smp_task.smp_resp))));
+                       for (i = 0; i < param; i++) {
+                               *(pdma_respaddr+i) = psmpPayload->_r_a[i];
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
+                                       i, *(pdma_respaddr+i),
+                                       psmpPayload->_r_a[i]));
+                       }
+               }
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_ERROR_HW_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_RX_FRAME:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_ERROR_INTERNAL_SMP_RESOURCE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_QUEUE_FULL;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               /* not allowed case. Therefore, return failed status */
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with io_status 0x%x resp 0x%x "
+                       "stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/**
+ * pm80xx_hw_event_ack_req - some hardware events need to be acknowledged to the FW.
+ * @pm8001_ha: our hba card information
+ * @Qnum: the inbound queue number used to send the acknowledgement.
+ * @SEA: source of event to ack
+ * @port_id: port id.
+ * @phyId: phy id.
+ * @param0: parameter 0.
+ * @param1: parameter 1.
+ */
+static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
+       u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
+{
+       struct hw_event_ack_req  payload;
+       u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
+
+       struct inbound_queue_table *circularQ;
+
+       memset((u8 *)&payload, 0, sizeof(payload));
+       circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
+       payload.tag = cpu_to_le32(1);
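+       /* phyid_sea_portid packs the port id in bits 0-7, the event source
+        * (SEA) in bits 8-23 and the phy id in bits 24-31 */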
+       payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
+               ((phyId & 0xFF) << 24) | (port_id & 0xFF));
+       payload.param0 = cpu_to_le32(param0);
+       payload.param1 = cpu_to_le32(param1);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+       u32 phyId, u32 phy_op);
+
+/**
+ * hw_event_sas_phy_up - FW reports a SAS phy up event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+
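+       /* lr_status_evt_portid: link rate in bits 28-31, port id in bits 0-7;
+        * phyid_npip_portstate: phy id in bits 16-23, port state in bits 0-3 */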
+       u8 link_rate =
+               (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       unsigned long flags;
+       u8 deviceType = pPayload->sas_identify.dev_type;
+       port->port_state = portstate;
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "portid:%d; phyid:%d; linkrate:%d; "
+               "portstate:%x; devicetype:%x\n",
+               port_id, phy_id, link_rate, portstate, deviceType));
+
+       switch (deviceType) {
+       case SAS_PHY_UNUSED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("device type: no device attached\n"));
+               break;
+       case SAS_END_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+               pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
+                       PHY_NOTIFY_ENABLE_SPINUP);
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       case SAS_EDGE_EXPANDER_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("expander device.\n"));
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       case SAS_FANOUT_EXPANDER_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("fanout expander device.\n"));
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("unknown device type(%x)\n", deviceType));
+               break;
+       }
+       phy->phy_type |= PORT_TYPE_SAS;
+       phy->identify.device_type = deviceType;
+       phy->phy_attached = 1;
+       if (phy->identify.device_type == SAS_END_DEVICE)
+               phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
+       else if (phy->identify.device_type != SAS_PHY_UNUSED)
+               phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+       phy->sas_phy.oob_mode = SAS_OOB_MODE;
+       sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+       spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+       memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+               sizeof(struct sas_identify_frame)-4);
+       phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
+       pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+       spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+       if (pm8001_ha->flags == PM8001F_RUN_TIME)
+               mdelay(200);/* delay a moment to wait for the disk to spin up */
+       pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_sata_phy_up - FW reports a SATA phy up event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u8 link_rate =
+               (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       unsigned long flags;
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "port id %d, phy id %d link_rate %d portstate 0x%x\n",
+                               port_id, phy_id, link_rate, portstate));
+
+       port->port_state = portstate;
+       port->port_attached = 1;
+       pm8001_get_lrate_mode(phy, link_rate);
+       phy->phy_type |= PORT_TYPE_SATA;
+       phy->phy_attached = 1;
+       phy->sas_phy.oob_mode = SATA_OOB_MODE;
+       sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+       spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
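+       /* copy the received D2H register FIS into the libsas frame-received buffer */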
+       memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+               sizeof(struct dev_to_host_fis));
+       phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+       phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+       phy->identify.device_type = SAS_SATA_DEV;
+       pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+       spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+       pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down - notify libsas that the phy is down.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       port->port_state = portstate;
+       phy->phy_type = 0;
+       phy->identify.device_type = 0;
+       phy->phy_attached = 0;
+       memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
+       switch (portstate) {
+       case PORT_VALID:
+               break;
+       case PORT_INVALID:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" PortInvalid portID %d\n", port_id));
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Last phy Down and port invalid\n"));
+               port->port_attached = 0;
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+                       port_id, phy_id, 0, 0);
+               break;
+       case PORT_IN_RESET:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Port In Reset portID %d\n", port_id));
+               break;
+       case PORT_NOT_ESTABLISHED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+               port->port_attached = 0;
+               break;
+       case PORT_LOSTCOMM:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Last phy Down and port invalid\n"));
+               port->port_attached = 0;
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+                       port_id, phy_id, 0, 0);
+               break;
+       default:
+               port->port_attached = 0;
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and(default) = 0x%x\n",
+                       portstate));
+               break;
+
+       }
+}
+
+static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct phy_start_resp *pPayload =
+               (struct phy_start_resp *)(piomb + 4);
+       u32 status =
+               le32_to_cpu(pPayload->status);
+       u32 phy_id =
+               le32_to_cpu(pPayload->phyid);
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
+                               status, phy_id));
+       if (status == 0) {
+               phy->phy_state = 1;
+               if (pm8001_ha->flags == PM8001F_RUN_TIME)
+                       complete(phy->enable_completion);
+       }
+       return 0;
+
+}
+
+/**
+ * mpi_thermal_hw_event - a thermal hw event has arrived.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct thermal_hw_event *pPayload =
+               (struct thermal_hw_event *)(piomb + 4);
+
+       u32 thermal_event = le32_to_cpu(pPayload->thermal_event);
+       u32 rht_lht = le32_to_cpu(pPayload->rht_lht);
+
+       if (thermal_event & 0x40) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Local high temperature violated!\n"));
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Measured local high temperature %d\n",
+                               ((rht_lht & 0xFF00) >> 8)));
+       }
+       if (thermal_event & 0x10) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Remote high temperature violated!\n"));
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Measured remote high temperature %d\n",
+                               ((rht_lht & 0xFF000000) >> 24)));
+       }
+       return 0;
+}
+
+/**
+ * mpi_hw_event - a hw event has arrived.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       unsigned long flags;
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
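+       /* lr_status_evt_portid: port id in bits 0-7, event type in bits 8-23,
+        * status in bits 24-27; phy id is in bits 16-23 of phyid_npip_portstate */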
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u16 eventType =
+               (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
+       u8 status =
+               (u8)((lr_status_evt_portid & 0x0F000000) >> 24);
+
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+       PM8001_MSG_DBG(pm8001_ha,
+               pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
+                               port_id, phy_id, eventType, status));
+
+       switch (eventType) {
+
+       case HW_EVENT_SAS_PHY_UP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SAS_PHY_UP\n"));
+               hw_event_sas_phy_up(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_SATA_PHY_UP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+               hw_event_sata_phy_up(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_SATA_SPINUP_HOLD:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+               break;
+       case HW_EVENT_PHY_DOWN:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PHY_DOWN\n"));
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+               phy->phy_attached = 0;
+               phy->phy_state = 0;
+               hw_event_phy_down(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_PORT_INVALID:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       /* broadcast change primitive received; tell libsas about this event
+        * so it revalidates the sas domain */
+       case HW_EVENT_BROADCAST_CHANGE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
+                       port_id, phy_id, 1, 0);
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_PHY_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+               sas_phy_disconnected(&phy->sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+               break;
+       case HW_EVENT_BROADCAST_EXP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_LINK_ERR_INVALID_DWORD:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_DISPARITY_ERROR,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_CODE_VIOLATION:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_CODE_VIOLATION,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_MALFUNCTION:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+               break;
+       case HW_EVENT_BROADCAST_SES:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_INBOUND_CRC_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_INBOUND_CRC_ERROR,
+                       port_id, phy_id, 0, 0);
+               break;
+       case HW_EVENT_HARD_RESET_RECEIVED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
+               sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+               break;
+       case HW_EVENT_ID_FRAME_TIMEOUT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RESET_TIMER_TMO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_PORT_RECOVERY_TIMER_TMO,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RECOVER:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+               break;
+       case HW_EVENT_PORT_RESET_COMPLETE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+               break;
+       case EVENT_BROADCAST_ASYNCH_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("Unknown event type 0x%x\n", eventType));
+               break;
+       }
+       return 0;
+}
+
+/**
+ * mpi_phy_stop_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct phy_stop_resp *pPayload =
+               (struct phy_stop_resp *)(piomb + 4);
+       u32 status =
+               le32_to_cpu(pPayload->status);
+       u32 phyid =
+               le32_to_cpu(pPayload->phyid);
+       struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("phy:0x%x status:0x%x\n",
+                                       phyid, status));
+       if (status == 0)
+               phy->phy_state = 0;
+       return 0;
+}
+
+/**
+ * mpi_set_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       struct set_ctrl_cfg_resp *pPayload =
+                       (struct set_ctrl_cfg_resp *)(piomb + 4);
+       u32 status = le32_to_cpu(pPayload->status);
+       u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
+
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
+                       status, err_qlfr_pgcd));
+
+       return 0;
+}
+
+/**
+ * mpi_get_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_get_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_flash_op_ext_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_set_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_kek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4);
+
+       u32 status = le32_to_cpu(pPayload->status);
+       u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop);
+       u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr);
+
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
+               status, kidx_new_curr_ksop, err_qlfr));
+
+       return 0;
+}
+
+/**
+ * mpi_dek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * ssp_coalesced_comp_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * process_one_iomb - process one outbound Queue memory block
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       __le32 pHeader = *(__le32 *)piomb;
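+       /* the outbound IOMB opcode is carried in the low 12 bits of the header dword */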
+       u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
+
+       switch (opc) {
+       case OPC_OUB_ECHO:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+               break;
+       case OPC_OUB_HW_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_HW_EVENT\n"));
+               mpi_hw_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_THERM_HW_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_THERMAL_EVENT\n"));
+               mpi_thermal_hw_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_COMP\n"));
+               mpi_ssp_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SMP_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SMP_COMP\n"));
+               mpi_smp_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_LOCAL_PHY_CNTRL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+               pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEV_REGIST:
+               PM8001_MSG_DBG(pm8001_ha,
+               pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+               pm8001_mpi_reg_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEREG_DEV:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("unregister the device\n"));
+               pm8001_mpi_dereg_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_DEV_HANDLE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+               break;
+       case OPC_OUB_SATA_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_COMP\n"));
+               mpi_sata_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SATA_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_EVENT\n"));
+               mpi_sata_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+               mpi_ssp_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEV_HANDLE_ARRIV:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+               /*This is for target*/
+               break;
+       case OPC_OUB_SSP_RECV_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+               /*This is for target*/
+               break;
+       case OPC_OUB_FW_FLASH_UPDATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+               pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GPIO_RESPONSE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+               break;
+       case OPC_OUB_GPIO_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+               break;
+       case OPC_OUB_GENERAL_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+               pm8001_mpi_general_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SATA_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SAS_DIAG_MODE_START_END:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+               break;
+       case OPC_OUB_SAS_DIAG_EXECUTE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+               break;
+       case OPC_OUB_GET_TIME_STAMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+               break;
+       case OPC_OUB_SAS_HW_EVENT_ACK:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+               break;
+       case OPC_OUB_PORT_CONTROL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+               break;
+       case OPC_OUB_SMP_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_NVMD_DATA:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+               pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_NVMD_DATA:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+               pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEVICE_HANDLE_REMOVAL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+               break;
+       case OPC_OUB_SET_DEVICE_STATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+               pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_DEVICE_STATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+               break;
+       case OPC_OUB_SET_DEV_INFO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+               break;
+       /* SPCv specific commands */
+       case OPC_OUB_PHY_START_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_PHY_START_RESP opcode:%x\n", opc));
+               mpi_phy_start_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_PHY_STOP_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc));
+               mpi_phy_stop_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_CONTROLLER_CONFIG:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc));
+               mpi_set_controller_config_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_CONTROLLER_CONFIG:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc));
+               mpi_get_controller_config_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_PHY_PROFILE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc));
+               mpi_get_phy_profile_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_FLASH_OP_EXT:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc));
+               mpi_flash_op_ext_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_PHY_PROFILE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc));
+               mpi_set_phy_profile_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_KEK_MANAGEMENT_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc));
+               mpi_kek_management_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEK_MANAGEMENT_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc));
+               mpi_dek_management_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_COALESCED_COMP_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
+               ssp_coalesced_comp_resp(pm8001_ha, piomb);
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
+               break;
+       }
+}
+
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+       struct outbound_queue_table *circularQ;
+       void *pMsg1 = NULL;
+       u8 uninitialized_var(bc);
+       u32 ret = MPI_IO_STATUS_FAIL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pm8001_ha->lock, flags);
+       circularQ = &pm8001_ha->outbnd_q_tbl[vec];
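+       /* consume, dispatch and free each outbound IOMB until the queue is empty */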
+       do {
+               ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+               if (MPI_IO_STATUS_SUCCESS == ret) {
+                       /* process the outbound message */
+                       process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+                       /* free the message from the outbound circular buffer */
+                       pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+                                                       circularQ, bc);
+               }
+               if (MPI_IO_STATUS_BUSY == ret) {
+                       /* Update the producer index from SPC */
+                       circularQ->producer_index =
+                               cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+                       if (le32_to_cpu(circularQ->producer_index) ==
+                               circularQ->consumer_idx)
+                               /* OQ is empty */
+                               break;
+               }
+       } while (1);
+       spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+       return ret;
+}
+
+/* PCI_DMA_... to our direction translation. */
+static const u8 data_dir_flags[] = {
+       [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
+       [PCI_DMA_TODEVICE]      = DATA_DIR_OUT,/* OUTBOUND */
+       [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN,/* INBOUND */
+       [PCI_DMA_NONE]          = DATA_DIR_NONE,/* NO TRANSFER */
+};
+
+static void build_smp_cmd(u32 deviceID, __le32 hTag,
+                       struct smp_req *psmp_cmd, int mode, int length)
+{
+       psmp_cmd->tag = hTag;
+       psmp_cmd->device_id = cpu_to_le32(deviceID);
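+       /* direct mode encodes the frame length (minus CRC) in bits 16-31;
+        * indirect mode sets the two low-order indirect-mode bits instead */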
+       if (mode == SMP_DIRECT) {
+               length = length - 4; /* subtract crc */
+               psmp_cmd->len_ip_ir = cpu_to_le32(length << 16);
+       } else {
+               psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
+       }
+}
+
+/**
+ * pm8001_chip_smp_req - send a SMP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ * pm80xx_chip_smp_req - send an SMP task to the FW
+static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       int elem, rc;
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_dev = dev->lldd_dev;
+       struct scatterlist *sg_req, *sg_resp;
+       u32 req_len, resp_len;
+       struct smp_req smp_cmd;
+       u32 opc;
+       struct inbound_queue_table *circularQ;
+       char *preq_dma_addr = NULL;
+       __le64 tmp_addr;
+       u32 i, length;
+
+       memset(&smp_cmd, 0, sizeof(smp_cmd));
+       /*
+        * DMA-map SMP request, response buffers
+        */
+       sg_req = &task->smp_task.smp_req;
+       elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+       if (!elem)
+               return -ENOMEM;
+       req_len = sg_dma_len(sg_req);
+
+       sg_resp = &task->smp_task.smp_resp;
+       elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+       if (!elem) {
+               rc = -ENOMEM;
+               goto err_out;
+       }
+       resp_len = sg_dma_len(sg_resp);
+       /* must be in dwords */
+       if ((req_len & 0x3) || (resp_len & 0x3)) {
+               rc = -EINVAL;
+               goto err_out_2;
+       }
+
+       opc = OPC_INB_SMP_REQUEST;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
+
+       length = sg_req->length;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("SMP Frame Length %d\n", sg_req->length));
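+       /* an 8-byte request frame selects direct SMP mode; anything larger
+        * would use indirect mode */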
+       if (!(length - 8))
+               pm8001_ha->smp_exp_mode = SMP_DIRECT;
+       else
+               pm8001_ha->smp_exp_mode = SMP_INDIRECT;
+
+       /* DIRECT MODE support only in spcv/ve */
+       pm8001_ha->smp_exp_mode = SMP_DIRECT;
+
+       tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
+       preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+
+       /* INDIRECT MODE command settings. Use DMA */
+       if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("SMP REQUEST INDIRECT MODE\n"));
+               /* for SPCv indirect mode. Place the top 4 bytes of
+                * SMP Request header here. */
+               for (i = 0; i < 4; i++)
+                       smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+               /* exclude top 4 bytes for SMP req header */
+               smp_cmd.long_smp_req.long_req_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                               (&task->smp_task.smp_req) - 4);
+               /* exclude 4 bytes for SMP req header and CRC */
+               smp_cmd.long_smp_req.long_req_size =
+                       cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
+               smp_cmd.long_smp_req.long_resp_addr =
+                               cpu_to_le64((u64)sg_dma_address
+                                       (&task->smp_task.smp_resp));
+               smp_cmd.long_smp_req.long_resp_size =
+                               cpu_to_le32((u32)sg_dma_len
+                                       (&task->smp_task.smp_resp)-4);
+       } else { /* DIRECT MODE */
+               smp_cmd.long_smp_req.long_req_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                                       (&task->smp_task.smp_req));
+               smp_cmd.long_smp_req.long_req_size =
+                       cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
+               smp_cmd.long_smp_req.long_resp_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                               (&task->smp_task.smp_resp));
+               smp_cmd.long_smp_req.long_resp_size =
+                       cpu_to_le32
+                       ((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
+       }
+       if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("SMP REQUEST DIRECT MODE\n"));
+               for (i = 0; i < length; i++)
+                       if (i < 16) {
+                               smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "Byte[%d]:%x (DMA data:%x)\n",
+                                       i, smp_cmd.smp_req16[i],
+                                       *(preq_dma_addr)));
+                       } else {
+                               smp_cmd.smp_req[i] = *(preq_dma_addr+i);
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "Byte[%d]:%x (DMA data:%x)\n",
+                                       i, smp_cmd.smp_req[i],
+                                       *(preq_dma_addr)));
+                       }
+       }
+
+       build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
+                               &smp_cmd, pm8001_ha->smp_exp_mode, length);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
+       return 0;
+
+err_out_2:
+       dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
+                       PCI_DMA_FROMDEVICE);
+err_out:
+       dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
+                       PCI_DMA_TODEVICE);
+       return rc;
+}
+
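+/* only data-transfer commands (reads/writes) are candidates for inline encryption */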
+static int check_enc_sas_cmd(struct sas_task *task)
+{
+       if ((task->ssp_task.cdb[0] == READ_10)
+               || (task->ssp_task.cdb[0] == WRITE_10)
+               || (task->ssp_task.cdb[0] == WRITE_VERIFY))
+               return 1;
+       else
+               return 0;
+}
+
+static int check_enc_sat_cmd(struct sas_task *task)
+{
+       int ret = 0;
+       switch (task->ata_task.fis.command) {
+       case ATA_CMD_FPDMA_READ:
+       case ATA_CMD_READ_EXT:
+       case ATA_CMD_READ:
+       case ATA_CMD_FPDMA_WRITE:
+       case ATA_CMD_WRITE_EXT:
+       case ATA_CMD_WRITE:
+       case ATA_CMD_PIO_READ:
+       case ATA_CMD_PIO_READ_EXT:
+       case ATA_CMD_PIO_WRITE:
+       case ATA_CMD_PIO_WRITE_EXT:
+               ret = 1;
+               break;
+       default:
+               ret = 0;
+               break;
+       }
+       return ret;
+}
+
+/**
+ * pm80xx_chip_ssp_io_req - send an SSP task to the FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_dev = dev->lldd_dev;
+       struct ssp_ini_io_start_req ssp_cmd;
+       u32 tag = ccb->ccb_tag;
+       int ret;
+       u64 phys_addr;
+       struct inbound_queue_table *circularQ;
+       static u32 inb;
+       static u32 outb;
+       u32 opc = OPC_INB_SSPINIIOSTART;
+       memset(&ssp_cmd, 0, sizeof(ssp_cmd));
+       memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+       /* data address domain added for spcv; set to 0 by host,
+        * used internally by controller
+        * 0 for SAS 1.1 and SAS 2.0 compatible TLR
+        */
+       ssp_cmd.dad_dir_m_tlr =
+               cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
+       ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+       ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+       ssp_cmd.tag = cpu_to_le32(tag);
+       if (task->ssp_task.enable_first_burst)
+               ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+       ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
+       ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
+       memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* Check if encryption is set */
+       if (pm8001_ha->chip->encrypt &&
+               !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption enabled. Sending Encrypt SAS command 0x%x\n",
+                       task->ssp_task.cdb[0]));
+               opc = OPC_INB_SSP_INI_DIF_ENC_IO;
+               /* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
+               ssp_cmd.dad_dir_m_tlr = cpu_to_le32
+                       ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0);
+
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                               ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       ssp_cmd.enc_addr_low =
+                               cpu_to_le32(lower_32_bits(phys_addr));
+                       ssp_cmd.enc_addr_high =
+                               cpu_to_le32(upper_32_bits(phys_addr));
+                       ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       ssp_cmd.enc_addr_low =
+                               cpu_to_le32(lower_32_bits(dma_addr));
+                       ssp_cmd.enc_addr_high =
+                               cpu_to_le32(upper_32_bits(dma_addr));
+                       ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.enc_esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       ssp_cmd.enc_addr_low = 0;
+                       ssp_cmd.enc_addr_high = 0;
+                       ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.enc_esgl = 0;
+               }
+               /* XTS mode. All other fields are 0 */
+               ssp_cmd.key_cmode = 0x6 << 4;
+               /* set tweak values. Should be the start lba */
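+               /* READ_10/WRITE_10/WRITE_VERIFY carry the 32-bit LBA in CDB bytes 2-5 */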
+               ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) |
+                                               (task->ssp_task.cdb[3] << 16) |
+                                               (task->ssp_task.cdb[4] << 8) |
+                                               (task->ssp_task.cdb[5]));
+       } else {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Sending Normal SAS command 0x%x inb q %x\n",
+                       task->ssp_task.cdb[0], inb));
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter, ccb->n_elem,
+                                       ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       ssp_cmd.addr_low =
+                               cpu_to_le32(lower_32_bits(phys_addr));
+                       ssp_cmd.addr_high =
+                               cpu_to_le32(upper_32_bits(phys_addr));
+                       ssp_cmd.esgl = cpu_to_le32(1<<31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+                       ssp_cmd.addr_high =
+                               cpu_to_le32(upper_32_bits(dma_addr));
+                       ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       ssp_cmd.addr_low = 0;
+                       ssp_cmd.addr_high = 0;
+                       ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.esgl = 0;
+               }
+       }
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);
+
+       /* rotate the outb queue */
+       outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+
+       return ret;
+}
+
+static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+       u32 tag = ccb->ccb_tag;
+       int ret;
+       static u32 inb;
+       static u32 outb;
+       struct sata_start_req sata_cmd;
+       u32 hdr_tag, ncg_tag = 0;
+       u64 phys_addr;
+       u32 ATAP = 0x0;
+       u32 dir;
+       struct inbound_queue_table *circularQ;
+       unsigned long flags;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       if (task->data_dir == PCI_DMA_NONE) {
+               ATAP = 0x04; /* no data*/
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+       } else if (likely(!task->ata_task.device_control_reg_update)) {
+               if (task->ata_task.dma_xfer) {
+                       ATAP = 0x06; /* DMA */
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
+               } else {
+                       ATAP = 0x05; /* PIO*/
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
+               }
+               if (task->ata_task.use_ncq &&
+                       dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
+                       ATAP = 0x07; /* FPDMA */
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+               }
+       }
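+       /* for NCQ, the queue tag is carried in bits 3-7 of the FIS sector count */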
+       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+               ncg_tag = hdr_tag;
+       }
+       dir = data_dir_flags[task->data_dir] << 8;
+       sata_cmd.tag = cpu_to_le32(tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+
+       sata_cmd.sata_fis = task->ata_task.fis;
+       if (likely(!task->ata_task.device_control_reg_update))
+               sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
+       sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
+
+       /* Check if encryption is set */
+       if (pm8001_ha->chip->encrypt &&
+               !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption enabled. Sending Encrypt SATA cmd 0x%x\n",
+                       sata_cmd.sata_fis.command));
+               opc = OPC_INB_SATA_DIF_ENC_IO;
+
+               /* set encryption bit */
+               sata_cmd.ncqtag_atap_dir_m_dad =
+                       cpu_to_le32(((ncg_tag & 0xff)<<16)|
+                               ((ATAP & 0x3f) << 10) | 0x20 | dir);
+                                                       /* dad (bit 0-1) is 0 */
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                               ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
+                       sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
+                       sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
+                       sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
+                       sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.enc_esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       sata_cmd.enc_addr_low = 0;
+                       sata_cmd.enc_addr_high = 0;
+                       sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.enc_esgl = 0;
+               }
+               /* XTS mode. All other fields are 0 */
+               sata_cmd.key_index_mode = 0x6 << 4;
+               /* set tweak values. Should be the start lba */
+               sata_cmd.twk_val0 =
+                       cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
+                                       (sata_cmd.sata_fis.lbah << 16) |
+                                       (sata_cmd.sata_fis.lbam << 8) |
+                                       (sata_cmd.sata_fis.lbal));
+               sata_cmd.twk_val1 =
+                       cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) |
+                                        (sata_cmd.sata_fis.lbam_exp));
+       } else {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Sending Normal SATA command 0x%x inb %x\n",
+                       sata_cmd.sata_fis.command, inb));
+               /* dad (bit 0-1) is 0 */
+               sata_cmd.ncqtag_atap_dir_m_dad =
+                       cpu_to_le32(((ncg_tag & 0xff)<<16) |
+                                       ((ATAP & 0x3f) << 10) | dir);
+
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                       ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       sata_cmd.addr_low = lower_32_bits(phys_addr);
+                       sata_cmd.addr_high = upper_32_bits(phys_addr);
+                       sata_cmd.esgl = cpu_to_le32(1 << 31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       sata_cmd.addr_low = lower_32_bits(dma_addr);
+                       sata_cmd.addr_high = upper_32_bits(dma_addr);
+                       sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       sata_cmd.addr_low = 0;
+                       sata_cmd.addr_high = 0;
+                       sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.esgl = 0;
+               }
+               /* scsi cdb */
+               sata_cmd.atapi_scsi_cdb[0] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[0]) |
+                       (task->ata_task.atapi_packet[1] << 8) |
+                       (task->ata_task.atapi_packet[2] << 16) |
+                       (task->ata_task.atapi_packet[3] << 24)));
+               sata_cmd.atapi_scsi_cdb[1] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[4]) |
+                       (task->ata_task.atapi_packet[5] << 8) |
+                       (task->ata_task.atapi_packet[6] << 16) |
+                       (task->ata_task.atapi_packet[7] << 24)));
+               sata_cmd.atapi_scsi_cdb[2] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[8]) |
+                       (task->ata_task.atapi_packet[9] << 8) |
+                       (task->ata_task.atapi_packet[10] << 16) |
+                       (task->ata_task.atapi_packet[11] << 24)));
+               sata_cmd.atapi_scsi_cdb[3] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[12]) |
+                       (task->ata_task.atapi_packet[13] << 8) |
+                       (task->ata_task.atapi_packet[14] << 16) |
+                       (task->ata_task.atapi_packet[15] << 24)));
+       }
+
+       /* Check for read log for failed drive and return */
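+       /* 0x2f is ATA READ LOG EXT (ATA_CMD_READ_LOG_EXT) */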
+       if (sata_cmd.sata_fis.command == 0x2f) {
+               if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+                       struct task_status_struct *ts;
+
+                       pm8001_ha_dev->id &= 0xDFFFFFFF;
+                       ts = &task->task_status;
+
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+                       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+                       task->task_state_flags |= SAS_TASK_STATE_DONE;
+                       if (unlikely((task->task_state_flags &
+                                       SAS_TASK_STATE_ABORTED))) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("task 0x%p resp 0x%x "
+                                       "stat 0x%x but aborted by upper layer\n",
+                                       task, ts->resp, ts->stat));
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               return 0;
+                       } else if (task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/* ditto */
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       } else if (!task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/*ditto*/
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       }
+               }
+       }
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+                                               &sata_cmd, outb++);
+
+       /* rotate the outb queue */
+       outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
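+       /* so consecutive SATA commands spread their completions round-robin
+        * across the PM8001_MAX_SPCV_OUTB_NUM outbound queues
+        */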
+       return ret;
+}
+
+/**
+ * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we wanted to start up.
+ */
+static int
+pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+{
+       struct phy_start_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 tag = 0x01;
+       u32 opcode = OPC_INB_PHYSTART;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       memset(&payload, 0, sizeof(payload));
+       payload.tag = cpu_to_le32(tag);
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
+       /*
+        ** [0:7]       PHY Identifier
+        ** [8:11]      link rate 1.5G, 3G, 6G
+        ** [12:13]     link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
+        ** [14]        0b disable spin up hold; 1b enable spin up hold
+        ** [15]        0b no change in current PHY analog setup; 1b enable using SPAST
+        */
+       payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+                       LINKMODE_AUTO | LINKRATE_15 |
+                       LINKRATE_30 | LINKRATE_60 | phy_id);
+       /* SSC Disable and SAS Analog ST configuration */
+       /**
+       payload.ase_sh_lm_slr_phyid =
+               cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
+               LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
+               phy_id);
+       Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
+       **/
+
+       payload.sas_identify.dev_type = SAS_END_DEVICE;
+       payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+       memcpy(payload.sas_identify.sas_addr,
+               pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+       payload.sas_identify.phy_id = phy_id;
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+       return ret;
+}
+
+/**
+ * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we wanted to stop.
+ */
+static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+       u8 phy_id)
+{
+       struct phy_stop_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 tag = 0x01;
+       u32 opcode = OPC_INB_PHYSTOP;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       memset(&payload, 0, sizeof(payload));
+       payload.tag = cpu_to_le32(tag);
+       payload.phy_id = cpu_to_le32(phy_id);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+       return ret;
+}
+
+/**
+ * pm80xx_chip_reg_dev_req - register a device to the controller.
+ * see comments on pm8001_mpi_reg_resp.
+ */
+static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_device *pm8001_dev, u32 flag)
+{
+       struct reg_dev_req payload;
+       u32     opc;
+       u32 stp_sspsmp_sata = 0x4;
+       struct inbound_queue_table *circularQ;
+       u32 linkrate, phy_id;
+       int rc, tag = 0xdeadbeef;
+       struct pm8001_ccb_info *ccb;
+       u8 retryFlag = 0x1;
+       u16 firstBurstSize = 0;
+       u16 ITNT = 2000;
+       struct domain_device *dev = pm8001_dev->sas_device;
+       struct domain_device *parent_dev = dev->parent;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&payload, 0, sizeof(payload));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return rc;
+       ccb = &pm8001_ha->ccb_info[tag];
+       ccb->device = pm8001_dev;
+       ccb->ccb_tag = tag;
+       payload.tag = cpu_to_le32(tag);
+
+       if (flag == 1) {
+               stp_sspsmp_sata = 0x02; /*direct attached sata */
+       } else {
+               if (pm8001_dev->dev_type == SAS_SATA_DEV)
+                       stp_sspsmp_sata = 0x00; /* stp*/
+               else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+                       pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                       pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+                       stp_sspsmp_sata = 0x01; /*ssp or smp*/
+       }
+       if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+               phy_id = parent_dev->ex_dev.ex_phy->phy_id;
+       else
+               phy_id = pm8001_dev->attached_phy;
+
+       opc = OPC_INB_REG_DEV;
+
+       linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
+                       pm8001_dev->sas_device->linkrate : dev->port->linkrate;
+
+       payload.phyid_portid =
+               cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
+               ((phy_id & 0xFF) << 8));
+
+       payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
+               ((linkrate & 0x0F) << 24) |
+               ((stp_sspsmp_sata & 0x03) << 28));
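+       /* low 16 bits: I-T nexus timeout (ITNT), high 16 bits: first burst size */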
+       payload.firstburstsize_ITNexustimeout =
+               cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
+
+       memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
+               SAS_ADDR_SIZE);
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_chip_phy_ctl_req - support the local phy operation
+ * @pm8001_ha: our hba card information.
+ * @phyId: the phy id which we want to operate on
+ * @phy_op: the phy operation to perform
+ */
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+       u32 phyId, u32 phy_op)
+{
+       struct local_phy_ctl_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+       memset(&payload, 0, sizeof(payload));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(1);
+       payload.phyop_phyid =
+               cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+       return ret;
+}
+
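+/*
+ * With MSI-X every vector assigned to this HBA is ours, so the interrupt is
+ * always claimed; otherwise the outbound doorbell register (MSGU_ODR) is read
+ * to check whether this controller raised the interrupt.
+ */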
+static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 value;
+#ifdef PM8001_USE_MSIX
+       return 1;
+#endif
+       value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
+       if (value)
+               return 1;
+       return 0;
+
+}
+
+/**
+ * pm80xx_chip_isr - PM80xx isr handler.
+ * @pm8001_ha: our hba card information.
+ * @vec: irq vector number
+ */
+static irqreturn_t
+pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+       pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+       process_oq(pm8001_ha, vec);
+       pm80xx_chip_interrupt_enable(pm8001_ha, vec);
+       return IRQ_HANDLED;
+}
+
+const struct pm8001_dispatch pm8001_80xx_dispatch = {
+       .name                   = "pmc80xx",
+       .chip_init              = pm80xx_chip_init,
+       .chip_soft_rst          = pm80xx_chip_soft_rst,
+       .chip_rst               = pm80xx_hw_chip_rst,
+       .chip_iounmap           = pm8001_chip_iounmap,
+       .isr                    = pm80xx_chip_isr,
+       .is_our_interupt        = pm80xx_chip_is_our_interupt,
+       .isr_process_oq         = process_oq,
+       .interrupt_enable       = pm80xx_chip_interrupt_enable,
+       .interrupt_disable      = pm80xx_chip_interrupt_disable,
+       .make_prd               = pm8001_chip_make_sg,
+       .smp_req                = pm80xx_chip_smp_req,
+       .ssp_io_req             = pm80xx_chip_ssp_io_req,
+       .sata_req               = pm80xx_chip_sata_req,
+       .phy_start_req          = pm80xx_chip_phy_start_req,
+       .phy_stop_req           = pm80xx_chip_phy_stop_req,
+       .reg_dev_req            = pm80xx_chip_reg_dev_req,
+       .dereg_dev_req          = pm8001_chip_dereg_dev_req,
+       .phy_ctl_req            = pm80xx_chip_phy_ctl_req,
+       .task_abort             = pm8001_chip_abort_task,
+       .ssp_tm_req             = pm8001_chip_ssp_tm_req,
+       .get_nvmd_req           = pm8001_chip_get_nvmd_req,
+       .set_nvmd_req           = pm8001_chip_set_nvmd_req,
+       .fw_flash_update_req    = pm8001_chip_fw_flash_update_req,
+       .set_dev_state_req      = pm8001_chip_set_dev_state_req,
+};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
new file mode 100644 (file)
index 0000000..2b760ba
--- /dev/null
@@ -0,0 +1,1523 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions, and the following disclaimer,
+ *     without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *     substantially similar to the "NO WARRANTY" disclaimer below
+ *     ("Disclaimer") and any redistribution must be conditioned upon
+ *     including a substantially similar Disclaimer requirement for further
+ *     binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *     of any contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PMC8001_REG_H_
+#define _PMC8001_REG_H_
+
+#include <linux/types.h>
+#include <scsi/libsas.h>
+
+/* for Request Opcode of IOMB */
+#define OPC_INB_ECHO                           1       /* 0x000 */
+#define OPC_INB_PHYSTART                       4       /* 0x004 */
+#define OPC_INB_PHYSTOP                                5       /* 0x005 */
+#define OPC_INB_SSPINIIOSTART                  6       /* 0x006 */
+#define OPC_INB_SSPINITMSTART                  7       /* 0x007 */
+/* 0x8 RESV IN SPCv */
+#define OPC_INB_RSVD                           8       /* 0x008 */
+#define OPC_INB_DEV_HANDLE_ACCEPT              9       /* 0x009 */
+#define OPC_INB_SSPTGTIOSTART                  10      /* 0x00A */
+#define OPC_INB_SSPTGTRSPSTART                 11      /* 0x00B */
+/* 0xC, 0xD, 0xE removed in SPCv */
+#define OPC_INB_SSP_ABORT                      15      /* 0x00F */
+#define OPC_INB_DEREG_DEV_HANDLE               16      /* 0x010 */
+#define OPC_INB_GET_DEV_HANDLE                 17      /* 0x011 */
+#define OPC_INB_SMP_REQUEST                    18      /* 0x012 */
+/* 0x13 SMP_RESPONSE is removed in SPCv */
+#define OPC_INB_SMP_ABORT                      20      /* 0x014 */
+/* 0x16 RESV IN SPCv */
+#define OPC_INB_RSVD1                          22      /* 0x016 */
+#define OPC_INB_SATA_HOST_OPSTART              23      /* 0x017 */
+#define OPC_INB_SATA_ABORT                     24      /* 0x018 */
+#define OPC_INB_LOCAL_PHY_CONTROL              25      /* 0x019 */
+/* 0x1A RESV IN SPCv */
+#define OPC_INB_RSVD2                          26      /* 0x01A */
+#define OPC_INB_FW_FLASH_UPDATE                        32      /* 0x020 */
+#define OPC_INB_GPIO                           34      /* 0x022 */
+#define OPC_INB_SAS_DIAG_MODE_START_END                35      /* 0x023 */
+#define OPC_INB_SAS_DIAG_EXECUTE               36      /* 0x024 */
+/* 0x25 RESV IN SPCv */
+#define OPC_INB_RSVD3                          37      /* 0x025 */
+#define OPC_INB_GET_TIME_STAMP                 38      /* 0x026 */
+#define OPC_INB_PORT_CONTROL                   39      /* 0x027 */
+#define OPC_INB_GET_NVMD_DATA                  40      /* 0x028 */
+#define OPC_INB_SET_NVMD_DATA                  41      /* 0x029 */
+#define OPC_INB_SET_DEVICE_STATE               42      /* 0x02A */
+#define OPC_INB_GET_DEVICE_STATE               43      /* 0x02B */
+#define OPC_INB_SET_DEV_INFO                   44      /* 0x02C */
+/* 0x2D RESV IN SPCv */
+#define OPC_INB_RSVD4                          45      /* 0x02D */
+#define OPC_INB_SGPIO_REGISTER                 46      /* 0x02E */
+#define OPC_INB_PCIE_DIAG_EXEC                 47      /* 0x02F */
+#define OPC_INB_SET_CONTROLLER_CONFIG          48      /* 0x030 */
+#define OPC_INB_GET_CONTROLLER_CONFIG          49      /* 0x031 */
+#define OPC_INB_REG_DEV                                50      /* 0x032 */
+#define OPC_INB_SAS_HW_EVENT_ACK               51      /* 0x033 */
+#define OPC_INB_GET_DEVICE_INFO                        52      /* 0x034 */
+#define OPC_INB_GET_PHY_PROFILE                        53      /* 0x035 */
+#define OPC_INB_FLASH_OP_EXT                   54      /* 0x036 */
+#define OPC_INB_SET_PHY_PROFILE                        55      /* 0x037 */
+#define OPC_INB_KEK_MANAGEMENT                 256     /* 0x100 */
+#define OPC_INB_DEK_MANAGEMENT                 257     /* 0x101 */
+#define OPC_INB_SSP_INI_DIF_ENC_IO             258     /* 0x102 */
+#define OPC_INB_SATA_DIF_ENC_IO                        259     /* 0x103 */
+
+/* for Response Opcode of IOMB */
+#define OPC_OUB_ECHO                                   1       /* 0x001 */
+#define OPC_OUB_RSVD                                   4       /* 0x004 */
+#define OPC_OUB_SSP_COMP                               5       /* 0x005 */
+#define OPC_OUB_SMP_COMP                               6       /* 0x006 */
+#define OPC_OUB_LOCAL_PHY_CNTRL                                7       /* 0x007 */
+#define OPC_OUB_RSVD1                                  10      /* 0x00A */
+#define OPC_OUB_DEREG_DEV                              11      /* 0x00B */
+#define OPC_OUB_GET_DEV_HANDLE                         12      /* 0x00C */
+#define OPC_OUB_SATA_COMP                              13      /* 0x00D */
+#define OPC_OUB_SATA_EVENT                             14      /* 0x00E */
+#define OPC_OUB_SSP_EVENT                              15      /* 0x00F */
+#define OPC_OUB_RSVD2                                  16      /* 0x010 */
+/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/
+#define OPC_OUB_SSP_RECV_EVENT                         18      /* 0x012 */
+#define OPC_OUB_RSVD3                                  19      /* 0x013 */
+#define OPC_OUB_FW_FLASH_UPDATE                                20      /* 0x014 */
+#define OPC_OUB_GPIO_RESPONSE                          22      /* 0x016 */
+#define OPC_OUB_GPIO_EVENT                             23      /* 0x017 */
+#define OPC_OUB_GENERAL_EVENT                          24      /* 0x018 */
+#define OPC_OUB_SSP_ABORT_RSP                          26      /* 0x01A */
+#define OPC_OUB_SATA_ABORT_RSP                         27      /* 0x01B */
+#define OPC_OUB_SAS_DIAG_MODE_START_END                        28      /* 0x01C */
+#define OPC_OUB_SAS_DIAG_EXECUTE                       29      /* 0x01D */
+#define OPC_OUB_GET_TIME_STAMP                         30      /* 0x01E */
+#define OPC_OUB_RSVD4                                  31      /* 0x01F */
+#define OPC_OUB_PORT_CONTROL                           32      /* 0x020 */
+#define OPC_OUB_SKIP_ENTRY                             33      /* 0x021 */
+#define OPC_OUB_SMP_ABORT_RSP                          34      /* 0x022 */
+#define OPC_OUB_GET_NVMD_DATA                          35      /* 0x023 */
+#define OPC_OUB_SET_NVMD_DATA                          36      /* 0x024 */
+#define OPC_OUB_DEVICE_HANDLE_REMOVAL                  37      /* 0x025 */
+#define OPC_OUB_SET_DEVICE_STATE                       38      /* 0x026 */
+#define OPC_OUB_GET_DEVICE_STATE                       39      /* 0x027 */
+#define OPC_OUB_SET_DEV_INFO                           40      /* 0x028 */
+#define OPC_OUB_RSVD5                                  41      /* 0x029 */
+#define OPC_OUB_HW_EVENT                               1792    /* 0x700 */
+#define OPC_OUB_DEV_HANDLE_ARRIV                       1824    /* 0x720 */
+#define OPC_OUB_THERM_HW_EVENT                         1840    /* 0x730 */
+#define OPC_OUB_SGPIO_RESP                             2094    /* 0x82E */
+#define OPC_OUB_PCIE_DIAG_EXECUTE                      2095    /* 0x82F */
+#define OPC_OUB_DEV_REGIST                             2098    /* 0x832 */
+#define OPC_OUB_SAS_HW_EVENT_ACK                       2099    /* 0x833 */
+#define OPC_OUB_GET_DEVICE_INFO                                2100    /* 0x834 */
+/* spcv specific commands */
+#define OPC_OUB_PHY_START_RESP                         2052    /* 0x804 */
+#define OPC_OUB_PHY_STOP_RESP                          2053    /* 0x805 */
+#define OPC_OUB_SET_CONTROLLER_CONFIG                  2096    /* 0x830 */
+#define OPC_OUB_GET_CONTROLLER_CONFIG                  2097    /* 0x831 */
+#define OPC_OUB_GET_PHY_PROFILE                                2101    /* 0x835 */
+#define OPC_OUB_FLASH_OP_EXT                           2102    /* 0x836 */
+#define OPC_OUB_SET_PHY_PROFILE                                2103    /* 0x837 */
+#define OPC_OUB_KEK_MANAGEMENT_RESP                    2304    /* 0x900 */
+#define OPC_OUB_DEK_MANAGEMENT_RESP                    2305    /* 0x901 */
+#define OPC_OUB_SSP_COALESCED_COMP_RESP                        2306    /* 0x902 */
+
+/* for phy start*/
+#define SSC_DISABLE_15                 (0x01 << 16)
+#define SSC_DISABLE_30                 (0x02 << 16)
+#define SSC_DISABLE_60                 (0x04 << 16)
+#define SAS_ASE                                (0x01 << 15)
+#define SPINHOLD_DISABLE               (0x00 << 14)
+#define SPINHOLD_ENABLE                        (0x01 << 14)
+#define LINKMODE_SAS                   (0x01 << 12)
+#define LINKMODE_DSATA                 (0x02 << 12)
+#define LINKMODE_AUTO                  (0x03 << 12)
+#define LINKRATE_15                    (0x01 << 8)
+#define LINKRATE_30                    (0x02 << 8)
+#define LINKRATE_60                    (0x06 << 8)
+
+/* Thermal related */
+#define        THERMAL_ENABLE                  0x1
+#define        THERMAL_LOG_ENABLE              0x1
+#define THERMAL_OP_CODE                        0x6
+#define LTEMPHIL                        70
+#define RTEMPHIL                       100
+
+/* Encryption info */
+#define SCRATCH_PAD3_ENC_DISABLED      0x00000000
+#define SCRATCH_PAD3_ENC_DIS_ERR       0x00000001
+#define SCRATCH_PAD3_ENC_ENA_ERR       0x00000002
+#define SCRATCH_PAD3_ENC_READY         0x00000003
+#define SCRATCH_PAD3_ENC_MASK          SCRATCH_PAD3_ENC_READY
+
+#define SCRATCH_PAD3_XTS_ENABLED               (1 << 14)
+#define SCRATCH_PAD3_SMA_ENABLED               (1 << 4)
+#define SCRATCH_PAD3_SMB_ENABLED               (1 << 5)
+#define SCRATCH_PAD3_SMF_ENABLED               0
+#define SCRATCH_PAD3_SM_MASK                   0x000000F0
+#define SCRATCH_PAD3_ERR_CODE                  0x00FF0000
+
+#define SEC_MODE_SMF                           0x0
+#define SEC_MODE_SMA                           0x100
+#define SEC_MODE_SMB                           0x200
+#define CIPHER_MODE_ECB                                0x00000001
+#define CIPHER_MODE_XTS                                0x00000002
+#define KEK_MGMT_SUBOP_KEYCARDUPDATE           0x4
+
+/* SAS protocol timer configuration page */
+#define SAS_PROTOCOL_TIMER_CONFIG_PAGE  0x04
+#define STP_MCT_TMO                     32
+#define SSP_MCT_TMO                     32
+#define SAS_MAX_OPEN_TIME                              5
+#define SMP_MAX_CONN_TIMER              0xFF
+#define STP_FRM_TIMER                   0
+#define STP_IDLE_TIME                   5 /* 5 us; controller default */
+#define SAS_MFD                         0
+#define SAS_OPNRJT_RTRY_INTVL           2
+#define SAS_DOPNRJT_RTRY_TMO            128
+#define SAS_COPNRJT_RTRY_TMO            128
+
+/*
+  Making ORR bigger than IT NEXUS LOSS, which is 2000000 us = 2 seconds.
+  Assuming a bigger value of 3 seconds, 3000000/128 = 23437.5, where 128
+  is DOPNRJT_RTRY_TMO
+*/
+#define SAS_DOPNRJT_RTRY_THR            23438
+#define SAS_COPNRJT_RTRY_THR            23438
+#define SAS_MAX_AIP                     0x200000
+#define IT_NEXUS_TIMEOUT       0x7D0
+#define PORT_RECOVERY_TIMEOUT  ((IT_NEXUS_TIMEOUT/100) + 30)
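+/* 0x7D0 = 2000, so PORT_RECOVERY_TIMEOUT evaluates to 2000/100 + 30 = 50 */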
+
+struct mpi_msg_hdr {
+       __le32  header; /* Bits [11:0] - Message operation code */
+       /* Bits [15:12] - Message Category */
+       /* Bits [21:16] - Outbound queue ID for the
+       operation completion message */
+       /* Bits [23:22] - Reserved */
+       /* Bits [28:24] - Buffer Count, indicates how
+       many buffers are allocated for the message */
+       /* Bits [30:29] - Reserved */
+       /* Bits [31] - Message Valid bit */
+} __attribute__((packed, aligned(4)));
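+
+/*
+ * Illustrative example (derived from the bit layout above): an inbound
+ * PHY START message (opcode 0x004), category 0, completion routed to
+ * outbound queue 0, buffer count 1 and the valid bit set would encode
+ * the header word as (1 << 31) | (1 << 24) | 0x004 = 0x81000004.
+ */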
+
+/*
+ * brief the data structure of PHY Start Command
+ * used to enable the phy (128 bytes)
+ */
+struct phy_start_req {
+       __le32  tag;
+       __le32  ase_sh_lm_slr_phyid;
+       struct sas_identify_frame sas_identify; /* 28 Bytes */
+       __le32 spasti;
+       u32     reserved[21];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY Stop Command
+ * used to disable the phy (128 bytes)
+ */
+struct phy_stop_req {
+       __le32  tag;
+       __le32  phy_id;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/* set device bits fis - device to host */
+struct set_dev_bits_fis {
+       u8      fis_type;       /* 0xA1*/
+       u8      n_i_pmport;
+       /* b7 : n Bit. Notification bit. If set device needs attention. */
+       /* b6 : i Bit. Interrupt Bit */
+       /* b5-b4: reserved2 */
+       /* b3-b0: PM Port */
+       u8      status;
+       u8      error;
+       u32     _r_a;
+} __attribute__ ((packed));
+/* PIO setup FIS - device to host */
+struct pio_setup_fis {
+       u8      fis_type;       /* 0x5f */
+       u8      i_d_pmPort;
+       /* b7 : reserved */
+       /* b6 : i bit. Interrupt bit */
+       /* b5 : d bit. data transfer direction. set to 1 for device to host
+       xfer */
+       /* b4 : reserved */
+       /* b3-b0: PM Port */
+       u8      status;
+       u8      error;
+       u8      lbal;
+       u8      lbam;
+       u8      lbah;
+       u8      device;
+       u8      lbal_exp;
+       u8      lbam_exp;
+       u8      lbah_exp;
+       u8      _r_a;
+       u8      sector_count;
+       u8      sector_count_exp;
+       u8      _r_b;
+       u8      e_status;
+       u8      _r_c[2];
+       u8      transfer_count;
+} __attribute__ ((packed));
+
+/*
+ * brief the data structure of SATA Completion Response
+ * use to describe the sata task response (64 bytes)
+ */
+struct sata_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       u32     sata_resp[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SAS HW Event Notification
+ * use to alert the host about the hardware event (64 bytes)
+ */
+/* updated outbound struct for spcv */
+
+struct hw_event_resp {
+       __le32  lr_status_evt_portid;
+       __le32  evt_param;
+       __le32  phyid_npip_portstate;
+       struct sas_identify_frame       sas_identify;
+       struct dev_to_host_fis  sata_fis;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure for thermal event notification
+ */
+
+struct thermal_hw_event {
+       __le32  thermal_event;
+       __le32  rht_lht;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of REGISTER DEVICE Command
+ * use to describe MPI REGISTER DEVICE Command (64 bytes)
+ */
+
+struct reg_dev_req {
+       __le32  tag;
+       __le32  phyid_portid;
+       __le32  dtype_dlr_mcn_ir_retry;
+       __le32  firstburstsize_ITNexustimeout;
+       u8      sas_addr[SAS_ADDR_SIZE];
+       __le32  upper_device_id;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEREGISTER DEVICE Command
+ * use to request spc to remove all internal resources associated
+ * with the device id (64 bytes)
+ */
+
+struct dereg_dev_req {
+       __le32  tag;
+       __le32  device_id;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEVICE_REGISTRATION Response
+ * use to notify the completion of the device registration (64 bytes)
+ */
+struct dev_reg_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  device_id;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of Local PHY Control Command
+ * use to issue PHY CONTROL to local phy (64 bytes)
+ */
+struct local_phy_ctl_req {
+       __le32  tag;
+       __le32  phyop_phyid;
+       u32     reserved1[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Local Phy Control Response
+ * use to describe MPI Local Phy Control Response (64 bytes)
+ */
+ struct local_phy_ctl_resp {
+       __le32  tag;
+       __le32  phyop_phyid;
+       __le32  status;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+#define OP_BITS 0x0000FF00
+#define ID_BITS 0x000000FF
+
+/*
+ * brief the data structure of PORT Control Command
+ * use to control port properties (64 bytes)
+ */
+
+struct port_ctl_req {
+       __le32  tag;
+       __le32  portop_portid;
+       __le32  param0;
+       __le32  param1;
+       u32     reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of HW Event Ack Command
+ * use to acknowledge receive HW event (64 bytes)
+ */
+struct hw_event_ack_req {
+       __le32  tag;
+       __le32  phyid_sea_portid;
+       __le32  param0;
+       __le32  param1;
+       u32     reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_START Response Command
+ * indicates the completion of PHY_START command (64 bytes)
+ */
+struct phy_start_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  phyid;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_STOP Response Command
+ * indicates the completion of PHY_STOP command (64 bytes)
+ */
+struct phy_stop_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  phyid;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP Completion Response
+ * use to indicate a SSP Completion (n bytes)
+ */
+struct ssp_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       __le32  ssptag_rescv_rescpad;
+       struct ssp_response_iu ssp_resp_iu;
+       __le32  residual_count;
+} __attribute__((packed, aligned(4)));
+
+#define SSP_RESCV_BIT  0x00010000
+
+/*
+ * brief the data structure of SATA EVENT response
+ * use to indicate a SATA Event (64 bytes)
+ */
+struct sata_event_resp {
+       __le32 tag;
+       __le32 event;
+       __le32 port_id;
+       __le32 device_id;
+       u32 reserved;
+       __le32 event_param0;
+       __le32 event_param1;
+       __le32 sata_addr_h32;
+       __le32 sata_addr_l32;
+       __le32 e_udt1_udt0_crc;
+       __le32 e_udt5_udt4_udt3_udt2;
+       __le32 a_udt1_udt0_crc;
+       __le32 a_udt5_udt4_udt3_udt2;
+       __le32 hwdevid_diferr;
+       __le32 err_framelen_byteoffset;
+       __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP EVENT response
+ * use to indicate an SSP Event (64 bytes)
+ */
+struct ssp_event_resp {
+       __le32 tag;
+       __le32 event;
+       __le32 port_id;
+       __le32 device_id;
+       __le32 ssp_tag;
+       __le32 event_param0;
+       __le32 event_param1;
+       __le32 sas_addr_h32;
+       __le32 sas_addr_l32;
+       __le32 e_udt1_udt0_crc;
+       __le32 e_udt5_udt4_udt3_udt2;
+       __le32 a_udt1_udt0_crc;
+       __le32 a_udt5_udt4_udt3_udt2;
+       __le32 hwdevid_diferr;
+       __le32 err_framelen_byteoffset;
+       __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of General Event Notification Response
+ * use to describe MPI General Event Notification Response (64 bytes)
+ */
+struct general_event_resp {
+       __le32  status;
+       __le32  inb_IOMB_payload[14];
+} __attribute__((packed, aligned(4)));
+
+#define GENERAL_EVENT_PAYLOAD  14
+#define OPCODE_BITS    0x00000fff
+
+/*
+ * brief the data structure of SMP Request Command
+ * use to describe MPI SMP REQUEST Command (64 bytes)
+ */
+struct smp_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  len_ip_ir;
+       /* Bits [0] - Indirect response */
+       /* Bits [1] - Indirect Payload */
+       /* Bits [15:2] - Reserved */
+       /* Bits [23:16] - direct payload Len */
+       /* Bits [31:24] - Reserved */
+       u8      smp_req16[16];
+       union {
+               u8      smp_req[32];
+               struct {
+                       __le64 long_req_addr;/* sg dma address, LE */
+                       __le32 long_req_size;/* LE */
+                       u32     _r_a;
+                       __le64 long_resp_addr;/* sg dma address, LE */
+                       __le32 long_resp_size;/* LE */
+                       u32     _r_b;
+                       } long_smp_req;/* sequencer extension */
+       };
+       __le32  rsvd[16];
+} __attribute__((packed, aligned(4)));
+/*
+ * brief the data structure of SMP Completion Response
+ * use to describe MPI SMP Completion Response (64 bytes)
+ */
+struct smp_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       u8      _r_a[252];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP SMP SATA Abort Command
+ * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
+ */
+struct task_abort_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  tag_to_abort;
+       __le32  abort_all;
+       u32     reserved[27];
+} __attribute__((packed, aligned(4)));
+
+/* These flags used for SSP SMP & SATA Abort */
+#define ABORT_MASK             0x3
+#define ABORT_SINGLE           0x0
+#define ABORT_ALL              0x1
+
+/**
+ * brief the data structure of SSP SATA SMP Abort Response
+ * use to describe SSP SMP & SATA Abort Response (64 bytes)
+ */
+struct task_abort_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  scp;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Command
+ * use to describe MPI SAS Diagnostic Start/End Command (64 bytes)
+ */
+struct sas_diag_start_end_req {
+       __le32  tag;
+       __le32  operation_phyid;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Command
+ * use to describe MPI SAS Diagnostic Execute Command (64 bytes)
+ */
+struct sas_diag_execute_req {
+       __le32  tag;
+       __le32  cmdtype_cmddesc_phyid;
+       __le32  pat1_pat2;
+       __le32  threshold;
+       __le32  codepat_errmsk;
+       __le32  pmon;
+       __le32  pERF1CTL;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+#define SAS_DIAG_PARAM_BYTES 24
+
+/*
+ * brief the data structure of Set Device State Command
+ * use to describe MPI Set Device State Command (64 bytes)
+ */
+struct set_dev_state_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  nds;
+       u32     reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SATA Start Command
+ * use to describe MPI SATA IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+
+struct sata_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  ncqtag_atap_dir_m_dad;
+       struct host_to_dev_fis  sata_fis;
+       u32     reserved1;
+       u32     reserved2;      /* dword 11. rsvd for normal I/O. */
+                               /* EPLE Descl for enc I/O */
+       u32     addr_low;       /* dword 12. rsvd for enc I/O */
+       u32     addr_high;      /* dword 13. reserved for enc I/O */
+       __le32  len;            /* dword 14: length for normal I/O. */
+                               /* EPLE Desch for enc I/O */
+       __le32  esgl;           /* dword 15. rsvd for enc I/O */
+       __le32  atapi_scsi_cdb[4];      /* dword 16-19. rsvd for enc I/O */
+       /* The below fields are reserved for normal I/O */
+       __le32  key_index_mode; /* dword 20 */
+       __le32  sector_cnt_enss;/* dword 21 */
+       __le32  keytagl;        /* dword 22 */
+       __le32  keytagh;        /* dword 23 */
+       __le32  twk_val0;       /* dword 24 */
+       __le32  twk_val1;       /* dword 25 */
+       __le32  twk_val2;       /* dword 26 */
+       __le32  twk_val3;       /* dword 27 */
+       __le32  enc_addr_low;   /* dword 28. Encryption SGL address low */
+       __le32  enc_addr_high;  /* dword 29. Encryption SGL address high */
+       __le32  enc_len;        /* dword 30. Encryption length */
+       __le32  enc_esgl;       /* dword 31. Encryption esgl bit */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI TM Start Command
+ * use to describe MPI SSP INI TM Start Command (64 bytes)
+ */
+struct ssp_ini_tm_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  relate_tag;
+       __le32  tmf;
+       u8      lun[8];
+       __le32  ds_ads_m;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_info_unit {
+       u8      lun[8];/* SCSI Logical Unit Number */
+       u8      reserved1;/* reserved */
+       u8      efb_prio_attr;
+       /* B7 : enabledFirstBurst */
+       /* B6-3 : taskPriority */
+       /* B2-0 : taskAttribute */
+       u8      reserved2;      /* reserved */
+       u8      additional_cdb_len;
+       /* B7-2 : additional_cdb_len */
+       /* B1-0 : reserved */
+       u8      cdb[16];/* The SCSI CDB up to 16 bytes length */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI IO Start Command
+ * use to describe MPI SSP INI IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+struct ssp_ini_io_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  dad_dir_m_tlr;
+       struct ssp_info_unit    ssp_iu;
+       __le32  addr_low;       /* dword 12: sgl low for normal I/O. */
+                               /* epl_descl for encryption I/O */
+       __le32  addr_high;      /* dword 13: sgl hi for normal I/O */
+                               /* dpl_descl for encryption I/O */
+       __le32  len;            /* dword 14: len for normal I/O. */
+                               /* edpl_desch for encryption I/O */
+       __le32  esgl;           /* dword 15: ESGL bit for normal I/O. */
+                               /* user defined tag mask for enc I/O */
+       /* The below fields are reserved for normal I/O */
+       u8      udt[12];        /* dword 16-18 */
+       __le32  sectcnt_ios;    /* dword 19 */
+       __le32  key_cmode;      /* dword 20 */
+       __le32  ks_enss;        /* dword 21 */
+       __le32  keytagl;        /* dword 22 */
+       __le32  keytagh;        /* dword 23 */
+       __le32  twk_val0;       /* dword 24 */
+       __le32  twk_val1;       /* dword 25 */
+       __le32  twk_val2;       /* dword 26 */
+       __le32  twk_val3;       /* dword 27 */
+       __le32  enc_addr_low;   /* dword 28: Encryption sgl addr low */
+       __le32  enc_addr_high;  /* dword 29: Encryption sgl addr hi */
+       __le32  enc_len;        /* dword 30: Encryption length */
+       __le32  enc_esgl;       /* dword 31: ESGL bit for encryption */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND
+ * use to initiate SSP I/O operation with optional DIF/ENC
+ */
+struct ssp_dif_enc_io_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  dirMTlr;
+       __le32  sspiu0;
+       __le32  sspiu1;
+       __le32  sspiu2;
+       __le32  sspiu3;
+       __le32  sspiu4;
+       __le32  sspiu5;
+       __le32  sspiu6;
+       __le32  epl_des;
+       __le32  dpl_desl_ndplr;
+       __le32  dpl_desh;
+       __le32  uum_uuv_bss_difbits;
+       u8      udt[12];
+       __le32  sectcnt_ios;
+       __le32  key_cmode;
+       __le32  ks_enss;
+       __le32  keytagl;
+       __le32  keytagh;
+       __le32  twk_val0;
+       __le32  twk_val1;
+       __le32  twk_val2;
+       __le32  twk_val3;
+       __le32  addr_low;
+       __le32  addr_high;
+       __le32  len;
+       __le32  esgl;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Firmware download
+ * use to describe MPI FW DOWNLOAD Command (64 bytes)
+ */
+struct fw_flash_Update_req {
+       __le32  tag;
+       __le32  cur_image_offset;
+       __le32  cur_image_len;
+       __le32  total_image_len;
+       u32     reserved0[7];
+       __le32  sgl_addr_lo;
+       __le32  sgl_addr_hi;
+       __le32  len;
+       __le32  ext_reserved;
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define FWFLASH_IOMB_RESERVED_LEN 0x07
+/**
+ * brief the data structure of FW_FLASH_UPDATE Response
+ * use to describe MPI FW_FLASH_UPDATE Response (64 bytes)
+ *
+ */
+ struct fw_flash_Update_resp {
+       __le32  tag;
+       __le32  status;
+       u32     reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Get NVM Data Command
+ * use to get data from NVM in HBA(64 bytes)
+ */
+struct get_nvm_data_req {
+       __le32  tag;
+       __le32  len_ir_vpdd;
+       __le32  vpd_offset;
+       u32     reserved[8];
+       __le32  resp_addr_lo;
+       __le32  resp_addr_hi;
+       __le32  resp_len;
+       u32     reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+struct set_nvm_data_req {
+       __le32  tag;
+       __le32  len_ir_vpdd;
+       __le32  vpd_offset;
+       u32     reserved[8];
+       __le32  resp_addr_lo;
+       __le32  resp_addr_hi;
+       __le32  resp_len;
+       u32     reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * use to modify controller configuration
+ */
+struct set_ctrl_cfg_req {
+       __le32  tag;
+       __le32  cfg_pg[14];
+       u32     reserved[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET CONTROLLER CONFIG COMMAND
+ * use to get controller configuration page
+ */
+struct get_ctrl_cfg_req {
+       __le32  tag;
+       __le32  pgcd;
+       __le32  int_vec;
+       u32     reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for KEK_MANAGEMENT COMMAND
+ * use for KEK management
+ */
+struct kek_mgmt_req {
+       __le32  tag;
+       __le32  new_curidx_ksop;
+       u32     reserved;
+       __le32  kblob[12];
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for DEK_MANAGEMENT COMMAND
+ * use for DEK management
+ */
+struct dek_mgmt_req {
+       __le32  tag;
+       __le32  kidx_dsop;
+       __le32  dekidx;
+       __le32  addr_l;
+       __le32  addr_h;
+       __le32  nent;
+       __le32  dbf_tblsize;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET PHY PROFILE COMMAND
+ * use to set phy specific information
+ */
+struct set_phy_profile_req {
+       __le32  tag;
+       __le32  ppc_phyid;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET PHY PROFILE COMMAND
+ * use to retrieve phy specific information
+ */
+struct get_phy_profile_req {
+       __le32  tag;
+       __le32  ppc_phyid;
+       __le32  profile[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for EXT FLASH PARTITION
+ * use to manage ext flash partition
+ */
+struct ext_flash_partition_req {
+       __le32  tag;
+       __le32  cmd;
+       __le32  offset;
+       __le32  len;
+       u32     reserved[7];
+       __le32  addr_low;
+       __le32  addr_high;
+       __le32  len1;
+       __le32  ext;
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define TWI_DEVICE     0x0
+#define C_SEEPROM      0x1
+#define VPD_FLASH      0x4
+#define AAP1_RDUMP     0x5
+#define IOP_RDUMP      0x6
+#define EXPAN_ROM      0x7
+
+#define IPMode         0x80000000
+#define NVMD_TYPE      0x0000000F
+#define NVMD_STAT      0x0000FFFF
+#define NVMD_LEN       0xFF000000
+/**
+ * brief the data structure of Get NVMD Data Response
+ * use to describe MPI Get NVMD Data Response (64 bytes)
+ */
+struct get_nvm_data_resp {
+       __le32          tag;
+       __le32          ir_tda_bn_dps_das_nvm;
+       __le32          dlen_status;
+       __le32          nvm_data[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Response
+ * use to describe MPI SAS Diagnostic Start/End Response (64 bytes)
+ *
+ */
+struct sas_diag_start_end_resp {
+       __le32          tag;
+       __le32          status;
+       u32             reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Response
+ * use to describe MPI SAS Diagnostic Execute Response (64 bytes)
+ *
+ */
+struct sas_diag_execute_resp {
+       __le32          tag;
+       __le32          cmdtype_cmddesc_phyid;
+       __le32          Status;
+       __le32          ReportData;
+       u32             reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Set Device State Response
+ * use to describe MPI Set Device State Response (64 bytes)
+ *
+ */
+struct set_dev_state_resp {
+       __le32          tag;
+       __le32          status;
+       __le32          device_id;
+       __le32          pds_nds;
+       u32             reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - begins */
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * use to modify controller configuration
+ */
+struct set_ctrl_cfg_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 err_qlfr_pgcd;
+       u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+struct get_ctrl_cfg_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 err_qlfr;
+       __le32 confg_page[12];
+} __attribute__((packed, aligned(4)));
+
+struct kek_mgmt_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 kidx_new_curr_ksop;
+       __le32 err_qlfr;
+       u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+struct dek_mgmt_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 kekidx_tbls_dsop;
+       __le32 dekidx;
+       __le32 err_qlfr;
+       u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct get_phy_profile_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 ppc_phyid;
+       __le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct flash_op_ext_resp {
+       __le32 tag;
+       __le32 cmd;
+       __le32 status;
+       __le32 epart_size;
+       __le32 epart_sect_size;
+       u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct set_phy_profile_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 ppc_phyid;
+       __le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_coalesced_comp_resp {
+       __le32 coal_cnt;
+       __le32 tag0;
+       __le32 ssp_tag0;
+       __le32 tag1;
+       __le32 ssp_tag1;
+       __le32 add_tag_ssp_tag[10];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - ends */
+
+/* brief data structure for SAS protocol timer configuration page.
+ *
+ */
+struct SASProtocolTimerConfig {
+       __le32 pageCode;                        /* 0 */
+       __le32 MST_MSI;                         /* 1 */
+       __le32 STP_SSP_MCT_TMO;                 /* 2 */
+       __le32 STP_FRM_TMO;                     /* 3 */
+       __le32 STP_IDLE_TMO;                    /* 4 */
+       __le32 OPNRJT_RTRY_INTVL;               /* 5 */
+       __le32 Data_Cmd_OPNRJT_RTRY_TMO;        /* 6 */
+       __le32 Data_Cmd_OPNRJT_RTRY_THR;        /* 7 */
+       __le32 MAX_AIP;                         /* 8 */
+} __attribute__((packed, aligned(4)));
+
+typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
+
+#define NDS_BITS 0x0F
+#define PDS_BITS 0xF0
+
+/*
+ * HW Events type
+ */
+
+#define HW_EVENT_RESET_START                   0x01
+#define HW_EVENT_CHIP_RESET_COMPLETE           0x02
+#define HW_EVENT_PHY_STOP_STATUS               0x03
+#define HW_EVENT_SAS_PHY_UP                    0x04
+#define HW_EVENT_SATA_PHY_UP                   0x05
+#define HW_EVENT_SATA_SPINUP_HOLD              0x06
+#define HW_EVENT_PHY_DOWN                      0x07
+#define HW_EVENT_PORT_INVALID                  0x08
+#define HW_EVENT_BROADCAST_CHANGE              0x09
+#define HW_EVENT_PHY_ERROR                     0x0A
+#define HW_EVENT_BROADCAST_SES                 0x0B
+#define HW_EVENT_INBOUND_CRC_ERROR             0x0C
+#define HW_EVENT_HARD_RESET_RECEIVED           0x0D
+#define HW_EVENT_MALFUNCTION                   0x0E
+#define HW_EVENT_ID_FRAME_TIMEOUT              0x0F
+#define HW_EVENT_BROADCAST_EXP                 0x10
+#define HW_EVENT_PHY_START_STATUS              0x11
+#define HW_EVENT_LINK_ERR_INVALID_DWORD                0x12
+#define HW_EVENT_LINK_ERR_DISPARITY_ERROR      0x13
+#define HW_EVENT_LINK_ERR_CODE_VIOLATION       0x14
+#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH  0x15
+#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED     0x16
+#define HW_EVENT_PORT_RECOVERY_TIMER_TMO       0x17
+#define HW_EVENT_PORT_RECOVER                  0x18
+#define HW_EVENT_PORT_RESET_TIMER_TMO          0x19
+#define HW_EVENT_PORT_RESET_COMPLETE           0x20
+#define EVENT_BROADCAST_ASYNCH_EVENT           0x21
+
+/* port state */
+#define PORT_NOT_ESTABLISHED                   0x00
+#define PORT_VALID                             0x01
+#define PORT_LOSTCOMM                          0x02
+#define PORT_IN_RESET                          0x04
+#define PORT_3RD_PARTY_RESET                   0x07
+#define PORT_INVALID                           0x08
+
+/*
+ * SSP/SMP/SATA IO Completion Status values
+ */
+
+#define IO_SUCCESS                             0x00
+#define IO_ABORTED                             0x01
+#define IO_OVERFLOW                            0x02
+#define IO_UNDERFLOW                           0x03
+#define IO_FAILED                              0x04
+#define IO_ABORT_RESET                         0x05
+#define IO_NOT_VALID                           0x06
+#define IO_NO_DEVICE                           0x07
+#define IO_ILLEGAL_PARAMETER                   0x08
+#define IO_LINK_FAILURE                                0x09
+#define IO_PROG_ERROR                          0x0A
+
+#define IO_EDC_IN_ERROR                                0x0B
+#define IO_EDC_OUT_ERROR                       0x0C
+#define IO_ERROR_HW_TIMEOUT                    0x0D
+#define IO_XFER_ERROR_BREAK                    0x0E
+#define IO_XFER_ERROR_PHY_NOT_READY            0x0F
+#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED       0x10
+#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION               0x11
+#define IO_OPEN_CNX_ERROR_BREAK                                0x12
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS                        0x13
+#define IO_OPEN_CNX_ERROR_BAD_DESTINATION              0x14
+#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED        0x15
+#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY           0x16
+#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION            0x17
+/* This error code 0x18 is not used on SPCv */
+#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR                        0x18
+#define IO_XFER_ERROR_NAK_RECEIVED                     0x19
+#define IO_XFER_ERROR_ACK_NAK_TIMEOUT                  0x1A
+#define IO_XFER_ERROR_PEER_ABORTED                     0x1B
+#define IO_XFER_ERROR_RX_FRAME                         0x1C
+#define IO_XFER_ERROR_DMA                              0x1D
+#define IO_XFER_ERROR_CREDIT_TIMEOUT                   0x1E
+#define IO_XFER_ERROR_SATA_LINK_TIMEOUT                        0x1F
+#define IO_XFER_ERROR_SATA                             0x20
+
+/* This error code 0x22 is not used on SPCv */
+#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST              0x22
+#define IO_XFER_ERROR_REJECTED_NCQ_MODE                        0x21
+#define IO_XFER_ERROR_ABORTED_NCQ_MODE                 0x23
+#define IO_XFER_OPEN_RETRY_TIMEOUT                     0x24
+/* This error code 0x25 is not used on SPCv */
+#define IO_XFER_SMP_RESP_CONNECTION_ERROR              0x25
+#define IO_XFER_ERROR_UNEXPECTED_PHASE                 0x26
+#define IO_XFER_ERROR_XFER_RDY_OVERRUN                 0x27
+#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED            0x28
+#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT                0x30
+
+/* The following error codes 0x31 and 0x32 are not used (obsolete) */
+#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK   0x31
+#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK        0x32
+
+#define IO_XFER_ERROR_OFFSET_MISMATCH                  0x34
+#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN               0x35
+#define IO_XFER_CMD_FRAME_ISSUED                       0x36
+#define IO_ERROR_INTERNAL_SMP_RESOURCE                 0x37
+#define IO_PORT_IN_RESET                               0x38
+#define IO_DS_NON_OPERATIONAL                          0x39
+#define IO_DS_IN_RECOVERY                              0x3A
+#define IO_TM_TAG_NOT_FOUND                            0x3B
+#define IO_XFER_PIO_SETUP_ERROR                                0x3C
+#define IO_SSP_EXT_IU_ZERO_LEN_ERROR                   0x3D
+#define IO_DS_IN_ERROR                                 0x3E
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY             0x3F
+#define IO_ABORT_IN_PROGRESS                           0x40
+#define IO_ABORT_DELAYED                               0x41
+#define IO_INVALID_LENGTH                              0x42
+
+/********** additional response event values *****************/
+
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT         0x43
+#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED   0x44
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO       0x45
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST                0x46
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE   0x47
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED        0x48
+#define IO_DS_INVALID                                  0x49
+/* WARNING: the values are not contiguous from here */
+#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR    0x52
+#define IO_XFER_DMA_ACTIVATE_TIMEOUT           0x53
+#define IO_XFER_ERROR_INTERNAL_CRC_ERROR       0x54
+#define MPI_IO_RQE_BUSY_FULL                   0x55
+#define IO_XFER_ERR_EOB_DATA_OVERRUN           0x56
+#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME     0x57
+#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED       0x58
+
+#define MPI_ERR_IO_RESOURCE_UNAVAILABLE                0x1004
+#define MPI_ERR_ATAPI_DEVICE_BUSY              0x1024
+
+#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS                0x2040
+/*
+ * An encryption IO request failed due to DEK Key Tag mismatch.
+ * The key tag supplied in the encryption IOMB does not match with
+ * the Key Tag in the referenced DEK Entry.
+ */
+#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH      0x2041
+#define IO_XFR_ERROR_CIPHER_MODE_INVALID       0x2042
+/*
+ * An encryption I/O request failed because the initial value (IV)
+ * in the unwrapped DEK blob didn't match the IV used to unwrap it.
+ */
+#define IO_XFR_ERROR_DEK_IV_MISMATCH           0x2043
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR   0x2044
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_INTERNAL_RAM              0x2045
+/*
+ * An encryption I/O request failed
+ * because the DEK index specified in the I/O was outside the bounds of
+ * the total number of entries in the host DEK table.
+ */
+#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS   0x2046
+
+/* define DIF IO response error status code */
+#define IO_XFR_ERROR_DIF_MISMATCH                      0x3000
+#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH      0x3001
+#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH                0x3002
+#define IO_XFR_ERROR_DIF_CRC_MISMATCH                  0x3003
+
+/* define operator management response status and error qualifier code */
+#define OPR_MGMT_OP_NOT_SUPPORTED                      0x2060
+#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL         0x2061
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND          0x2062
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH                0x2063
+#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED      0x2064
+#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL           0x2022
+#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE   0x2023
+/***************** additional response event values ***************/
+
+/* WARNING: This error code must always be the last number.
+ * If you add an error code, update this one as well,
+ * since it is used as an index.
+ */
+#define IO_ERROR_UNKNOWN_GENERIC                       0x2023
+
+/* MSGU CONFIGURATION TABLE*/
+
+#define SPCv_MSGU_CFG_TABLE_UPDATE             0x01
+#define SPCv_MSGU_CFG_TABLE_RESET              0x02
+#define SPCv_MSGU_CFG_TABLE_FREEZE             0x04
+#define SPCv_MSGU_CFG_TABLE_UNFREEZE           0x08
+#define MSGU_IBDB_SET                          0x00
+#define MSGU_HOST_INT_STATUS                   0x08
+#define MSGU_HOST_INT_MASK                     0x0C
+#define MSGU_IOPIB_INT_STATUS                  0x18
+#define MSGU_IOPIB_INT_MASK                    0x1C
+#define MSGU_IBDB_CLEAR                                0x20
+
+#define MSGU_MSGU_CONTROL                      0x24
+#define MSGU_ODR                               0x20
+#define MSGU_ODCR                              0x28
+
+#define MSGU_ODMR                              0x30
+#define MSGU_ODMR_U                            0x34
+#define MSGU_ODMR_CLR                          0x38
+#define MSGU_ODMR_CLR_U                                0x3C
+#define MSGU_OD_RSVD                           0x40
+
+#define MSGU_SCRATCH_PAD_0                     0x44
+#define MSGU_SCRATCH_PAD_1                     0x48
+#define MSGU_SCRATCH_PAD_2                     0x4C
+#define MSGU_SCRATCH_PAD_3                     0x50
+#define MSGU_HOST_SCRATCH_PAD_0                        0x54
+#define MSGU_HOST_SCRATCH_PAD_1                        0x58
+#define MSGU_HOST_SCRATCH_PAD_2                        0x5C
+#define MSGU_HOST_SCRATCH_PAD_3                        0x60
+#define MSGU_HOST_SCRATCH_PAD_4                        0x64
+#define MSGU_HOST_SCRATCH_PAD_5                        0x68
+#define MSGU_HOST_SCRATCH_PAD_6                        0x6C
+#define MSGU_HOST_SCRATCH_PAD_7                        0x70
+
+/* bit definition for ODMR register */
+#define ODMR_MASK_ALL                  0xFFFFFFFF /* mask all
+                                       interrupt vectors */
+#define ODMR_CLEAR_ALL                 0       /* clear all
+                                       interrupt vectors */
+/* bit definition for ODCR register */
+#define ODCR_CLEAR_ALL                 0xFFFFFFFF /* clear all
+                                       interrupt vectors */
+/* MSI-X interrupts */
+#define MSIX_TABLE_OFFSET              0x2000
+#define MSIX_TABLE_ELEMENT_SIZE                0x10
+#define MSIX_INTERRUPT_CONTROL_OFFSET  0xC
+#define MSIX_TABLE_BASE                        (MSIX_TABLE_OFFSET + \
+                                       MSIX_INTERRUPT_CONTROL_OFFSET)
+#define MSIX_INTERRUPT_DISABLE         0x1
+#define MSIX_INTERRUPT_ENABLE          0x0
+
+/* state definition for Scratch Pad1 register */
+#define SCRATCH_PAD_RAAE_READY         0x3
+#define SCRATCH_PAD_ILA_READY          0xC
+#define SCRATCH_PAD_BOOT_LOAD_SUCCESS  0x0
+#define SCRATCH_PAD_IOP0_READY         0xC00
+#define SCRATCH_PAD_IOP1_READY         0x3000
+
+/* boot loader state */
+#define SCRATCH_PAD1_BOOTSTATE_MASK            0x70    /* Bit 4-6 */
+#define SCRATCH_PAD1_BOOTSTATE_SUCESS          0x0     /* Load successful */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM     0x10    /* HDA SEEPROM */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP   0x20    /* HDA BootStrap Pins */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET   0x30    /* HDA Soft Reset */
+#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR      0x40    /* HDA critical error */
+#define SCRATCH_PAD1_BOOTSTATE_R1              0x50    /* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_R2              0x60    /* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_FATAL           0x70    /* Fatal Error */
+
+ /* state definition for Scratch Pad2 register */
+#define SCRATCH_PAD2_POR               0x00    /* power on state */
+#define SCRATCH_PAD2_SFR               0x01    /* soft reset state */
+#define SCRATCH_PAD2_ERR               0x02    /* error state */
+#define SCRATCH_PAD2_RDY               0x03    /* ready state */
+#define SCRATCH_PAD2_FWRDY_RST         0x04    /* FW rdy for soft reset flag */
+#define SCRATCH_PAD2_IOPRDY_RST                0x08    /* IOP ready for soft reset */
+#define SCRATCH_PAD2_STATE_MASK                0xFFFFFFF4 /* ScratchPad 2
+ Mask, bit1-0 State */
+#define SCRATCH_PAD2_RESERVED          0x000003FC /* ScratchPad 2
+ Reserved bit 2 to 9 */
+
+#define SCRATCH_PAD_ERROR_MASK         0xFFFFFC00 /* Error mask bits */
+#define SCRATCH_PAD_STATE_MASK         0x00000003 /* State Mask bits */
+
+/* main configuration offset - byte offset */
+#define MAIN_SIGNATURE_OFFSET          0x00 /* DWORD 0x00 */
+#define MAIN_INTERFACE_REVISION                0x04 /* DWORD 0x01 */
+#define MAIN_FW_REVISION               0x08 /* DWORD 0x02 */
+#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */
+#define MAIN_MAX_SGL_OFFSET            0x10 /* DWORD 0x04 */
+#define MAIN_CNTRL_CAP_OFFSET          0x14 /* DWORD 0x05 */
+#define MAIN_GST_OFFSET                        0x18 /* DWORD 0x06 */
+#define MAIN_IBQ_OFFSET                        0x1C /* DWORD 0x07 */
+#define MAIN_OBQ_OFFSET                        0x20 /* DWORD 0x08 */
+#define MAIN_IQNPPD_HPPD_OFFSET                0x24 /* DWORD 0x09 */
+
+/* 0x28 - 0x4C - RSVD */
+#define MAIN_EVENT_CRC_CHECK           0x48 /* DWORD 0x12 */
+#define MAIN_EVENT_LOG_ADDR_HI         0x50 /* DWORD 0x14 */
+#define MAIN_EVENT_LOG_ADDR_LO         0x54 /* DWORD 0x15 */
+#define MAIN_EVENT_LOG_BUFF_SIZE       0x58 /* DWORD 0x16 */
+#define MAIN_EVENT_LOG_OPTION          0x5C /* DWORD 0x17 */
+#define MAIN_PCS_EVENT_LOG_ADDR_HI     0x60 /* DWORD 0x18 */
+#define MAIN_PCS_EVENT_LOG_ADDR_LO     0x64 /* DWORD 0x19 */
+#define MAIN_PCS_EVENT_LOG_BUFF_SIZE   0x68 /* DWORD 0x1A */
+#define MAIN_PCS_EVENT_LOG_OPTION      0x6C /* DWORD 0x1B */
+#define MAIN_FATAL_ERROR_INTERRUPT     0x70 /* DWORD 0x1C */
+#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */
+#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */
+#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */
+#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */
+#define MAIN_GPIO_LED_FLAGS_OFFSET     0x84 /* DWORD 0x21 */
+#define MAIN_ANALOG_SETUP_OFFSET       0x88 /* DWORD 0x22 */
+
+#define MAIN_INT_VECTOR_TABLE_OFFSET   0x8C /* DWORD 0x23 */
+#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */
+#define MAIN_PORT_RECOVERY_TIMER       0x94 /* DWORD 0x25 */
+#define MAIN_INT_REASSERTION_DELAY     0x98 /* DWORD 0x26 */
+
+/* General Status Table offset - byte offset */
+#define GST_GSTLEN_MPIS_OFFSET         0x00
+#define GST_IQ_FREEZE_STATE0_OFFSET    0x04
+#define GST_IQ_FREEZE_STATE1_OFFSET    0x08
+#define GST_MSGUTCNT_OFFSET            0x0C
+#define GST_IOPTCNT_OFFSET             0x10
+/* 0x14 - 0x34 - RSVD */
+#define GST_GPIO_INPUT_VAL             0x38
+/* 0x3c - 0x40 - RSVD */
+#define GST_RERRINFO_OFFSET0           0x44
+#define GST_RERRINFO_OFFSET1           0x48
+#define GST_RERRINFO_OFFSET2           0x4c
+#define GST_RERRINFO_OFFSET3           0x50
+#define GST_RERRINFO_OFFSET4           0x54
+#define GST_RERRINFO_OFFSET5           0x58
+#define GST_RERRINFO_OFFSET6           0x5c
+#define GST_RERRINFO_OFFSET7           0x60
+
+/* General Status Table - MPI state */
+#define GST_MPI_STATE_UNINIT           0x00
+#define GST_MPI_STATE_INIT             0x01
+#define GST_MPI_STATE_TERMINATION      0x02
+#define GST_MPI_STATE_ERROR            0x03
+#define GST_MPI_STATE_MASK             0x07
+
+/* Per SAS PHY Attributes */
+
+#define PSPA_PHYSTATE0_OFFSET          0x00 /* Dword V */
+#define PSPA_OB_HW_EVENT_PID0_OFFSET   0x04 /* DWORD V+1 */
+#define PSPA_PHYSTATE1_OFFSET          0x08 /* Dword V+2 */
+#define PSPA_OB_HW_EVENT_PID1_OFFSET   0x0C /* DWORD V+3 */
+#define PSPA_PHYSTATE2_OFFSET          0x10 /* Dword V+4 */
+#define PSPA_OB_HW_EVENT_PID2_OFFSET   0x14 /* DWORD V+5 */
+#define PSPA_PHYSTATE3_OFFSET          0x18 /* Dword V+6 */
+#define PSPA_OB_HW_EVENT_PID3_OFFSET   0x1C /* DWORD V+7 */
+#define PSPA_PHYSTATE4_OFFSET          0x20 /* Dword V+8 */
+#define PSPA_OB_HW_EVENT_PID4_OFFSET   0x24 /* DWORD V+9 */
+#define PSPA_PHYSTATE5_OFFSET          0x28 /* Dword V+10 */
+#define PSPA_OB_HW_EVENT_PID5_OFFSET   0x2C /* DWORD V+11 */
+#define PSPA_PHYSTATE6_OFFSET          0x30 /* Dword V+12 */
+#define PSPA_OB_HW_EVENT_PID6_OFFSET   0x34 /* DWORD V+13 */
+#define PSPA_PHYSTATE7_OFFSET          0x38 /* Dword V+14 */
+#define PSPA_OB_HW_EVENT_PID7_OFFSET   0x3C /* DWORD V+15 */
+#define PSPA_PHYSTATE8_OFFSET          0x40 /* DWORD V+16 */
+#define PSPA_OB_HW_EVENT_PID8_OFFSET   0x44 /* DWORD V+17 */
+#define PSPA_PHYSTATE9_OFFSET          0x48 /* DWORD V+18 */
+#define PSPA_OB_HW_EVENT_PID9_OFFSET   0x4C /* DWORD V+19 */
+#define PSPA_PHYSTATE10_OFFSET         0x50 /* DWORD V+20 */
+#define PSPA_OB_HW_EVENT_PID10_OFFSET  0x54 /* DWORD V+21 */
+#define PSPA_PHYSTATE11_OFFSET         0x58 /* DWORD V+22 */
+#define PSPA_OB_HW_EVENT_PID11_OFFSET  0x5C /* DWORD V+23 */
+#define PSPA_PHYSTATE12_OFFSET         0x60 /* DWORD V+24 */
+#define PSPA_OB_HW_EVENT_PID12_OFFSET  0x64 /* DWORD V+25 */
+#define PSPA_PHYSTATE13_OFFSET         0x68 /* DWORD V+26 */
+#define PSPA_OB_HW_EVENT_PID13_OFFSET  0x6c /* DWORD V+27 */
+#define PSPA_PHYSTATE14_OFFSET         0x70 /* DWORD V+28 */
+#define PSPA_OB_HW_EVENT_PID14_OFFSET  0x74 /* DWORD V+29 */
+#define PSPA_PHYSTATE15_OFFSET         0x78 /* DWORD V+30 */
+#define PSPA_OB_HW_EVENT_PID15_OFFSET  0x7c /* DWORD V+31 */
+/* end PSPA */
+
+/* inbound queue configuration offset - byte offset */
+#define IB_PROPERITY_OFFSET            0x00
+#define IB_BASE_ADDR_HI_OFFSET         0x04
+#define IB_BASE_ADDR_LO_OFFSET         0x08
+#define IB_CI_BASE_ADDR_HI_OFFSET      0x0C
+#define IB_CI_BASE_ADDR_LO_OFFSET      0x10
+#define IB_PIPCI_BAR                   0x14
+#define IB_PIPCI_BAR_OFFSET            0x18
+#define IB_RESERVED_OFFSET             0x1C
+
+/* outbound queue configuration offset - byte offset */
+#define OB_PROPERITY_OFFSET            0x00
+#define OB_BASE_ADDR_HI_OFFSET         0x04
+#define OB_BASE_ADDR_LO_OFFSET         0x08
+#define OB_PI_BASE_ADDR_HI_OFFSET      0x0C
+#define OB_PI_BASE_ADDR_LO_OFFSET      0x10
+#define OB_CIPCI_BAR                   0x14
+#define OB_CIPCI_BAR_OFFSET            0x18
+#define OB_INTERRUPT_COALES_OFFSET     0x1C
+#define OB_DYNAMIC_COALES_OFFSET       0x20
+#define OB_PROPERTY_INT_ENABLE         0x40000000
+
+#define MBIC_NMI_ENABLE_VPE0_IOP       0x000418
+#define MBIC_NMI_ENABLE_VPE0_AAP1      0x000418
+/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
+#define PCIE_EVENT_INTERRUPT_ENABLE    0x003040
+#define PCIE_EVENT_INTERRUPT           0x003044
+#define PCIE_ERROR_INTERRUPT_ENABLE    0x003048
+#define PCIE_ERROR_INTERRUPT           0x00304C
+
+/* SPCV soft reset */
+#define SPC_REG_SOFT_RESET 0x00001000
+#define SPCv_NORMAL_RESET_VALUE                0x1
+
+#define SPCv_SOFT_RESET_READ_MASK              0xC0
+#define SPCv_SOFT_RESET_NO_RESET               0x0
+#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED   0x40
+#define SPCv_SOFT_RESET_HDA_MODE_OCCURED       0x80
+#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED     0xC0
+
+/* signature definition for host scratch pad0 register */
+#define SPC_SOFT_RESET_SIGNATURE       0x252acbcd
+/* Signature for Soft Reset */
+
+/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
+#define SPC_REG_RESET                  0x000000/* reset register */
+
+/* bit definition for SPC_RESET register */
+#define SPC_REG_RESET_OSSP             0x00000001
+#define SPC_REG_RESET_RAAE             0x00000002
+#define SPC_REG_RESET_PCS_SPBC         0x00000004
+#define SPC_REG_RESET_PCS_IOP_SS       0x00000008
+#define SPC_REG_RESET_PCS_AAP1_SS      0x00000010
+#define SPC_REG_RESET_PCS_AAP2_SS      0x00000020
+#define SPC_REG_RESET_PCS_LM           0x00000040
+#define SPC_REG_RESET_PCS              0x00000080
+#define SPC_REG_RESET_GSM              0x00000100
+#define SPC_REG_RESET_DDR2             0x00010000
+#define SPC_REG_RESET_BDMA_CORE                0x00020000
+#define SPC_REG_RESET_BDMA_SXCBI       0x00040000
+#define SPC_REG_RESET_PCIE_AL_SXCBI    0x00080000
+#define SPC_REG_RESET_PCIE_PWR         0x00100000
+#define SPC_REG_RESET_PCIE_SFT         0x00200000
+#define SPC_REG_RESET_PCS_SXCBI                0x00400000
+#define SPC_REG_RESET_LMS_SXCBI                0x00800000
+#define SPC_REG_RESET_PMIC_SXCBI       0x01000000
+#define SPC_REG_RESET_PMIC_CORE                0x02000000
+#define SPC_REG_RESET_PCIE_PC_SXCBI    0x04000000
+#define SPC_REG_RESET_DEVICE           0x80000000
+
+/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
+#define SPCV_IBW_AXI_TRANSLATION_LOW   0x001010
+
+#define MBIC_AAP1_ADDR_BASE            0x060000
+#define MBIC_IOP_ADDR_BASE             0x070000
+#define GSM_ADDR_BASE                  0x0700000
+/* Dynamic map through Bar4 - 0x00700000 */
+#define GSM_CONFIG_RESET               0x00000000
+#define RAM_ECC_DB_ERR                 0x00000018
+#define GSM_READ_ADDR_PARITY_INDIC     0x00000058
+#define GSM_WRITE_ADDR_PARITY_INDIC    0x00000060
+#define GSM_WRITE_DATA_PARITY_INDIC    0x00000068
+#define GSM_READ_ADDR_PARITY_CHECK     0x00000038
+#define GSM_WRITE_ADDR_PARITY_CHECK    0x00000040
+#define GSM_WRITE_DATA_PARITY_CHECK    0x00000048
+
+#define RB6_ACCESS_REG                 0x6A0000
+#define HDAC_EXEC_CMD                  0x0002
+#define HDA_C_PA                       0xcb
+#define HDA_SEQ_ID_BITS                        0x00ff0000
+#define HDA_GSM_OFFSET_BITS            0x00FFFFFF
+#define HDA_GSM_CMD_OFFSET_BITS                0x42C0
+#define HDA_GSM_RSP_OFFSET_BITS                0x42E0
+
+#define MBIC_AAP1_ADDR_BASE            0x060000
+#define MBIC_IOP_ADDR_BASE             0x070000
+#define GSM_ADDR_BASE                  0x0700000
+#define SPC_TOP_LEVEL_ADDR_BASE                0x000000
+#define GSM_CONFIG_RESET_VALUE         0x00003b00
+#define GPIO_ADDR_BASE                 0x00090000
+#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET  0x0000010c
+
+/* RB6 offset */
+#define SPC_RB6_OFFSET                 0x80C0
+/* Magic number of soft reset for RB6 */
+#define RB6_MAGIC_NUMBER_RST           0x1234
+
+/* Device Register status */
+#define DEVREG_SUCCESS                                 0x00
+#define DEVREG_FAILURE_OUT_OF_RESOURCE                 0x01
+#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED       0x02
+#define DEVREG_FAILURE_INVALID_PHY_ID                  0x03
+#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED       0x04
+#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE            0x05
+#define DEVREG_FAILURE_PORT_NOT_VALID_STATE            0x06
+#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID           0x07
+
+#endif
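
The scratch-pad, soft-reset and MSGU offsets above are bare register definitions; the header does not show how they are consumed. As a minimal sketch (not the driver's actual code), firmware readiness could be checked by reading Scratch Pad 1 through an ioremap()ed MSGU window; the helper name and the msgu_base pointer are assumptions made only for this example:

	/* Sketch only: poll Scratch Pad 1 for the RAAE and IOP0 ready bits.
	 * "msgu_base" is assumed to be an ioremap()ed pointer to the MSGU
	 * register region laid out by the offsets above.
	 */
	static bool spcv_fw_ready(void __iomem *msgu_base)
	{
		u32 sp1 = readl(msgu_base + MSGU_SCRATCH_PAD_1);

		return (sp1 & SCRATCH_PAD_RAAE_READY) == SCRATCH_PAD_RAAE_READY &&
		       (sp1 & SCRATCH_PAD_IOP0_READY) == SCRATCH_PAD_IOP0_READY;
	}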
index 317a7fdc3b825064e4a5677f0a64bc5f3e43a8d6..23d607218ae8b8a27e07b51d3752bfae17bbba26 100644 (file)
@@ -24,7 +24,9 @@ config SCSI_QLA_FC
 
        Firmware images can be retrieved from:
 
-               ftp://ftp.qlogic.com/outgoing/linux/firmware/
+               http://ldriver.qlogic.com/firmware/
+
+       They are also included in the linux-firmware tree.
 
 config TCM_QLA2XXX
        tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
index 729b74389f83168544fb4ef95afe2e9464193ba6..937fed8cb0388550b619dfae667d452fa1c686a7 100644 (file)
@@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
 
        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-               lcmd_pkt->cntrl_flags =
-                   __constant_cpu_to_le16(TMF_WRITE_DATA);
+               lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-               lcmd_pkt->cntrl_flags =
-                   __constant_cpu_to_le16(TMF_READ_DATA);
+               lcmd_pkt->cntrl_flags = TMF_READ_DATA;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
 
index 5307bf86d5e08d453be2f8d3bd3ca781607b640c..ad72c1d8511162b9465b96265510bdad6dfd1e42 100644 (file)
@@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
        qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-void
+static void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
        struct qla_hw_data *ha = (struct qla_hw_data *)data;
index d182c96e17ea48ef0427a67c461d8e9bc828b5c2..7a3870f385f63adcc7ed23db355af7fe0682fb93 100644 (file)
@@ -1370,7 +1370,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
                dump_stack();
                return;
        }
-       target_wait_for_sess_cmds(se_sess, 0);
+       target_wait_for_sess_cmds(se_sess);
 
        transport_deregister_session_configfs(sess->se_sess);
        transport_deregister_session(sess->se_sess);
index 14fec976f634e1c4804855617c65aae7fcd4a566..fad71ed067ec3525a742fe251634fa429bb781f8 100644 (file)
@@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
        mrb->mbox_cmd = in_mbox[0];
        wmb();
 
+       ha->iocb_cnt += mrb->iocb_cnt;
        ha->isp_ops->queue_iocb(ha);
 exit_mbox_iocb:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
index a47f99957ba8eb9c37d6e4b46ec25cfe763c24a8..4d231c12463eb38ad29d5b3c4ca25f4af24d99fa 100644 (file)
@@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
        fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
        fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
        fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
-       fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf);
-       fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf);
+       fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
+       fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
        fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
        fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
        fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
        fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
-       fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn);
-       fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn);
+       fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
+       fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
        fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
        fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
        fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
@@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
  * If this is invoked as a result of a userspace call then the entry is marked
  * as nonpersistent using flash_state field.
  **/
-int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
-                                struct dev_db_entry *fw_ddb_entry,
-                                uint16_t *idx, int user)
+static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
+                                       struct dev_db_entry *fw_ddb_entry,
+                                       uint16_t *idx, int user)
 {
        struct iscsi_bus_flash_session *fnode_sess = NULL;
        struct iscsi_bus_flash_conn *fnode_conn = NULL;
@@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
                ql4_printk(KERN_ERR, ha,
                           "%s: A non-persistent entry %s found\n",
                           __func__, dev->kobj.name);
+               put_device(dev);
                goto exit_ddb_add;
        }
 
@@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
        int parent_type, parent_index = 0xffff;
        int rc = 0;
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev)
                return -EIO;
 
@@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                        rc = sprintf(buf, "\n");
                break;
        case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
-               if ((fnode_sess->discovery_parent_idx) >= 0  &&
-                   (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
+               if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
                        parent_index = fnode_sess->discovery_parent_idx;
 
                rc = sprintf(buf, "%u\n", parent_index);
@@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                        parent_type = ISCSI_DISC_PARENT_ISNS;
                else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
                        parent_type = ISCSI_DISC_PARENT_UNKNOWN;
-               else if (fnode_sess->discovery_parent_type >= 0  &&
-                        fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
+               else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
                        parent_type = ISCSI_DISC_PARENT_SENDTGT;
                else
                        parent_type = ISCSI_DISC_PARENT_UNKNOWN;
@@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                rc = -ENOSYS;
                break;
        }
+
+       put_device(dev);
        return rc;
 }
 
@@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
 {
        struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
        struct scsi_qla_host *ha = to_qla_host(shost);
-       struct dev_db_entry *fw_ddb_entry = NULL;
        struct iscsi_flashnode_param_info *fnode_param;
        struct nlattr *attr;
        int rc = QLA_ERROR;
        uint32_t rem = len;
 
-       fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
-       if (!fw_ddb_entry) {
-               DEBUG2(ql4_printk(KERN_ERR, ha,
-                                 "%s: Unable to allocate ddb buffer\n",
-                                 __func__));
-               return -ENOMEM;
-       }
-
        nla_for_each_attr(attr, data, len, rem) {
                fnode_param = nla_data(attr);
 
@@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
        struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        uint16_t *ddb_cookie = NULL;
-       size_t ddb_size;
+       size_t ddb_size = 0;
        void *pddb = NULL;
        int target_id;
        int rc = 0;
 
-       if (!fnode_sess) {
-               rc = -EINVAL;
-               goto exit_ddb_del;
-       }
-
        if (fnode_sess->is_boot_target) {
                rc = -EPERM;
                DEBUG2(ql4_printk(KERN_ERR, ha,
@@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
 
                dev_db_start_offset += (fnode_sess->target_id *
                                       sizeof(*fw_ddb_entry));
-               dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) -
-                                      (void *)fw_ddb_entry;
+               dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
 
                ddb_size = sizeof(*ddb_cookie);
        }
index 83e0fec35d563262df3d82bbc404d0b61456353a..fe873cf7570d05abe96e71e4422c7744ee15c265 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k9"
index 5add6f4e79281a252928519f9613cf9c92f275dc..0a537a0515ca6c6c37829fb3c45f1fe8aa20c4de 100644 (file)
@@ -1997,24 +1997,39 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
        return ret;
 }
 
-static unsigned int map_state(sector_t lba, unsigned int *num)
+static unsigned long lba_to_map_index(sector_t lba)
+{
+       if (scsi_debug_unmap_alignment) {
+               lba += scsi_debug_unmap_granularity -
+                       scsi_debug_unmap_alignment;
+       }
+       do_div(lba, scsi_debug_unmap_granularity);
+
+       return lba;
+}
+
+static sector_t map_index_to_lba(unsigned long index)
 {
-       unsigned int granularity, alignment, mapped;
-       sector_t block, next, end;
+       return index * scsi_debug_unmap_granularity -
+               scsi_debug_unmap_alignment;
+}
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-       block = lba + alignment;
-       do_div(block, granularity);
+static unsigned int map_state(sector_t lba, unsigned int *num)
+{
+       sector_t end;
+       unsigned int mapped;
+       unsigned long index;
+       unsigned long next;
 
-       mapped = test_bit(block, map_storep);
+       index = lba_to_map_index(lba);
+       mapped = test_bit(index, map_storep);
 
        if (mapped)
-               next = find_next_zero_bit(map_storep, map_size, block);
+               next = find_next_zero_bit(map_storep, map_size, index);
        else
-               next = find_next_bit(map_storep, map_size, block);
+               next = find_next_bit(map_storep, map_size, index);
 
-       end = next * granularity - scsi_debug_unmap_alignment;
+       end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
        *num = end - lba;
 
        return mapped;
@@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
 
 static void map_region(sector_t lba, unsigned int len)
 {
-       unsigned int granularity, alignment;
        sector_t end = lba + len;
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-
        while (lba < end) {
-               sector_t block, rem;
-
-               block = lba + alignment;
-               rem = do_div(block, granularity);
+               unsigned long index = lba_to_map_index(lba);
 
-               if (block < map_size)
-                       set_bit(block, map_storep);
+               if (index < map_size)
+                       set_bit(index, map_storep);
 
-               lba += granularity - rem;
+               lba = map_index_to_lba(index + 1);
        }
 }
 
 static void unmap_region(sector_t lba, unsigned int len)
 {
-       unsigned int granularity, alignment;
        sector_t end = lba + len;
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-
        while (lba < end) {
-               sector_t block, rem;
-
-               block = lba + alignment;
-               rem = do_div(block, granularity);
+               unsigned long index = lba_to_map_index(lba);
 
-               if (rem == 0 && lba + granularity < end && block < map_size) {
-                       clear_bit(block, map_storep);
-                       if (scsi_debug_lbprz)
+               if (lba == map_index_to_lba(index) &&
+                   lba + scsi_debug_unmap_granularity <= end &&
+                   index < map_size) {
+                       clear_bit(index, map_storep);
+                       if (scsi_debug_lbprz) {
                                memset(fake_storep +
-                                      block * scsi_debug_sector_size, 0,
-                                      scsi_debug_sector_size);
+                                      lba * scsi_debug_sector_size, 0,
+                                      scsi_debug_sector_size *
+                                      scsi_debug_unmap_granularity);
+                       }
                }
-               lba += granularity - rem;
+               lba = map_index_to_lba(index + 1);
        }
 }
 
@@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 
        write_lock_irqsave(&atomic_rw, iflags);
        ret = do_device_access(SCpnt, devip, lba, num, 1);
-       if (scsi_debug_unmap_granularity)
+       if (scsi_debug_lbp())
                map_region(lba, num);
        write_unlock_irqrestore(&atomic_rw, iflags);
        if (-1 == ret)
@@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
 
        write_lock_irqsave(&atomic_rw, iflags);
 
-       if (unmap && scsi_debug_unmap_granularity) {
+       if (unmap && scsi_debug_lbp()) {
                unmap_region(lba, num);
                goto out;
        }
@@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
                       fake_storep + (lba * scsi_debug_sector_size),
                       scsi_debug_sector_size);
 
-       if (scsi_debug_unmap_granularity)
+       if (scsi_debug_lbp())
                map_region(lba, num);
 out:
        write_unlock_irqrestore(&atomic_rw, iflags);
@@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void)
 
        /* Logical Block Provisioning */
        if (scsi_debug_lbp()) {
-               unsigned int map_bytes;
-
                scsi_debug_unmap_max_blocks =
                        clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
 
@@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void)
                        clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
 
                if (scsi_debug_unmap_alignment &&
-                   scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
+                   scsi_debug_unmap_granularity <=
+                   scsi_debug_unmap_alignment) {
                        printk(KERN_ERR
-                              "%s: ERR: unmap_granularity < unmap_alignment\n",
+                              "%s: ERR: unmap_granularity <= unmap_alignment\n",
                               __func__);
                        return -EINVAL;
                }
 
-               map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
-               map_bytes = map_size >> 3;
-               map_storep = vmalloc(map_bytes);
+               map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
+               map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
 
                printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
                       map_size);
@@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void)
                        goto free_vm;
                }
 
-               memset(map_storep, 0x0, map_bytes);
+               bitmap_zero(map_storep, map_size);
 
                /* Map first 1KB for partition table */
                if (scsi_debug_num_parts)
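
The lba_to_map_index()/map_index_to_lba() helpers introduced above centralize the grain arithmetic that map_state(), map_region() and unmap_region() previously open-coded. A worked example, with parameter values chosen purely for illustration:

	/* Illustration only: with scsi_debug_unmap_granularity = 8 and
	 * scsi_debug_unmap_alignment = 0 (so the alignment adjustment is a no-op):
	 *
	 *   lba_to_map_index(0)  == 0     lba_to_map_index(7)  == 0
	 *   lba_to_map_index(8)  == 1     lba_to_map_index(15) == 1
	 *   map_index_to_lba(1)  == 8
	 *
	 * A 64-sector store therefore needs lba_to_map_index(63) + 1 == 8 map
	 * bits, which is how scsi_debug_init() now sizes map_size.
	 */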
index c1b05a83d403221912fb845e314d3a30656a89fc..f43de1e56420ac7916ca99c83281b81b417f4fb9 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
+#include <linux/jiffies.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
        struct scsi_device *sdev = scmd->device;
        struct Scsi_Host *shost = sdev->host;
        DECLARE_COMPLETION_ONSTACK(done);
-       unsigned long timeleft;
+       unsigned long timeleft = timeout;
        struct scsi_eh_save ses;
+       const unsigned long stall_for = msecs_to_jiffies(100);
        int rtn;
 
+retry:
        scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
        shost->eh_action = &done;
 
        scsi_log_send(scmd);
        scmd->scsi_done = scsi_eh_done;
-       shost->hostt->queuecommand(shost, scmd);
-
-       timeleft = wait_for_completion_timeout(&done, timeout);
+       rtn = shost->hostt->queuecommand(shost, scmd);
+       if (rtn) {
+               if (timeleft > stall_for) {
+                       scsi_eh_restore_cmnd(scmd, &ses);
+                       timeleft -= stall_for;
+                       msleep(jiffies_to_msecs(stall_for));
+                       goto retry;
+               }
+               /* signal not to enter either branch of the if () below */
+               timeleft = 0;
+               rtn = NEEDS_RETRY;
+       } else {
+               timeleft = wait_for_completion_timeout(&done, timeout);
+       }
 
        shost->eh_action = NULL;
 
-       scsi_log_completion(scmd, SUCCESS);
+       scsi_log_completion(scmd, rtn);
 
        SCSI_LOG_ERROR_RECOVERY(3,
                printk("%s: scmd: %p, timeleft: %ld\n",
                        __func__, scmd, timeleft));
 
        /*
-        * If there is time left scsi_eh_done got called, and we will
-        * examine the actual status codes to see whether the command
-        * actually did complete normally, else tell the host to forget
-        * about this command.
+        * If there is time left scsi_eh_done got called, and we will examine
+        * the actual status codes to see whether the command actually did
+        * complete normally, else if we have a zero return and no time left,
+        * the command must still be pending, so abort it and return FAILED.
+        * If we never actually managed to issue the command, because
+        * ->queuecommand() kept returning nonzero, use the rtn value set
+        * above (so don't execute either branch of the if).
         */
        if (timeleft) {
                rtn = scsi_eh_completed_normally(scmd);
@@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
                        rtn = FAILED;
                        break;
                }
-       } else {
+       } else if (!rtn) {
                scsi_abort_eh_cmnd(scmd);
                rtn = FAILED;
        }
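
The retry loop added above bounds how long error handling keeps re-issuing a command that ->queuecommand() refuses. As a rough illustration (numbers chosen only for the example): with stall_for = msecs_to_jiffies(100) and a 10-second timeout, the loop can re-queue the command on the order of 100 times, sleeping about 100 ms between attempts, before giving up with timeleft forced to 0; if queuecommand() accepts the command at any point, whatever remains of timeleft is spent in the normal wait_for_completion_timeout() path.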
index c31187d79343a17af26404a2d6c547dc392ae151..86d522004a208255b17861b4c1cb556bbe7c973e 100644 (file)
@@ -276,11 +276,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(scsi_execute);
 
-
-int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
+int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries,
-                    int *resid)
+                    int *resid, int flags)
 {
        char *sense = NULL;
        int result;
@@ -291,14 +290,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                        return DRIVER_ERROR << 24;
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
-                             sense, timeout, retries, 0, resid);
+                             sense, timeout, retries, flags, resid);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
 
        kfree(sense);
        return result;
 }
-EXPORT_SYMBOL(scsi_execute_req);
+EXPORT_SYMBOL(scsi_execute_req_flags);
 
 /*
  * Function:    scsi_init_cmd_errh()
index 8f6b12cbd224801e2828571beb6938808d87b26c..42539ee2cb111f0e10a6b152d9a0d60cbb2d895b 100644 (file)
@@ -144,33 +144,83 @@ static int scsi_bus_restore(struct device *dev)
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int sdev_blk_runtime_suspend(struct scsi_device *sdev,
+                                       int (*cb)(struct device *))
+{
+       int err;
+
+       err = blk_pre_runtime_suspend(sdev->request_queue);
+       if (err)
+               return err;
+       if (cb)
+               err = cb(&sdev->sdev_gendev);
+       blk_post_runtime_suspend(sdev->request_queue, err);
+
+       return err;
+}
+
+static int sdev_runtime_suspend(struct device *dev)
+{
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL;
+       struct scsi_device *sdev = to_scsi_device(dev);
+       int err;
+
+       if (sdev->request_queue->dev)
+               return sdev_blk_runtime_suspend(sdev, cb);
+
+       err = scsi_dev_type_suspend(dev, cb);
+       if (err == -EAGAIN)
+               pm_schedule_suspend(dev, jiffies_to_msecs(
+                                       round_jiffies_up_relative(HZ/10)));
+       return err;
+}
+
 static int scsi_runtime_suspend(struct device *dev)
 {
        int err = 0;
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
        dev_dbg(dev, "scsi_runtime_suspend\n");
-       if (scsi_is_sdev_device(dev)) {
-               err = scsi_dev_type_suspend(dev,
-                               pm ? pm->runtime_suspend : NULL);
-               if (err == -EAGAIN)
-                       pm_schedule_suspend(dev, jiffies_to_msecs(
-                               round_jiffies_up_relative(HZ/10)));
-       }
+       if (scsi_is_sdev_device(dev))
+               err = sdev_runtime_suspend(dev);
 
        /* Insert hooks here for targets, hosts, and transport classes */
 
        return err;
 }
 
-static int scsi_runtime_resume(struct device *dev)
+static int sdev_blk_runtime_resume(struct scsi_device *sdev,
+                                       int (*cb)(struct device *))
 {
        int err = 0;
+
+       blk_pre_runtime_resume(sdev->request_queue);
+       if (cb)
+               err = cb(&sdev->sdev_gendev);
+       blk_post_runtime_resume(sdev->request_queue, err);
+
+       return err;
+}
+
+static int sdev_runtime_resume(struct device *dev)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL;
+
+       if (sdev->request_queue->dev)
+               return sdev_blk_runtime_resume(sdev, cb);
+       else
+               return scsi_dev_type_resume(dev, cb);
+}
+
+static int scsi_runtime_resume(struct device *dev)
+{
+       int err = 0;
 
        dev_dbg(dev, "scsi_runtime_resume\n");
        if (scsi_is_sdev_device(dev))
-               err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL);
+               err = sdev_runtime_resume(dev);
 
        /* Insert hooks here for targets, hosts, and transport classes */
 
@@ -185,10 +235,18 @@ static int scsi_runtime_idle(struct device *dev)
 
        /* Insert hooks here for targets, hosts, and transport classes */
 
-       if (scsi_is_sdev_device(dev))
-               err = pm_schedule_suspend(dev, 100);
-       else
+       if (scsi_is_sdev_device(dev)) {
+               struct scsi_device *sdev = to_scsi_device(dev);
+
+               if (sdev->request_queue->dev) {
+                       pm_runtime_mark_last_busy(dev);
+                       err = pm_runtime_autosuspend(dev);
+               } else {
+                       err = pm_runtime_suspend(dev);
+               }
+       } else {
                err = pm_runtime_suspend(dev);
+       }
        return err;
 }
 
index db66357211ed167d4df1019f151b7e99214788c8..86f0c5d5c116f74d2117806e45ec4599d750837a 100644 (file)
@@ -84,6 +84,7 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file)
 
 static const struct file_operations proc_scsi_fops = {
        .open = proc_scsi_host_open,
+       .release = single_release,
        .read = seq_read,
        .llseek = seq_lseek,
        .write = proc_scsi_host_write
index 47799a33d6caacf7e5715e9c7760d121c24797b1..133926b1bb78bc39231260b5359dff3ff9a16bcb 100644 (file)
@@ -1019,8 +1019,7 @@ static int flashnode_match_index(struct device *dev, void *data)
 /**
  * iscsi_get_flashnode_by_index -finds flashnode session entry by index
  * @shost: pointer to host data
- * @data: pointer to data containing value to use for comparison
- * @fn: function pointer that does actual comparison
+ * @idx: index to match
  *
  * Finds the flashnode session object for the passed index
  *
@@ -1029,13 +1028,13 @@ static int flashnode_match_index(struct device *dev, void *data)
  *  %NULL on failure
  */
 static struct iscsi_bus_flash_session *
-iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data,
-                            int (*fn)(struct device *dev, void *data))
+iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
 {
        struct iscsi_bus_flash_session *fnode_sess = NULL;
        struct device *dev;
 
-       dev = device_find_child(&shost->shost_gendev, data, fn);
+       dev = device_find_child(&shost->shost_gendev, &idx,
+                               flashnode_match_index);
        if (dev)
                fnode_sess = iscsi_dev_to_flash_session(dev);
 
@@ -1059,18 +1058,13 @@ struct device *
 iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
                          int (*fn)(struct device *dev, void *data))
 {
-       struct device *dev;
-
-       dev = device_find_child(&shost->shost_gendev, data, fn);
-       return dev;
+       return device_find_child(&shost->shost_gendev, data, fn);
 }
 EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
 
 /**
  * iscsi_find_flashnode_conn - finds flashnode connection entry
  * @fnode_sess: pointer to parent flashnode session entry
- * @data: pointer to data containing value to use for comparison
- * @fn: function pointer that does actual comparison
  *
  * Finds the flashnode connection object comparing the data passed using logic
  * defined in passed function pointer
@@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
  *  %NULL on failure
  */
 struct device *
-iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
-                         void *data,
-                         int (*fn)(struct device *dev, void *data))
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
 {
-       struct device *dev;
-
-       dev = device_find_child(&fnode_sess->dev, data, fn);
-       return dev;
+       return device_find_child(&fnode_sess->dev, NULL,
+                                iscsi_is_flashnode_conn_dev);
 }
 EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
 
@@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->set_flashnode_param) {
@@ -2824,25 +2814,27 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.set_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.set_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.set_flashnode.host_no);
+                      __func__, idx, ev->u.set_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
        err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
 {
        struct Scsi_Host *shost;
        struct iscsi_bus_flash_session *fnode_sess;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->del_flashnode) {
@@ -2907,17 +2899,17 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.del_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.del_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.del_flashnode.host_no);
+                      __func__, idx, ev->u.del_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
        err = transport->del_flashnode(fnode_sess);
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->login_flashnode) {
@@ -2949,25 +2941,27 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.login_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.login_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.login_flashnode.host_no);
+                      __func__, idx, ev->u.login_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
        err = transport->login_flashnode(fnode_sess, fnode_conn);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->logout_flashnode) {
@@ -2999,26 +2993,28 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.logout_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.logout_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.logout_flashnode.host_no);
+                      __func__, idx, ev->u.logout_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
 
        err = transport->logout_flashnode(fnode_sess, fnode_conn);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void)
        }
 
        iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
-       if (!iscsi_eh_timer_workq)
+       if (!iscsi_eh_timer_workq) {
+               err = -ENOMEM;
                goto release_nls;
+       }
 
        return 0;
 
index e6689776b4f617ac55b1e9c4dac138cee85a9196..c1c555242d0d715d46c051955deacea4a819d634 100644 (file)
@@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
        char *buffer_data;
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
+       static const char temp[] = "temporary ";
        int len;
 
        if (sdp->type != TYPE_DISK)
@@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
                 * it's not worth the risk */
                return -EINVAL;
 
+       if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
+               buf += sizeof(temp) - 1;
+               sdkp->cache_override = 1;
+       } else {
+               sdkp->cache_override = 0;
+       }
+
        for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
                len = strlen(sd_cache_types[i]);
                if (strncmp(sd_cache_types[i], buf, len) == 0 &&
@@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
        rcd = ct & 0x01 ? 1 : 0;
        wce = ct & 0x02 ? 1 : 0;
+
+       if (sdkp->cache_override) {
+               sdkp->WCE = wce;
+               sdkp->RCD = rcd;
+               return count;
+       }
+
        if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
                            SD_MAX_RETRIES, &data, NULL))
                return -EINVAL;
@@ -1121,10 +1136,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
 
        sdev = sdkp->device;
 
-       retval = scsi_autopm_get_device(sdev);
-       if (retval)
-               goto error_autopm;
-
        /*
         * If the device is in error recovery, wait until it is done.
         * If the device is offline, then disallow any access to it.
@@ -1169,8 +1180,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
        return 0;
 
 error_out:
-       scsi_autopm_put_device(sdev);
-error_autopm:
        scsi_disk_put(sdkp);
        return retval;  
 }
@@ -1205,7 +1214,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
         * XXX is followed by a "rmmod sd_mod"?
         */
 
-       scsi_autopm_put_device(sdev);
        scsi_disk_put(sdkp);
 }
 
@@ -1366,14 +1374,9 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
        retval = -ENODEV;
 
        if (scsi_block_when_processing_errors(sdp)) {
-               retval = scsi_autopm_get_device(sdp);
-               if (retval)
-                       goto out;
-
                sshdr  = kzalloc(sizeof(*sshdr), GFP_KERNEL);
                retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
                                              sshdr);
-               scsi_autopm_put_device(sdp);
        }
 
        /* failed to execute TUR, assume media not present */
@@ -1423,8 +1426,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
                 * Leave the rest of the command zero to indicate
                 * flush everything.
                 */
-               res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-                                      SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL);
+               res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
+                                            &sshdr, SD_FLUSH_TIMEOUT,
+                                            SD_MAX_RETRIES, NULL, REQ_PM);
                if (res == 0)
                        break;
        }
@@ -2318,6 +2322,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
        int old_rcd = sdkp->RCD;
        int old_dpofua = sdkp->DPOFUA;
 
+
+       if (sdkp->cache_override)
+               return;
+
        first_len = 4;
        if (sdp->skip_ms_page_8) {
                if (sdp->type == TYPE_RBC)
@@ -2811,6 +2819,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
        sdkp->capacity = 0;
        sdkp->media_present = 1;
        sdkp->write_prot = 0;
+       sdkp->cache_override = 0;
        sdkp->WCE = 0;
        sdkp->RCD = 0;
        sdkp->ATO = 0;
@@ -2837,6 +2846,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
        sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
                  sdp->removable ? "removable " : "");
+       blk_pm_runtime_init(sdp->request_queue, dev);
        scsi_autopm_put_device(sdp);
        put_device(&sdkp->dev);
 }
@@ -3020,8 +3030,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
        if (!scsi_device_online(sdp))
                return -ENODEV;
 
-       res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-                              SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+       res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
+                              SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
        if (res) {
                sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
                sd_print_result(sdkp, res);
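
Taken together, the sd.c hunks above move SCSI disks to block-layer runtime PM: the per-open scsi_autopm_get/put calls are dropped, blk_pm_runtime_init() hands the request queue to the block layer, and internally generated commands (cache flush, START STOP) are issued with REQ_PM so runtime-PM accounting treats them as power-management requests rather than fresh I/O. A hedged sketch of that call pattern, not taken from the patch; the timeout and retry values are placeholders and the usual sd.c headers are assumed:

	#include <linux/blkdev.h>
	#include <linux/dma-direction.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_device.h>
	#include <scsi/scsi_eh.h>

	/* Issue a cache flush marked REQ_PM, mirroring the sd_sync_cache() change. */
	static int example_flush_cache(struct scsi_device *sdp)
	{
		unsigned char cmd[10] = { SYNCHRONIZE_CACHE };
		struct scsi_sense_hdr sshdr;

		return scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
					      &sshdr, 60 * HZ, 3, NULL, REQ_PM);
	}
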
index 74a1e4ca5401f9a58e6e705bb2fc8a99fcd04a0f..2386aeb41fe8d74826908d3dca71eb3f97a59ac8 100644 (file)
@@ -73,6 +73,7 @@ struct scsi_disk {
        u8              protection_type;/* Data Integrity Field */
        u8              provisioning_mode;
        unsigned        ATO : 1;        /* state of disk ATO bit */
+       unsigned        cache_override : 1; /* temp override of WCE,RCD */
        unsigned        WCE : 1;        /* state of disk WCE bit */
        unsigned        RCD : 1;        /* state of disk RCD bit, unused */
        unsigned        DPOFUA : 1;     /* state of disk DPOFUA bit */
index 04998f36e5071bdda94ff225f39b0ca3616eeb7b..6174ca4ea27594487d7dc0828d9e21841742b8ed 100644 (file)
@@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
                if (sdt->app_tag == 0xffff)
                        return 0;
 
-               /* Bad ref tag received from disk */
-               if (sdt->ref_tag == 0xffffffff) {
-                       printk(KERN_ERR
-                              "%s: bad phys ref tag on sector %lu\n",
-                              bix->disk_name, (unsigned long)sector);
-                       return -EIO;
-               }
-
                if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
                        printk(KERN_ERR
                               "%s: ref tag error on sector %lu (rcvd %u)\n",
index 0371047c59222251971d823219e3639ac2a53ff6..35faf24c604401eeb560472542cd2ed350289d10 100644 (file)
@@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI
          If you have a controller with this interface, say Y or M here.
 
          If unsure, say N.
+
+config SCSI_UFSHCD_PLATFORM
+       tristate "Platform bus based UFS Controller support"
+       depends on SCSI_UFSHCD
+       ---help---
+         This selects the UFS host controller support. Select this if
+         you have a UFS controller on Platform bus.
+
+         If you have a controller with this interface, say Y or M here.
+
+         If unsure, say N.
index 9eda0dfbd6df383304de10a0805868ee6485929c..1e5bd48457d636ef0aafd096f8a8038ac0dcbd5e 100644 (file)
@@ -1,3 +1,4 @@
 # UFSHCD makefile
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
+obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
new file mode 100644 (file)
index 0000000..03319ac
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Universal Flash Storage Host controller Platform bus based glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/platform_device.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pltfrm_suspend - suspend power management function
+ * @dev: pointer to device handle
+ *
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       /*
+        * TODO:
+        * 1. Call ufshcd_suspend
+        * 2. Do bus specific power management
+        */
+
+       disable_irq(hba->irq);
+
+       return 0;
+}
+
+/**
+ * ufshcd_pltfrm_resume - resume power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       /*
+        * TODO:
+        * 1. Call ufshcd_resume.
+        * 2. Do bus specific wake up
+        */
+
+       enable_irq(hba->irq);
+
+       return 0;
+}
+#else
+#define ufshcd_pltfrm_suspend  NULL
+#define ufshcd_pltfrm_resume   NULL
+#endif
+
+/**
+ * ufshcd_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_probe(struct platform_device *pdev)
+{
+       struct ufs_hba *hba;
+       void __iomem *mmio_base;
+       struct resource *mem_res;
+       struct resource *irq_res;
+       resource_size_t mem_size;
+       int err;
+       struct device *dev = &pdev->dev;
+
+       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem_res) {
+               dev_err(&pdev->dev,
+                       "Memory resource not available\n");
+               err = -ENODEV;
+               goto out_error;
+       }
+
+       mem_size = resource_size(mem_res);
+       if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) {
+               dev_err(&pdev->dev,
+                       "Cannot reserve the memory resource\n");
+               err = -EBUSY;
+               goto out_error;
+       }
+
+       mmio_base = ioremap_nocache(mem_res->start, mem_size);
+       if (!mmio_base) {
+               dev_err(&pdev->dev, "memory map failed\n");
+               err = -ENOMEM;
+               goto out_release_regions;
+       }
+
+       irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!irq_res) {
+               dev_err(&pdev->dev, "IRQ resource not available\n");
+               err = -ENODEV;
+               goto out_iounmap;
+       }
+
+       err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
+       if (err) {
+               dev_err(&pdev->dev, "set dma mask failed\n");
+               goto out_iounmap;
+       }
+
+       err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
+       if (err) {
+               dev_err(&pdev->dev, "Initialization failed\n");
+               goto out_iounmap;
+       }
+
+       platform_set_drvdata(pdev, hba);
+
+       return 0;
+
+out_iounmap:
+       iounmap(mmio_base);
+out_release_regions:
+       release_mem_region(mem_res->start, mem_size);
+out_error:
+       return err;
+}
+
+/**
+ * ufshcd_pltfrm_remove - remove platform driver routine
+ * @pdev: pointer to platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_remove(struct platform_device *pdev)
+{
+       struct resource *mem_res;
+       resource_size_t mem_size;
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       disable_irq(hba->irq);
+
+       /* Some buggy controllers raise interrupt after
+        * the resources are removed. So first we unregister the
+        * irq handler and then the resources used by driver
+        */
+
+       free_irq(hba->irq, hba);
+       ufshcd_remove(hba);
+       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem_res)
+               dev_err(&pdev->dev, "ufshcd: Memory resource not available\n");
+       else {
+               mem_size = resource_size(mem_res);
+               release_mem_region(mem_res->start, mem_size);
+       }
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static const struct of_device_id ufs_of_match[] = {
+       { .compatible = "jedec,ufs-1.1"},
+};
+
+static const struct dev_pm_ops ufshcd_dev_pm_ops = {
+       .suspend        = ufshcd_pltfrm_suspend,
+       .resume         = ufshcd_pltfrm_resume,
+};
+
+static struct platform_driver ufshcd_pltfrm_driver = {
+       .probe  = ufshcd_pltfrm_probe,
+       .remove = ufshcd_pltfrm_remove,
+       .driver = {
+               .name   = "ufshcd",
+               .owner  = THIS_MODULE,
+               .pm     = &ufshcd_dev_pm_ops,
+               .of_match_table = ufs_of_match,
+       },
+};
+
+module_platform_driver(ufshcd_pltfrm_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
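
For reference, the new glue driver registers as "ufshcd" and also matches the OF compatible string "jedec,ufs-1.1"; its probe() expects one IORESOURCE_MEM and one IORESOURCE_IRQ entry. A hypothetical board-file sketch of a device it could bind to, not part of the patch; the base address, size and IRQ number are made-up placeholders:

	#include <linux/kernel.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <linux/sizes.h>

	static struct resource example_ufs_resources[] = {
		{
			.start = 0xd0000000,			/* placeholder MMIO base */
			.end   = 0xd0000000 + SZ_64K - 1,
			.flags = IORESOURCE_MEM,
		},
		{
			.start = 100,				/* placeholder IRQ */
			.end   = 100,
			.flags = IORESOURCE_IRQ,
		},
	};

	static struct platform_device example_ufs_device = {
		.name		= "ufshcd",	/* matches ufshcd_pltfrm_driver.driver.name */
		.id		= -1,
		.num_resources	= ARRAY_SIZE(example_ufs_resources),
		.resource	= example_ufs_resources,
	};

	/* Registered from board init code with platform_device_register(&example_ufs_device). */
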
index 60fd40c4e4c2ada48a268a997099b18f2fa4ef34..c32a478df81b83c75cdbbdac9206236021f9f863 100644 (file)
@@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
                ucd_cmd_ptr->header.dword_2 = 0;
 
                ucd_cmd_ptr->exp_data_transfer_len =
-                       cpu_to_be32(lrbp->cmd->transfersize);
+                       cpu_to_be32(lrbp->cmd->sdb.length);
 
                memcpy(ucd_cmd_ptr->cdb,
                       lrbp->cmd->cmnd,
index 787bd2c22bca44043b615d8b3b4d70663a3d36a9..380387a47b1d86fe4e6a8a7ef22a537bfb85517a 100644 (file)
@@ -526,13 +526,17 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
        }
 
        if (xfer->tx_buf)
-               spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
+               if (xfer->bits_per_word > 8)
+                       spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
+               else
+                       spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
        else
                spi_writel(as, TDR, 0);
 
        dev_dbg(master->dev.parent,
-               "  start pio xfer %p: len %u tx %p rx %p\n",
-               xfer, xfer->len, xfer->tx_buf, xfer->rx_buf);
+               "  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
+               xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
+               xfer->bits_per_word);
 
        /* Enable relevant interrupts */
        spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
@@ -950,21 +954,39 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
 {
        u8              *txp;
        u8              *rxp;
+       u16             *txp16;
+       u16             *rxp16;
        unsigned long   xfer_pos = xfer->len - as->current_remaining_bytes;
 
        if (xfer->rx_buf) {
-               rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
-               *rxp = spi_readl(as, RDR);
+               if (xfer->bits_per_word > 8) {
+                       rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
+                       *rxp16 = spi_readl(as, RDR);
+               } else {
+                       rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+                       *rxp = spi_readl(as, RDR);
+               }
        } else {
                spi_readl(as, RDR);
        }
-
-       as->current_remaining_bytes--;
+       if (xfer->bits_per_word > 8) {
+               as->current_remaining_bytes -= 2;
+               if (as->current_remaining_bytes < 0)
+                       as->current_remaining_bytes = 0;
+       } else {
+               as->current_remaining_bytes--;
+       }
 
        if (as->current_remaining_bytes) {
                if (xfer->tx_buf) {
-                       txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
-                       spi_writel(as, TDR, *txp);
+                       if (xfer->bits_per_word > 8) {
+                               txp16 = (u16 *)(((u8 *)xfer->tx_buf)
+                                                       + xfer_pos + 2);
+                               spi_writel(as, TDR, *txp16);
+                       } else {
+                               txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
+                               spi_writel(as, TDR, *txp);
+                       }
                } else {
                        spi_writel(as, TDR, 0);
                }
@@ -1378,9 +1400,16 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
                        }
                }
 
+               if (xfer->bits_per_word > 8) {
+                       if (xfer->len % 2) {
+                               dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n");
+                               return -EINVAL;
+                       }
+               }
+
                /* FIXME implement these protocol options!! */
-               if (xfer->speed_hz) {
-                       dev_dbg(&spi->dev, "no protocol options yet\n");
+               if (xfer->speed_hz < spi->max_speed_hz) {
+                       dev_dbg(&spi->dev, "can't change speed in transfer\n");
                        return -ENOPROTOOPT;
                }
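
The atmel-spi hunks above add PIO support for transfers with bits_per_word greater than 8: the data register is now accessed as 16-bit words, current_remaining_bytes drops by two per word, and transfers whose byte length is odd are rejected. A hedged client-side sketch of a transfer that satisfies the new constraint; the device and data are hypothetical:

	#include <linux/spi/spi.h>

	static int example_xfer16(struct spi_device *spi)
	{
		static const u16 tx[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };
		struct spi_transfer t = {
			.tx_buf		= tx,
			.len		= sizeof(tx),	/* in bytes; must be even */
			.bits_per_word	= 16,
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);
		return spi_sync(spi, &m);
	}
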
 
index 2e8f24a1fb952cbfd86b161ad50ac3e315d0850b..50b13c9b1ab691fd5defcae44b98dc4bfccb5557 100644 (file)
@@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = {
        },
        { },
 };
-MODULE_DEVICE_TABLE(of, davini_spi_of_match);
+MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
 
 /**
  * spi_davinci_get_pdata - Get platform data from DTS binding
index d65c000efe3530c1997e996c406eb5880f4df5a4..09df8e22dba0a146c540248cea5374fa0fb13c92 100644 (file)
@@ -489,11 +489,6 @@ static int tegra_sflash_probe(struct platform_device *pdev)
        tegra_sflash_parse_dt(tsd);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "No IO memory resource\n");
-               ret = -ENODEV;
-               goto exit_free_master;
-       }
        tsd->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(tsd->base)) {
                ret = PTR_ERR(tsd->base);
index 163fd802b7aced2217494397297de76862056ea5..32b7bb111eb6b53d9e96808f7bd5fd0982cffd9d 100644 (file)
@@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
        spi->dev.parent = &master->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
-       spi->cs_gpio = -EINVAL;
+       spi->cs_gpio = -ENOENT;
        device_initialize(&spi->dev);
        return spi;
 }
@@ -1067,8 +1067,11 @@ static int of_spi_register_master(struct spi_master *master)
        nb = of_gpio_named_count(np, "cs-gpios");
        master->num_chipselect = max(nb, (int)master->num_chipselect);
 
-       if (nb < 1)
+       /* Return error only for an incorrectly formed cs-gpios property */
+       if (nb == 0 || nb == -ENOENT)
                return 0;
+       else if (nb < 0)
+               return nb;
 
        cs = devm_kzalloc(&master->dev,
                          sizeof(int) * master->num_chipselect,
@@ -1079,7 +1082,7 @@ static int of_spi_register_master(struct spi_master *master)
                return -ENOMEM;
 
        for (i = 0; i < master->num_chipselect; i++)
-               cs[i] = -EINVAL;
+               cs[i] = -ENOENT;
 
        for (i = 0; i < nb; i++)
                cs[i] = of_get_named_gpio(np, "cs-gpios", i);
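
With the spi core change above, a chip-select GPIO that is simply absent is recorded as -ENOENT rather than -EINVAL, and of_spi_register_master() only fails for a genuinely malformed cs-gpios property. A hedged sketch of how a controller driver might consume the per-device field afterwards; the function is hypothetical:

	#include <linux/gpio.h>
	#include <linux/spi/spi.h>

	static void example_activate_cs(struct spi_device *spi, bool active)
	{
		if (gpio_is_valid(spi->cs_gpio))
			gpio_set_value(spi->cs_gpio, active);
		/* otherwise fall back to the controller's native chip select */
	}
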
index 4e8a1794f50a893120c028bf915616216697ada6..aefe820a8005585934f9d80b2d62a69b44555b29 100644 (file)
@@ -72,10 +72,10 @@ source "drivers/staging/sep/Kconfig"
 
 source "drivers/staging/iio/Kconfig"
 
-source "drivers/staging/zram/Kconfig"
-
 source "drivers/staging/zsmalloc/Kconfig"
 
+source "drivers/staging/zram/Kconfig"
+
 source "drivers/staging/wlags49_h2/Kconfig"
 
 source "drivers/staging/wlags49_h25/Kconfig"
index ceb1c643753d9fef288d7d5eda97b5191b7726a7..6dc27dac679d9133052b7aca2ad2ea06cf1d3486 100644 (file)
@@ -264,6 +264,8 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        }
 
        rv = alarm_do_ioctl(file, cmd, &ts);
+       if (rv)
+               return rv;
 
        switch (ANDROID_ALARM_BASE_CMD(cmd)) {
        case ANDROID_ALARM_GET_TIME(0):
@@ -272,7 +274,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                break;
        }
 
-       return rv;
+       return 0;
 }
 #ifdef CONFIG_COMPAT
 static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
@@ -295,6 +297,8 @@ static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
        }
 
        rv = alarm_do_ioctl(file, cmd, &ts);
+       if (rv)
+               return rv;
 
        switch (ANDROID_ALARM_BASE_CMD(cmd)) {
        case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */
@@ -303,7 +307,7 @@ static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
                break;
        }
 
-       return rv;
+       return 0;
 }
 #endif
 
index b040200a5a5502d7a110e9064d25b10600196065..9bd874789ce5fc93906a78f62a68e542541bd288 100644 (file)
@@ -242,7 +242,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
  * 'log->buffer' which contains the first entry readable by 'euid'
  */
 static size_t get_next_entry_by_uid(struct logger_log *log,
-               size_t off, uid_t euid)
+               size_t off, kuid_t euid)
 {
        while (off != log->w_off) {
                struct logger_entry *entry;
@@ -251,7 +251,7 @@ static size_t get_next_entry_by_uid(struct logger_log *log,
 
                entry = get_entry_header(log, off, &scratch);
 
-               if (entry->euid == euid)
+               if (uid_eq(entry->euid, euid))
                        return off;
 
                next_len = sizeof(struct logger_entry) + entry->len;
index cc6bbd99c8e0326838246557f104d90fd11f9998..70af7d805dff191d744987f30e3b4b4093f3363e 100644 (file)
@@ -66,7 +66,7 @@ struct logger_entry {
        __s32           tid;
        __s32           sec;
        __s32           nsec;
-       uid_t           euid;
+       kuid_t          euid;
        char            msg[0];
 };
 
index 7871579bb83d8177d2ea262c38110446389fe923..87e852a0ef49140a24f72ba37cb051ad793c89d4 100644 (file)
@@ -981,6 +981,7 @@ config COMEDI_ME_DAQ
 
 config COMEDI_NI_6527
        tristate "NI 6527 support"
+       depends on HAS_DMA
        select COMEDI_MITE
        ---help---
          Enable support for the National Instruments 6527 PCI card
@@ -990,6 +991,7 @@ config COMEDI_NI_6527
 
 config COMEDI_NI_65XX
        tristate "NI 65xx static dio PCI card support"
+       depends on HAS_DMA
        select COMEDI_MITE
        ---help---
          Enable support for National Instruments 65xx static dio boards.
@@ -1003,6 +1005,7 @@ config COMEDI_NI_65XX
 
 config COMEDI_NI_660X
        tristate "NI 660x counter/timer PCI card support"
+       depends on HAS_DMA
        select COMEDI_NI_TIOCMD
        ---help---
          Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602,
@@ -1013,6 +1016,7 @@ config COMEDI_NI_660X
 
 config COMEDI_NI_670X
        tristate "NI 670x PCI card support"
+       depends on HAS_DMA
        select COMEDI_MITE
        ---help---
          Enable support for National Instruments PCI-6703 and PCI-6704
@@ -1022,6 +1026,7 @@ config COMEDI_NI_670X
 
 config COMEDI_NI_LABPC_PCI
        tristate "NI Lab-PC PCI-1200 support"
+       depends on HAS_DMA
        select COMEDI_NI_LABPC
        select COMEDI_MITE
        ---help---
@@ -1032,6 +1037,7 @@ config COMEDI_NI_LABPC_PCI
 
 config COMEDI_NI_PCIDIO
        tristate "NI PCI-DIO32HS, PCI-6533, PCI-6534 support"
+       depends on HAS_DMA
        select COMEDI_MITE
        select COMEDI_8255
        ---help---
@@ -1043,6 +1049,7 @@ config COMEDI_NI_PCIDIO
 
 config COMEDI_NI_PCIMIO
        tristate "NI PCI-MIO-E series and M series support"
+       depends on HAS_DMA
        select COMEDI_NI_TIOCMD
        select COMEDI_8255
        select COMEDI_FC
@@ -1095,10 +1102,12 @@ config COMEDI_SSV_DNP
          called ssv_dnp.
 
 config COMEDI_MITE
+       depends on HAS_DMA
        tristate
 
 config COMEDI_NI_TIOCMD
        tristate
+       depends on HAS_DMA
        select COMEDI_NI_TIO
        select COMEDI_MITE
 
index ca709901fb3e841c27c9fc2a69133d3975cfece1..d4be0e68509b7f6a0b54d6bf7c412105349acdc6 100644 (file)
@@ -51,10 +51,12 @@ static void __comedi_buf_free(struct comedi_device *dev,
                        clear_bit(PG_reserved,
                                  &(virt_to_page(buf->virt_addr)->flags));
                        if (s->async_dma_dir != DMA_NONE) {
+#ifdef CONFIG_HAS_DMA
                                dma_free_coherent(dev->hw_dev,
                                                  PAGE_SIZE,
                                                  buf->virt_addr,
                                                  buf->dma_addr);
+#endif
                        } else {
                                free_page((unsigned long)buf->virt_addr);
                        }
@@ -74,6 +76,12 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
        struct comedi_buf_page *buf;
        unsigned i;
 
+       if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
+               dev_err(dev->class_dev,
+                       "dma buffer allocation not supported\n");
+               return;
+       }
+
        async->buf_page_list = vzalloc(sizeof(*buf) * n_pages);
        if (async->buf_page_list)
                pages = vmalloc(sizeof(struct page *) * n_pages);
@@ -84,11 +92,15 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
        for (i = 0; i < n_pages; i++) {
                buf = &async->buf_page_list[i];
                if (s->async_dma_dir != DMA_NONE)
+#ifdef CONFIG_HAS_DMA
                        buf->virt_addr = dma_alloc_coherent(dev->hw_dev,
                                                            PAGE_SIZE,
                                                            &buf->dma_addr,
                                                            GFP_KERNEL |
                                                            __GFP_COMP);
+#else
+                       break;
+#endif
                else
                        buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
                if (!buf->virt_addr)
index 00f2547024ec6b1a2d2abf6abf52dbcc1697dbc8..924c54c9c31fad595d1cdf9331db17d8c40413f4 100644 (file)
@@ -246,9 +246,6 @@ static int resize_async_buffer(struct comedi_device *dev,
                return -EBUSY;
        }
 
-       if (!async->prealloc_buf)
-               return -EINVAL;
-
        /* make sure buffer is an integral number of pages
         * (we round up) */
        new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
index 3d978f34d212b4c9f925ad928496403f8039c40f..77a7bb63258034ae0dd2b88bd692a0b6da9fe317 100644 (file)
@@ -976,8 +976,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
                /* clear flip-flop to make sure 2-byte registers for
                 * count and address get set correctly */
                clear_dma_ff(devpriv->dma_chan);
-               set_dma_addr(devpriv->dma_chan,
-                            virt_to_bus(devpriv->dma_buffer));
+               set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
                /*  set appropriate size of transfer */
                devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
                if (cmd->stop_src == TRIG_COUNT &&
@@ -1089,7 +1088,7 @@ static void labpc_drain_dma(struct comedi_device *dev)
                devpriv->count -= num_points;
 
        /*  set address and count for next transfer */
-       set_dma_addr(devpriv->dma_chan, virt_to_bus(devpriv->dma_buffer));
+       set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
        set_dma_count(devpriv->dma_chan, leftover * sample_size);
        release_dma_lock(flags);
 
@@ -1741,6 +1740,9 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
                                unsigned long dma_flags;
 
                                devpriv->dma_chan = dma_chan;
+                               devpriv->dma_addr =
+                                       virt_to_bus(devpriv->dma_buffer);
+
                                dma_flags = claim_dma_lock();
                                disable_dma(devpriv->dma_chan);
                                set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
index 615f16f271c0a8078a1a6a22339393ffdeddf2ed..4b691f5a9965098cd6edebb5be04d940ea65af19 100644 (file)
@@ -82,6 +82,7 @@ struct labpc_private {
        unsigned int divisor_b1;
        unsigned int dma_chan;  /*  dma channel to use */
        u16 *dma_buffer;        /*  buffer ai will dma into */
+       phys_addr_t dma_addr;
        /* transfer size in bytes for current transfer */
        unsigned int dma_transfer_size;
        /* we are using dma/fifo-half-full/etc. */
index a46d579016d9571eef12db6bff2e95beea47e4d2..8c5dee9b3b05188c85110ee171fb179d354bd858 100644 (file)
@@ -310,9 +310,11 @@ static int ni_gpct_insn_read(struct comedi_device *dev,
 static int ni_gpct_insn_config(struct comedi_device *dev,
                               struct comedi_subdevice *s,
                               struct comedi_insn *insn, unsigned int *data);
+#ifdef PCIDMA
 static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
 static int ni_gpct_cmdtest(struct comedi_device *dev,
                           struct comedi_subdevice *s, struct comedi_cmd *cmd);
+#endif
 static int ni_gpct_cancel(struct comedi_device *dev,
                          struct comedi_subdevice *s);
 static void handle_gpct_interrupt(struct comedi_device *dev,
@@ -4617,9 +4619,7 @@ static int ni_E_init(struct comedi_device *dev)
        for (j = 0; j < NUM_GPCT; ++j) {
                s = &dev->subdevices[NI_GPCT_SUBDEV(j)];
                s->type = COMEDI_SUBD_COUNTER;
-               s->subdev_flags =
-                   SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL | SDF_CMD_READ
-                   /* | SDF_CMD_WRITE */ ;
+               s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
                s->n_chan = 3;
                if (board->reg_type & ni_reg_m_series_mask)
                        s->maxdata = 0xffffffff;
@@ -4628,11 +4628,14 @@ static int ni_E_init(struct comedi_device *dev)
                s->insn_read = &ni_gpct_insn_read;
                s->insn_write = &ni_gpct_insn_write;
                s->insn_config = &ni_gpct_insn_config;
+#ifdef PCIDMA
+               s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */;
                s->do_cmd = &ni_gpct_cmd;
                s->len_chanlist = 1;
                s->do_cmdtest = &ni_gpct_cmdtest;
                s->cancel = &ni_gpct_cancel;
                s->async_dma_dir = DMA_BIDIRECTIONAL;
+#endif
                s->private = &devpriv->counter_dev->counters[j];
 
                devpriv->counter_dev->counters[j].chip_index = 0;
@@ -5216,10 +5219,10 @@ static int ni_gpct_insn_write(struct comedi_device *dev,
        return ni_tio_winsn(counter, insn, data);
 }
 
+#ifdef PCIDMA
 static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
 {
        int retval;
-#ifdef PCIDMA
        struct ni_gpct *counter = s->private;
 /* const struct comedi_cmd *cmd = &s->async->cmd; */
 
@@ -5233,23 +5236,20 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
        ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
        ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
        retval = ni_tio_cmd(counter, s->async);
-#else
-       retval = -ENOTSUPP;
-#endif
        return retval;
 }
+#endif
 
+#ifdef PCIDMA
 static int ni_gpct_cmdtest(struct comedi_device *dev,
                           struct comedi_subdevice *s, struct comedi_cmd *cmd)
 {
-#ifdef PCIDMA
        struct ni_gpct *counter = s->private;
 
        return ni_tio_cmdtest(counter, cmd);
-#else
        return -ENOTSUPP;
-#endif
 }
+#endif
 
 static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
 {
index f0b4739c65a17eeff63facaf8158cfedbe0ba025..d15d9d58e5ac2a3022c92e9843f7de7b64685f1f 100644 (file)
@@ -2,7 +2,6 @@ config USB_DWC2
        tristate "DesignWare USB2 DRD Core Support"
        depends on USB
        depends on VIRT_TO_BUS
-       select USB_OTG_UTILS
        help
          Say Y or M here if your system has a Dual Role HighSpeed
          USB controller based on the DesignWare HSOTG IP Core.
@@ -39,6 +38,7 @@ config USB_DWC2_TRACK_MISSED_SOFS
        bool "Enable Missed SOF Tracking"
        help
          Say Y here to enable logging of missed SOF events to the dmesg log.
+         WARNING: This feature is still experimental.
          If in doubt, say N.
 
 config USB_DWC2_DEBUG_PERIODIC
index 827ab781ae9bd68f8aca7f2176babdc00cf1695e..8551ccedf0376284c4d81648db0c2cbddde6d946 100644 (file)
@@ -2804,9 +2804,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
 
        /* Set device flags indicating whether the HCD supports DMA */
        if (hsotg->core_params->dma_enable > 0) {
-               if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0)
-                       dev_warn(hsotg->dev,
-                                "can't enable workaround for >2GB RAM\n");
+               if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
+                       dev_warn(hsotg->dev, "can't set DMA mask\n");
                if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0)
                        dev_warn(hsotg->dev,
                                 "can't enable workaround for >2GB RAM\n");
index 6e5dbed6ccec06d8427b995f88bad428d6fcdf15..e24062f0a49ebd87ec2d30b06f914309d21173c0 100644 (file)
@@ -56,8 +56,6 @@
 static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
 {
 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
-#warning Compiling code to track missed SOFs
-
        u16 curr_frame_number = hsotg->frame_number;
 
        if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
index 1f3d581a10787154acccc04d3bb5a9274731996b..44cce2fa6361194a36ac72dff5825575fd324bf5 100644 (file)
@@ -95,6 +95,14 @@ static int dwc2_driver_probe(struct platform_device *dev)
 
        hsotg->dev = &dev->dev;
 
+       /*
+        * Use reasonable defaults so platforms don't have to provide these.
+        */
+       if (!dev->dev.dma_mask)
+               dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+       if (!dev->dev.coherent_dma_mask)
+               dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
        irq = platform_get_irq(dev, 0);
        if (irq < 0) {
                dev_err(&dev->dev, "missing IRQ resource\n");
@@ -102,11 +110,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
        }
 
        res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&dev->dev, "missing memory base resource\n");
-               return -EINVAL;
-       }
-
        hsotg->regs = devm_ioremap_resource(&dev->dev, res);
        if (IS_ERR(hsotg->regs))
                return PTR_ERR(hsotg->regs);
index 3c18efe3136575ab20559923881ad44da9bd0835..69059138de4ab334b96f44b63599be2ddceed7e9 100644 (file)
@@ -39,7 +39,7 @@ if WIMAX_GDM72XX_USB
 
 config WIMAX_GDM72XX_USB_PM
        bool "Enable power managerment support"
-       depends on USB_SUSPEND
+       depends on PM_RUNTIME
 
 endif # WIMAX_GDM72XX_USB
 
index 2856b8fd44adf808f8ca871f1960f7dec2272b93..163c638e4095f3b893668bb5d83e8e2110b76afb 100644 (file)
@@ -690,7 +690,6 @@ static void mxs_lradc_trigger_remove(struct iio_dev *iio)
 static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
 {
        struct mxs_lradc *lradc = iio_priv(iio);
-       struct iio_buffer *buffer = iio->buffer;
        int ret = 0, chan, ofs = 0;
        unsigned long enable = 0;
        uint32_t ctrl4_set = 0;
@@ -698,7 +697,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
        uint32_t ctrl1_irq = 0;
        const uint32_t chan_value = LRADC_CH_ACCUMULATE |
                ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
-       const int len = bitmap_weight(buffer->scan_mask, LRADC_MAX_TOTAL_CHANS);
+       const int len = bitmap_weight(iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS);
 
        if (!len)
                return -EINVAL;
@@ -725,7 +724,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
                lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
        writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
 
-       for_each_set_bit(chan, buffer->scan_mask, LRADC_MAX_TOTAL_CHANS) {
+       for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
                ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
                ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs);
                ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs);
index d060f2572512af496f094036cfbc9460a19850d3..c99f890cc6c65ae9ea13a9aafa8c8258e21e3cb8 100644 (file)
@@ -1869,6 +1869,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
                dev_info(&chip->client->dev,
                                "%s: i2c device found does not match expected id\n",
                                __func__);
+               ret = -EINVAL;
                goto fail1;
        }
 
@@ -1907,7 +1908,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
                if (ret) {
                        dev_err(&clientp->dev,
                                "%s: irq request failed", __func__);
-                       goto fail2;
+                       goto fail1;
                }
        }
 
@@ -1920,17 +1921,17 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
        if (ret) {
                dev_err(&clientp->dev,
                        "%s: iio registration failed\n", __func__);
-               goto fail1;
+               goto fail2;
        }
 
        dev_info(&clientp->dev, "%s Light sensor found.\n", id->name);
 
        return 0;
 
-fail1:
+fail2:
        if (clientp->irq)
                free_irq(clientp->irq, indio_dev);
-fail2:
+fail1:
        iio_device_free(indio_dev);
 
        return ret;
index 8c9e40390f42fae3b0f52d3d989d79891d2778f1..ef699f75318636db921fd693ee798d6bedffc7dc 100644 (file)
@@ -1,6 +1,7 @@
 config DRM_IMX
        tristate "DRM Support for Freescale i.MX"
        select DRM_KMS_HELPER
+       select VIDEOMODE_HELPERS
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
        depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
@@ -19,10 +20,12 @@ config DRM_IMX_FB_HELPER
 config DRM_IMX_PARALLEL_DISPLAY
        tristate "Support for parallel displays"
        depends on DRM_IMX
+       select VIDEOMODE_HELPERS
 
 config DRM_IMX_TVE
        tristate "Support for TV and VGA displays"
        depends on DRM_IMX
+       select REGMAP_MMIO
        help
          Choose this to enable the internal Television Encoder (TVe)
          found on i.MX53 processors.
@@ -30,6 +33,7 @@ config DRM_IMX_TVE
 config DRM_IMX_IPUV3_CORE
        tristate "IPUv3 core support"
        depends on DRM_IMX
+       depends on RESET_CONTROLLER
        help
          Choose this if you have a i.MX5/6 system and want
          to use the IPU. This option only enables IPU base
@@ -38,5 +42,6 @@ config DRM_IMX_IPUV3_CORE
 config DRM_IMX_IPUV3
        tristate "DRM Support for i.MX IPUv3"
        depends on DRM_IMX
+       depends on DRM_IMX_IPUV3_CORE
        help
          Choose this if you have a i.MX5 or i.MX6 processor.
index ac16344644073340cdce7f350134791e58136f62..03892de9bd7e1103665c2144212722436b97b8be 100644 (file)
@@ -670,7 +670,9 @@ static int imx_tve_probe(struct platform_device *pdev)
        tve->dac_reg = devm_regulator_get(&pdev->dev, "dac");
        if (!IS_ERR(tve->dac_reg)) {
                regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
-               regulator_enable(tve->dac_reg);
+               ret = regulator_enable(tve->dac_reg);
+               if (ret)
+                       return ret;
        }
 
        tve->clk = devm_clk_get(&pdev->dev, "tve");
index ea61c869110f053d68633e15d761fef0541d4de9..ff5c63350932b32f7bd05aa277733d0a77cb2b1f 100644 (file)
@@ -316,31 +316,14 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
 
 static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
 {
-       struct drm_pending_vblank_event *e;
-       struct timeval now;
        unsigned long flags;
        struct drm_device *drm = ipu_crtc->base.dev;
 
        spin_lock_irqsave(&drm->event_lock, flags);
-
-       e = ipu_crtc->page_flip_event;
-       if (!e) {
-               spin_unlock_irqrestore(&drm->event_lock, flags);
-               return;
-       }
-
-       do_gettimeofday(&now);
-       e->event.sequence = 0;
-       e->event.tv_sec = now.tv_sec;
-       e->event.tv_usec = now.tv_usec;
+       if (ipu_crtc->page_flip_event)
+               drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
        ipu_crtc->page_flip_event = NULL;
-
        imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-
-       list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-
-       wake_up_interruptible(&e->base.file_priv->event_wait);
-
        spin_unlock_irqrestore(&drm->event_lock, flags);
 }
 
index ec32776ff5475310c37074c8ed2e5cf5cabd3d06..df6569b997b88269bfdb48fd17dc410a0da50c0a 100644 (file)
@@ -1,6 +1,7 @@
 config SOLO6X10
        tristate "Softlogic 6x10 MPEG codec cards"
        depends on PCI && VIDEO_DEV && SND && I2C
+       depends on FONTS
        select VIDEOBUF2_DMA_SG
        select VIDEOBUF2_DMA_CONTIG
        select SND_PCM
index a88959f9a07ab09a82121f81246188d4d1e8aad5..197c393c4ca752a9f50062e2852afd2bb8be89a3 100644 (file)
@@ -123,6 +123,20 @@ int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
 }
 EXPORT_SYMBOL_GPL(nvec_register_notifier);
 
+/**
+ * nvec_unregister_notifier - Unregister a notifier with nvec
+ * @nvec: A &struct nvec_chip
+ * @nb: The notifier block to unregister
+ *
+ * Unregisters a notifier with @nvec. The notifier will be removed from the
+ * atomic notifier chain.
+ */
+int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(nvec_unregister_notifier);
+
 /**
  * nvec_status_notifier - The final notifier
  *
@@ -185,7 +199,7 @@ static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
  *
  * Free the given message
  */
-inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
+void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
 {
        if (msg != &nvec->tx_scratch)
                dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
@@ -800,11 +814,6 @@ static int tegra_nvec_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no mem resource?\n");
-               return -ENODEV;
-       }
-
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
@@ -815,7 +824,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       i2c_clk = clk_get(&pdev->dev, "div-clk");
+       i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
        if (IS_ERR(i2c_clk)) {
                dev_err(nvec->dev, "failed to get controller clock\n");
                return -ENODEV;
@@ -902,8 +911,11 @@ static int tegra_nvec_remove(struct platform_device *pdev)
 
        nvec_toggle_global_events(nvec, false);
        mfd_remove_devices(nvec->dev);
+       nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
        cancel_work_sync(&nvec->rx_work);
        cancel_work_sync(&nvec->tx_work);
+       /* FIXME: need to check whether nvec is responsible for power off */
+       pm_power_off = NULL;
 
        return 0;
 }
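
The nvec hunks add nvec_unregister_notifier() as the counterpart to nvec_register_notifier(), and the keyboard, power and mouse sub-drivers now call it on remove. A hedged sketch of the pairing a client might use; the callback and the events argument are placeholders:

	#include <linux/notifier.h>

	static int example_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
	{
		/* inspect the nvec event here */
		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_notifier,
	};

	/* probe():  nvec_register_notifier(nvec, &example_nb, 0);  */
	/* remove(): nvec_unregister_notifier(nvec, &example_nb);   */
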
index b7a14bc0ab9153319b58bad6f30f7e538d3fc775..2b1316d87470eaa9f3c84b775854bc56d10da3ab 100644 (file)
@@ -197,9 +197,8 @@ extern int nvec_register_notifier(struct nvec_chip *nvec,
                                  struct notifier_block *nb,
                                  unsigned int events);
 
-extern int nvec_unregister_notifier(struct device *dev,
-                                   struct notifier_block *nb,
-                                   unsigned int events);
+extern int nvec_unregister_notifier(struct nvec_chip *dev,
+                                   struct notifier_block *nb);
 
 extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg);
 
index 7445ce6422bbc9e868b0274c3dd65bce4b3b4a08..a0ec52a4114f277938ba274b9e4cfe8632a749d7 100644 (file)
@@ -169,8 +169,15 @@ static int nvec_kbd_probe(struct platform_device *pdev)
 
 static int nvec_kbd_remove(struct platform_device *pdev)
 {
+       struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+       char disable_kbd[] = { NVEC_KBD, DISABLE_KBD },
+            uncnfg_wake_key_reporting[] = { NVEC_KBD, CNFG_WAKE_KEY_REPORTING,
+                                               false };
+       nvec_write_async(nvec, uncnfg_wake_key_reporting, 3);
+       nvec_write_async(nvec, disable_kbd, 2);
+       nvec_unregister_notifier(nvec, &keys_dev.notifier);
+
        input_unregister_device(keys_dev.input);
-       input_free_device(keys_dev.input);
 
        return 0;
 }
@@ -188,4 +195,5 @@ module_platform_driver(nvec_kbd_driver);
 
 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
 MODULE_DESCRIPTION("NVEC keyboard driver");
+MODULE_ALIAS("platform:nvec-kbd");
 MODULE_LICENSE("GPL");
index 296f7b9a8c8c11f32d8eb35612bd75a1cabe1b4b..aacfcd6954a371ab23e47491230818a644b7e772 100644 (file)
@@ -414,6 +414,7 @@ static int nvec_power_remove(struct platform_device *pdev)
        struct nvec_power *power = platform_get_drvdata(pdev);
 
        cancel_delayed_work_sync(&power->poller);
+       nvec_unregister_notifier(power->nvec, &power->notifier);
        switch (pdev->id) {
        case AC:
                power_supply_unregister(&nvec_psy);
index aff6b9b9f9aa76b4c460ad6e5fe410e56f72f09d..06dbb02085a936c52ee97bec1787da0ec92179d4 100644 (file)
@@ -106,7 +106,7 @@ static int nvec_mouse_probe(struct platform_device *pdev)
        struct serio *ser_dev;
        char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
 
-       ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
+       ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
        if (ser_dev == NULL)
                return -ENOMEM;
 
@@ -133,6 +133,11 @@ static int nvec_mouse_probe(struct platform_device *pdev)
 
 static int nvec_mouse_remove(struct platform_device *pdev)
 {
+       struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+
+       ps2_sendcommand(ps2_dev.ser_dev, DISABLE_MOUSE);
+       ps2_stopstreaming(ps2_dev.ser_dev);
+       nvec_unregister_notifier(nvec, &ps2_dev.notifier);
        serio_unregister_port(ps2_dev.ser_dev);
 
        return 0;
@@ -179,4 +184,5 @@ module_platform_driver(nvec_mouse_driver);
 
 MODULE_DESCRIPTION("NVEC mouse driver");
 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
+MODULE_ALIAS("platform:nvec-mouse");
 MODULE_LICENSE("GPL");
index 185b676d858ad627a737619417a04ba0dd08191f..aab945a316ea47753d82130bb4043d9f7df7b255 100644 (file)
@@ -1,6 +1,6 @@
 config DX_SEP
        tristate "Discretix SEP driver"
-       depends on PCI
+       depends on PCI && CRYPTO
        help
          Discretix SEP driver; used for the security processor subsystem
          on board the Intel Mobile Internet Device and adds SEP availability
index fe667dde43ce4b0180c0f6cbfcb526debfaed719..386362c9964ff1e8856adcaa14fa000aa82686f5 100644 (file)
@@ -1087,7 +1087,11 @@ static int synaptics_rmi4_resume(struct device *dev)
        unsigned char intr_status;
        struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
 
-       regulator_enable(rmi4_data->regulator);
+       retval = regulator_enable(rmi4_data->regulator);
+       if (retval) {
+               dev_err(dev, "Regulator enable failed (%d)\n", retval);
+               return retval;
+       }
 
        enable_irq(rmi4_data->i2c_client->irq);
        rmi4_data->touch_stopped = false;
index f4f1bf7a30fdff871cedd0cceaeb95b2cd8c24c7..c699a3058b39920a34daf1a6f63524be990e29cb 100644 (file)
@@ -133,7 +133,7 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
             DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
                       pDevice->dev->name, pDevice->apdev->name);
        }
-       kfree(pDevice->apdev);
+       free_netdev(pDevice->apdev);
        pDevice->apdev = NULL;
     pDevice->bEnable8021x = false;
     pDevice->bEnableHostWEP = false;
index c335808211ee54b929441028e738797d457ead86..d0cf7d8a20e5640fe507f3ff1b1dc64c170f9408 100644 (file)
@@ -1345,9 +1345,12 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
                return rc;
        }
 
+       spin_lock_irq(&pDevice->lock);
+
        if (wrq->disabled) {
                pDevice->ePSMode = WMAC_POWER_CAM;
                PSvDisablePowerSaving(pDevice);
+               spin_unlock_irq(&pDevice->lock);
                return rc;
        }
        if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
@@ -1358,6 +1361,9 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
                pDevice->ePSMode = WMAC_POWER_FAST;
                PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
        }
+
+       spin_unlock_irq(&pDevice->lock);
+
        switch (wrq->flags & IW_POWER_MODE) {
        case IW_POWER_UNICAST_R:
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n");
index e1f91d5a0f6a4d74f6f4864b85d16e134ae046a2..a858666eae68f9fd4e4eead1e22948404ae62935 100644 (file)
 #ifndef _ZCACHE_RAMSTER_H_
 #define _ZCACHE_RAMSTER_H_
 
-#ifdef CONFIG_RAMSTER_MODULE
-#define CONFIG_RAMSTER
-#endif
-
 #ifdef CONFIG_RAMSTER
 #include "ramster/ramster.h"
 #else
index 327e4f0d98e1a57680a557ea08401f43959454f1..5b26ee977c2f34823e12cfa86e10153902d0206b 100644 (file)
@@ -1,6 +1,8 @@
 #include <linux/atomic.h>
 #include "debug.h"
 
+ssize_t ramster_foreign_eph_pages;
+ssize_t ramster_foreign_pers_pages;
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 
diff --git a/drivers/staging/zcache/ramster/ramster-howto.txt b/drivers/staging/zcache/ramster/ramster-howto.txt
new file mode 100644 (file)
index 0000000..7b1ee3b
--- /dev/null
@@ -0,0 +1,366 @@
+                       RAMSTER HOW-TO
+
+Author: Dan Magenheimer
+Ramster maintainer: Konrad Wilk <konrad.wilk@oracle.com>
+
+This is a HOWTO document for ramster which, as of this writing, is in
+the kernel as a subdirectory of zcache in drivers/staging, called ramster.
+(Zcache can be built with or without ramster functionality.)  If enabled
+and properly configured, ramster allows memory capacity load balancing
+across multiple machines in a cluster.  Further, the ramster code serves
+as an example of asynchronous access for zcache (as well as cleancache and
+frontswap) that may prove useful for future transcendent memory
+implementations, such as KVM and NVRAM.  While ramster works today on
+any network connection that supports kernel sockets, its features may
+become more interesting on future high-speed fabrics/interconnects.
+
+Ramster requires both kernel and userland support.  The userland support,
+called ramster-tools, is known to work with EL6-based distros, but is a
+set of poorly-hacked slightly-modified cluster tools based on ocfs2, which
+includes an init file, a config file, and a userland binary that interfaces
+to the kernel.  This state of userland support reflects the abysmal userland
+skills of this suitably-embarrassed author; any help/patches to turn
+ramster-tools into more distributable rpms/debs useful for a wider range
+of distros would be appreciated.  The source RPM that can be used as a
+starting point is available at:
+    http://oss.oracle.com/projects/tmem/files/RAMster/ 
+
+As a result of this author's ignorance, userland setup described in this
+HOWTO assumes an EL6 distro and is described in EL6 syntax.  Apologies
+if this offends anyone!
+
+Kernel support has only been tested on x86_64.  Systems with an active
+ocfs2 filesystem should work, but since ramster leverages a lot of
+code from ocfs2, there may be latent issues.  A kernel configuration that
+includes CONFIG_OCFS2_FS should build OK, and should certainly run OK
+if no ocfs2 filesystem is mounted.
+
+This HOWTO demonstrates memory capacity load balancing for a two-node
+cluster, where one node called the "local" node becomes overcommitted
+and the other node called the "remote" node provides additional RAM
+capacity for use by the local node.  Ramster is capable of more complex
+topologies; see the last section titled "ADVANCED RAMSTER TOPOLOGIES".
+
+If you find any terms in this HOWTO unfamiliar or don't understand the
+motivation for ramster, the following LWN reading is recommended:
+-- Transcendent Memory in a Nutshell (lwn.net/Articles/454795)
+-- The future calculus of memory management (lwn.net/Articles/475681)
+And since ramster is built on top of zcache, this article may be helpful:
+-- In-kernel memory compression (lwn.net/Articles/545244)
+
+Now that you've memorized the contents of those articles, let's get started!
+
+A. PRELIMINARY
+
+1) Install two x86_64 Linux systems that are known to work when
+   upgraded to a recent upstream Linux kernel version.
+
+On each system:
+
+2) Configure, build and install, then boot Linux, just to ensure it
+   can be done with an unmodified upstream kernel.  Confirm you booted
+   the upstream kernel with "uname -a".
+
+3) If you plan to do any performance testing or unless you plan to
+   test only swapping, the "WasActive" patch is also highly recommended.
+   (Search lkml.org for WasActive, apply the patch, rebuild your kernel.)
+   For a demo or simple testing, the patch can be ignored.
+
+4) Install ramster-tools as root.  An x86_64 rpm for EL6-based systems
+   can be found at:
+    http://oss.oracle.com/projects/tmem/files/RAMster/ 
+   (Sorry but for now, non-EL6 users must recreate ramster-tools on
+   their own from source.  See above.)
+
+5) Ensure that debugfs is mounted at each boot.  Examples below assume it
+   is mounted at /sys/kernel/debug.
+
+B. BUILDING RAMSTER INTO THE KERNEL
+
+Do the following on each system:
+
+1) Using the kernel configuration mechanism of your choice, change
+   your config to include:
+
+       CONFIG_CLEANCACHE=y
+       CONFIG_FRONTSWAP=y
+       CONFIG_STAGING=y
+       CONFIG_CONFIGFS_FS=y # NOTE: MUST BE y, not m
+       CONFIG_ZCACHE=y
+       CONFIG_RAMSTER=y
+
+   For a linux-3.10 or later kernel, you should also set:
+
+       CONFIG_ZCACHE_DEBUG=y
+       CONFIG_RAMSTER_DEBUG=y
+
+   Before building the kernel please doublecheck your kernel config
+   file to ensure all of the settings are correct.
+
+2) Build this kernel and change your boot file (e.g. /etc/grub.conf)
+   so that the new kernel will boot.
+
+3) Add "zcache" and "ramster" as kernel boot parameters for the new kernel.
+
+4) Reboot each system approximately simultaneously.
+
+5) Check dmesg to ensure there are some messages from ramster, prefixed
+   by "ramster:"
+
+       # dmesg | grep ramster
+
+   You should also see a lot of files in:
+
+       # ls /sys/kernel/debug/zcache
+       # ls /sys/kernel/debug/ramster
+
+   These are mostly counters for various zcache and ramster activities.
+   You should also see files in:
+
+       # ls /sys/kernel/mm/ramster
+
+   These are sysfs files that control ramster as we shall see.
+
+   Ramster now will act as a single-system zcache on each system
+   but doesn't yet know anything about the cluster so can't yet do
+   anything remotely.
+
+C. CONFIGURING THE RAMSTER CLUSTER
+
+This part can be error prone unless you are familiar with clustering
+filesystems.  We need to describe the cluster in a /etc/ramster.conf
+file and the init scripts that parse it are extremely picky about
+the syntax.
+
+1) Create a /etc/ramster.conf file and ensure it is identical on both
+   systems.  This file mimics the ocfs2 format and there is a good amount
+   of documentation that can be searched for ocfs2.conf, but you can use:
+
+       cluster:
+               name = ramster
+               node_count = 2
+       node:
+               name = system1
+               cluster = ramster
+               number = 0
+               ip_address = my.ip.ad.r1
+               ip_port = 7777
+       node:
+               name = system2
+               cluster = ramster
+               number = 1
+               ip_address = my.ip.ad.r2
+               ip_port = 7777
+
+   You must ensure that the "name" field in the file exactly matches
+   the output of "hostname" on each system; if "hostname" shows a
+   fully-qualified hostname, ensure the name is fully qualified in
+   /etc/ramster.conf.  Obviously, substitute my.ip.ad.rx with proper
+   ip addresses.
+
+2) Enable the ramster service and configure it.  If you used the
+   EL6 ramster-tools, this would be:
+
+       # chkconfig --add ramster
+       # service ramster configure
+
+   Set "load on boot" to "y", cluster to start is "ramster" (or whatever
+   name you chose in ramster.conf), heartbeat dead threshold as "500",
+   network idle timeout as "1000000".  Leave the others as default.
+
+3) Reboot both systems.  After reboot, try (assuming EL6 ramster-tools):
+
+       # service ramster status
+
+   You should see "Checking RAMSTER cluster "ramster": Online".  If you do
+   not, something is wrong and ramster will not work.  Note that you
+   should also see that the driver for "configfs" is loaded and mounted,
+   the driver for ocfs2_dlmfs is not loaded, and some numbers for network
+   parameters.  You will also see "Checking RAMSTER heartbeat: Not active".
+   That's all OK.
+
+4) Now you need to start the cluster heartbeat; the cluster is not "up"
+   until all nodes detect a heartbeat.  In a real cluster, heartbeat detection
+   is done via a cluster filesystem, but ramster doesn't require one.  Some
+   hack-y kernel code in ramster can start the heartbeat for you though if
+   you tell it what nodes are "up".  To enable the heartbeat, do:
+
+       # echo 0 > /sys/kernel/mm/ramster/manual_node_up
+       # echo 1 > /sys/kernel/mm/ramster/manual_node_up
+
+   This must be done on BOTH nodes and, to avoid timeouts, must be done
+   approximately concurrently on both nodes.  On an EL6 system, it is
+   convenient to put these lines in /etc/rc.local.  To confirm that the
+   cluster is now up, on both systems do:
+
+       # dmesg | grep ramster
+
+   You should see ramster "Accepted connection" messages in dmesg on both
+   nodes after this.  Note that if you check userland status again with
+
+       # service ramster status
+
+   you will still see "Checking RAMSTER heartbeat: Not active".  That's
+   still OK... the ramster kernel heartbeat hack doesn't communicate to
+   userland.
+
+5) You now must tell each node the node to which it should "remotify" pages.
+   On this two node cluster, we will assume the "local" node, node 0, has
+   memory overcommitted and will use ramster to utilize RAM capacity on
+   the "remote node", node 1.  To configure this, on node 0, you do:
+
+       # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+   You should see "ramster: node 1 set as remotification target" in dmesg
+   on node 0.  Again, on EL6, /etc/rc.local is a good place to put this
+   on node 0 so you don't forget to do it at each boot.
+
+6) One more step:  By default, the ramster code does not "remotify" any
+   pages; this is primarily for testing purposes, but sometimes it is
+   useful.  This may change in the future, but for now, on node 0, you do:
+
+       # echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
+       # echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable
+
+   The first enables remotification of swap (persistent, aka frontswap)
+   pages; the second enables remotification of page cache (ephemeral,
+   aka cleancache) pages.
+
+   On EL6, these lines can also be put in /etc/rc.local (AFTER the
+   node_up lines), or at the beginning of a script that runs a workload;
+   see the consolidated sketch below.
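+
+   As a consolidated illustration (this is not something ramster-tools
+   generates for you), the node 0 additions to /etc/rc.local from steps
+   4 through 6 might look roughly like this; node 1 needs only the two
+   manual_node_up lines:
+
+       # --- ramster (node 0): mark both nodes up, remotify to node 1 ---
+       echo 0 > /sys/kernel/mm/ramster/manual_node_up
+       echo 1 > /sys/kernel/mm/ramster/manual_node_up
+       echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
+       echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
+       echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable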
+
+7) Note that most testing has been done with both/all machines booted
+   roughly simultaneously to avoid cluster timeouts.  Ideally, you should
+   do this too unless you are trying to break ramster rather than just
+   use it. ;-)
+
+D. TESTING RAMSTER
+
+1) Note that ramster has no value unless pages get "remotified".  For
+   swap/frontswap/persistent pages, this doesn't happen unless/until
+   the workload would cause swapping to occur, at which point pages
+   are put into frontswap/zcache, and the remotification thread starts
+   working.  To get to the point where the system swaps, you either
+   need a workload for which the working set exceeds the RAM in the
+   system; or you need to somehow reduce the amount of RAM one of
+   the systems sees.  The latter is easy when testing in a VM, but
+   harder on physical systems.  In some cases, "mem=xxxM" on the
+   kernel command line restricts memory, but for some values of xxx
+   the kernel may fail to boot.  One may also try creating a fixed-size
+   RAM disk, doing nothing with it other than ensuring that it pins down
+   a fixed amount of RAM; see the sketch below.
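+
+   For example, assuming the brd ramdisk driver is built as a module,
+   something along these lines pins roughly 1GB of RAM until the module
+   is removed again (the sizes are only illustrative; rd_size is in KB):
+
+       # modprobe brd rd_nr=1 rd_size=1048576
+       # dd if=/dev/zero of=/dev/ram0 bs=1M count=1024
+       ... run the workload, then ...
+       # rmmod brd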
+
+2) To see if ramster is working, on the "remote node", node 1, try:
+
+       # grep . /sys/kernel/debug/ramster/foreign_*
+        # # note, that is space-dot-space between grep and the pathname
+
+   to monitor the number (and maximum) of ephemeral and persistent pages
+   that ramster has sent.  If these stay at zero, ramster is not working,
+   either because the workload on the local node (node 0) isn't creating
+   enough memory pressure or because "remotifying" isn't working.  On the
+   local system, node 0, you can also watch a good deal of useful
+   information.  Try:
+
+       # grep . /sys/kernel/debug/zcache/*pageframes* \
+               /sys/kernel/debug/zcache/*zbytes* \
+               /sys/kernel/debug/zcache/*zpages* \
+               /sys/kernel/debug/ramster/*remote*
+
+   Of particular note are the remote_*_pages_succ_get counters.  These
+   show how many disk reads and/or disk writes have been avoided on the
+   overcommitted local system by storing pages remotely using ramster.
+
+   At the risk of information overload, you can also grep:
+
+        /sys/kernel/debug/cleancache/* and /sys/kernel/debug/frontswap/*
+
+   These show, for example, how many disk reads and/or disk writes have
+   been avoided by using zcache to optimize RAM on the local system.
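+
+   To watch any of these counters change over time, a simple shell loop
+   such as
+
+       # while sleep 5; do grep . /sys/kernel/debug/ramster/*remote*; done
+
+   (substituting whichever debugfs files you care about) is usually
+   sufficient; "watch" works too, if it is installed.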
+
+
+AUTOMATIC SWAP REPATRIATION
+
+You may notice that while the systems are idle, the foreign persistent
+page count on the remote machine slowly decreases.  This is because
+ramster implements "frontswap selfshrinking":  When possible, swap
+pages that have been remotified are slowly repatriated to the local
+machine.  This is done so that local RAM is used when possible and
+so that, in the event of a remote machine crash, the probability of
+data loss is reduced.
+
+REBOOTING / POWEROFF
+
+If a system is shut down while some of its swap pages still reside
+on a remote system, the system may lock up during the shutdown
+sequence.  This will occur if the network is shut down before the
+swap mechanism is shut down, which is the default ordering on many
+distros.  To avoid this annoying problem, simply shut off the swap
+subsystem before starting the shutdown sequence, e.g.:
+
+       # swapoff -a
+       # reboot
+
+Ideally, this swapoff-before-ifdown ordering should be enforced permanently
+using shutdown scripts.
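+
+For example, on an EL6-style system one possible approach (not part of
+ramster-tools; the script name and priorities are only illustrative) is a
+tiny init script whose "stop" action runs before the network is taken
+down:
+
+       #!/bin/sh
+       # /etc/init.d/ramster-swapoff  (register with chkconfig --add)
+       # chkconfig: 345 99 01
+       # description: disable swap before the network is shut down
+       case "$1" in
+       start) touch /var/lock/subsys/ramster-swapoff ;;
+       stop)  swapoff -a
+              rm -f /var/lock/subsys/ramster-swapoff ;;
+       esac
+       exit 0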
+
+KNOWN PROBLEMS
+
+1) You may periodically see messages such as:
+
+    ramster_r2net, message length problem
+
+   This is harmless but indicates that a node is sending messages
+   containing compressed pages that exceed the maximum for zcache
+   (PAGE_SIZE*15/16).  The sender side needs to be fixed.
+
+2) If you see a "No longer connected to node..." message or a "No connection
+   established with node X after N seconds" message, you may be in an
+   unrecoverable state.  If you are certain all of the
+   appropriate cluster configuration steps described above have been
+   performed, try rebooting the two servers concurrently to see if
+   the cluster starts.
+
+   Note that "Connection to node... shutdown, state 7" is an intermediate
+   connection state.  As long as you later see "Accepted connection", the
+   intermediate states are harmless.
+
+3) There are known issues in counting certain values.  As a result,
+   you may see periodic warnings from the kernel, most often
+   "ramster: bad accounting for XXX".  There are also "WARN_ONCE"
+   messages.  If you see kernel warnings with a tombstone, please report
+   them.  They are harmless but reflect bugs that eventually need to be fixed.
+
+ADVANCED RAMSTER TOPOLOGIES
+
+The kernel code for ramster can support up to eight nodes in a cluster,
+but no testing has been done with more than three nodes.
+
+In the example described above, the "remote" node serves as a RAM
+overflow for the "local" node.  This can be made symmetric by appropriate
+settings of the sysfs remote_target_nodenum file.  For example, by setting:
+
+       # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 0, and
+
+       # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 1, each node can serve as a RAM overflow for the other.
+
+For more than two nodes, a "RAM server" can be configured.  For a
+three-node system, set:
+
+       # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 1, and
+
+       # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
+
+on node 2.  Then node 0 is a RAM server for node 1 and node 2.
+
+In this implementation of ramster, any remote node is potentially a single
+point of failure (SPOF).  Though the probability of failure is reduced
+by automatic swap repatriation (see above), a proposed future enhancement
+to ramster would improve high availability for the cluster by sending
+a copy of each page of data to two other nodes.  Patches welcome!
index b18b887db79f70811ee43d0423a2f87d1b4583c7..a937ce1fa27afb6b8895a72f33d19e3ae2fee878 100644 (file)
@@ -66,8 +66,6 @@ static int ramster_remote_target_nodenum __read_mostly = -1;
 
 /* Used by this code. */
 long ramster_flnodes;
-ssize_t ramster_foreign_eph_pages;
-ssize_t ramster_foreign_pers_pages;
 /* FIXME frontswap selfshrinking knobs in debugfs? */
 
 static LIST_HEAD(ramster_rem_op_list);
@@ -399,14 +397,18 @@ void ramster_count_foreign_pages(bool eph, int count)
                        inc_ramster_foreign_eph_pages();
                } else {
                        dec_ramster_foreign_eph_pages();
+#ifdef CONFIG_RAMSTER_DEBUG
                        WARN_ON_ONCE(ramster_foreign_eph_pages < 0);
+#endif
                }
        } else {
                if (count > 0) {
                        inc_ramster_foreign_pers_pages();
                } else {
                        dec_ramster_foreign_pers_pages();
+#ifdef CONFIG_RAMSTER_DEBUG
                        WARN_ON_ONCE(ramster_foreign_pers_pages < 0);
+#endif
                }
        }
 }
index 522cb8e5514269b12eaee3a8875ba6f241c64dad..dcceed29d31ae415818d1fceecc6518cbc017845 100644 (file)
@@ -1922,15 +1922,15 @@ static int zcache_init(void)
 
 #ifdef CONFIG_ZCACHE_MODULE
 #ifdef CONFIG_RAMSTER
-module_param(ramster_enabled, int, S_IRUGO);
+module_param(ramster_enabled, bool, S_IRUGO);
 module_param(disable_frontswap_selfshrink, int, S_IRUGO);
 #endif
-module_param(disable_cleancache, int, S_IRUGO);
-module_param(disable_frontswap, int, S_IRUGO);
+module_param(disable_cleancache, bool, S_IRUGO);
+module_param(disable_frontswap, bool, S_IRUGO);
 #ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
 module_param(frontswap_has_exclusive_gets, bool, S_IRUGO);
 #endif
-module_param(disable_frontswap_ignore_nonactive, int, S_IRUGO);
+module_param(disable_frontswap_ignore_nonactive, bool, S_IRUGO);
 module_param(zcache_comp_name, charp, S_IRUGO);
 module_init(zcache_init);
 MODULE_LICENSE("GPL");
index ffbc6a94be522abd396a50b90ccb47e9c202a76f..d7705e5824fb2d39205d55846d560df2a8a03270 100644 (file)
@@ -651,7 +651,7 @@ static int iscsit_add_reject(
        cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
-               iscsit_release_cmd(cmd);
+               iscsit_free_cmd(cmd, false);
                return -1;
        }
 
@@ -697,7 +697,7 @@ int iscsit_add_reject_from_cmd(
        cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
-               iscsit_release_cmd(cmd);
+               iscsit_free_cmd(cmd, false);
                return -1;
        }
 
@@ -1250,7 +1250,7 @@ static u32 iscsit_do_crypto_hash_sg(
 
 static void iscsit_do_crypto_hash_buf(
        struct hash_desc *hash,
-       unsigned char *buf,
+       const void *buf,
        u32 payload_length,
        u32 padding,
        u8 *pad_bytes,
@@ -1743,7 +1743,7 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        return 0;
 out:
        if (cmd)
-               iscsit_release_cmd(cmd);
+               iscsit_free_cmd(cmd, false);
 ping_out:
        kfree(ping_data);
        return ret;
@@ -2251,7 +2251,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
                pr_err("Received logout request on connection that"
                        " is not in logged in state, ignoring request.\n");
-               iscsit_release_cmd(cmd);
+               iscsit_free_cmd(cmd, false);
                return 0;
        }
 
@@ -2524,9 +2524,8 @@ static int iscsit_send_conn_drop_async_message(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->tx_size += ISCSI_CRC_LEN;
                pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2662,9 +2661,8 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -2841,9 +2839,8 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -2900,9 +2897,8 @@ static int iscsit_send_unsolicited_nopin(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                tx_size += ISCSI_CRC_LEN;
                pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2949,9 +2945,8 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3040,9 +3035,8 @@ static int iscsit_send_r2t(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3256,9 +3250,8 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3329,9 +3322,8 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3504,9 +3496,8 @@ static int iscsit_send_text_rsp(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3557,11 +3548,11 @@ static int iscsit_send_reject(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
 {
-       u32 iov_count = 0, tx_size = 0;
-       struct iscsi_reject *hdr;
+       struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
        struct kvec *iov;
+       u32 iov_count = 0, tx_size;
 
-       iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]);
+       iscsit_build_reject(cmd, conn, hdr);
 
        iov = &cmd->iov_misc[0];
        iov[iov_count].iov_base = cmd->pdu;
@@ -3574,9 +3565,8 @@ static int iscsit_send_reject(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)hdr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)header_digest);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
                tx_size += ISCSI_CRC_LEN;
@@ -3585,9 +3575,8 @@ static int iscsit_send_reject(
        }
 
        if (conn->conn_ops->DataDigest) {
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                               (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
-                               0, NULL, (u8 *)&cmd->data_crc);
+               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
+                               ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
 
                iov[iov_count].iov_base = &cmd->data_crc;
                iov[iov_count++].iov_len  = ISCSI_CRC_LEN;
@@ -3676,7 +3665,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
                list_del(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);
 
-               iscsit_free_cmd(cmd);
+               iscsit_free_cmd(cmd, false);
                break;
        case ISTATE_SEND_NOPIN_WANT_RESPONSE:
                iscsit_mod_nopin_response_timer(conn);
@@ -4133,7 +4122,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
 
                iscsit_increment_maxcmdsn(cmd, sess);
 
-               iscsit_free_cmd(cmd);
+               iscsit_free_cmd(cmd, true);
 
                spin_lock_bh(&conn->cmd_lock);
        }
index 7816af6cdd1209f7e09ac31adc027ab80b3663de..40d9dbca987b25a811ca0fb75c9c0277f65933a8 100644 (file)
@@ -823,7 +823,7 @@ static int iscsit_attach_ooo_cmdsn(
                /*
                 * CmdSN is greater than the tail of the list.
                 */
-               if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
+               if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
                        list_add_tail(&ooo_cmdsn->ooo_list,
                                        &sess->sess_ooo_cmdsn_list);
                else {
@@ -833,11 +833,12 @@ static int iscsit_attach_ooo_cmdsn(
                         */
                        list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
                                                ooo_list) {
-                               if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+                               if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
                                        continue;
 
+                               /* Insert before this entry */
                                list_add(&ooo_cmdsn->ooo_list,
-                                       &ooo_tmp->ooo_list);
+                                       ooo_tmp->ooo_list.prev);
                                break;
                        }
                }
index ba6091bf93fcde3c1463d6ba54a8ae07f812ddf0..45a5afd5ea13b2eda8bfb11de4f6f238e3996219 100644 (file)
@@ -143,7 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
                        list_del(&cmd->i_conn_node);
                        cmd->conn = NULL;
                        spin_unlock(&cr->conn_recovery_cmd_lock);
-                       iscsit_free_cmd(cmd);
+                       iscsit_free_cmd(cmd, true);
                        spin_lock(&cr->conn_recovery_cmd_lock);
                }
                spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -165,7 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
                        list_del(&cmd->i_conn_node);
                        cmd->conn = NULL;
                        spin_unlock(&cr->conn_recovery_cmd_lock);
-                       iscsit_free_cmd(cmd);
+                       iscsit_free_cmd(cmd, true);
                        spin_lock(&cr->conn_recovery_cmd_lock);
                }
                spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -248,7 +248,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(
                iscsit_remove_cmd_from_connection_recovery(cmd, sess);
 
                spin_unlock(&cr->conn_recovery_cmd_lock);
-               iscsit_free_cmd(cmd);
+               iscsit_free_cmd(cmd, true);
                spin_lock(&cr->conn_recovery_cmd_lock);
        }
        spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -302,7 +302,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
                list_del(&cmd->i_conn_node);
 
                spin_unlock_bh(&conn->cmd_lock);
-               iscsit_free_cmd(cmd);
+               iscsit_free_cmd(cmd, true);
                spin_lock_bh(&conn->cmd_lock);
        }
        spin_unlock_bh(&conn->cmd_lock);
@@ -355,7 +355,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
 
                        list_del(&cmd->i_conn_node);
                        spin_unlock_bh(&conn->cmd_lock);
-                       iscsit_free_cmd(cmd);
+                       iscsit_free_cmd(cmd, true);
                        spin_lock_bh(&conn->cmd_lock);
                        continue;
                }
@@ -375,7 +375,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
                     iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
                        list_del(&cmd->i_conn_node);
                        spin_unlock_bh(&conn->cmd_lock);
-                       iscsit_free_cmd(cmd);
+                       iscsit_free_cmd(cmd, true);
                        spin_lock_bh(&conn->cmd_lock);
                        continue;
                }
index f690be9e5293f0872667188a84df5a7e1998032e..e38222191a33b7c19ef71e827a6d995f31dc8271 100644 (file)
@@ -436,7 +436,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
        /*
         * Extra parameters for ISER from RFC-5046
         */
-       param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS,
+       param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS,
                        PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
                        TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
        if (!param)
@@ -529,7 +529,7 @@ int iscsi_set_keys_to_negotiate(
                        SET_PSTATE_NEGOTIATE(param);
                } else if (!strcmp(param->name, OFMARKINT)) {
                        SET_PSTATE_NEGOTIATE(param);
-               } else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+               } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
                        if (iser == true)
                                SET_PSTATE_NEGOTIATE(param);
                } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
@@ -580,7 +580,7 @@ int iscsi_set_keys_irrelevant_for_discovery(
                        param->state &= ~PSTATE_NEGOTIATE;
                else if (!strcmp(param->name, OFMARKINT))
                        param->state &= ~PSTATE_NEGOTIATE;
-               else if (!strcmp(param->name, RDMAEXTENTIONS))
+               else if (!strcmp(param->name, RDMAEXTENSIONS))
                        param->state &= ~PSTATE_NEGOTIATE;
                else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
                        param->state &= ~PSTATE_NEGOTIATE;
@@ -758,9 +758,9 @@ static int iscsi_add_notunderstood_response(
        }
        INIT_LIST_HEAD(&extra_response->er_list);
 
-       strncpy(extra_response->key, key, strlen(key) + 1);
-       strncpy(extra_response->value, NOTUNDERSTOOD,
-                       strlen(NOTUNDERSTOOD) + 1);
+       strlcpy(extra_response->key, key, sizeof(extra_response->key));
+       strlcpy(extra_response->value, NOTUNDERSTOOD,
+               sizeof(extra_response->value));
 
        list_add_tail(&extra_response->er_list,
                        &param_list->extra_response_list);
@@ -1629,8 +1629,6 @@ int iscsi_decode_text_input(
 
                if (phase & PHASE_SECURITY) {
                        if (iscsi_check_for_auth_key(key) > 0) {
-                               char *tmpptr = key + strlen(key);
-                               *tmpptr = '=';
                                kfree(tmpbuf);
                                return 1;
                        }
@@ -1977,7 +1975,7 @@ void iscsi_set_session_parameters(
                        ops->SessionType = !strcmp(param->value, DISCOVERY);
                        pr_debug("SessionType:                  %s\n",
                                param->value);
-               } else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+               } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
                        ops->RDMAExtensions = !strcmp(param->value, YES);
                        pr_debug("RDMAExtensions:               %s\n",
                                param->value);
index f31b9c4b83f26ca6db4cd4fa5fd357ca414c0bfa..a47046a752aac7ccdabbdf4e41c936d53a13569b 100644 (file)
@@ -1,8 +1,10 @@
 #ifndef ISCSI_PARAMETERS_H
 #define ISCSI_PARAMETERS_H
 
+#include <scsi/iscsi_proto.h>
+
 struct iscsi_extra_response {
-       char key[64];
+       char key[KEY_MAXLEN];
        char value[32];
        struct list_head er_list;
 } ____cacheline_aligned;
@@ -91,7 +93,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 /*
  * Parameter names of iSCSI Extentions for RDMA (iSER).  See RFC-5046
  */
-#define RDMAEXTENTIONS                 "RDMAExtensions"
+#define RDMAEXTENSIONS                 "RDMAExtensions"
 #define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
 #define TARGETRECVDATASEGMENTLENGTH    "TargetRecvDataSegmentLength"
 
@@ -142,7 +144,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 /*
  * Initial values for iSER parameters following RFC-5046 Section 6
  */
-#define INITIAL_RDMAEXTENTIONS                 NO
+#define INITIAL_RDMAEXTENSIONS                 NO
 #define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
 #define INITIAL_TARGETRECVDATASEGMENTLENGTH    "8192"
 
index 2cc6c9a3ffb8417b4322dcb5688f01b40d47db63..08a3bacef0c599ee72ba13e04d28a0c765783a97 100644 (file)
@@ -676,40 +676,56 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
 
 void iscsit_release_cmd(struct iscsi_cmd *cmd)
 {
-       struct iscsi_conn *conn = cmd->conn;
-
-       iscsit_free_r2ts_from_list(cmd);
-       iscsit_free_all_datain_reqs(cmd);
-
        kfree(cmd->buf_ptr);
        kfree(cmd->pdu_list);
        kfree(cmd->seq_list);
        kfree(cmd->tmr_req);
        kfree(cmd->iov_data);
 
-       if (conn) {
+       kmem_cache_free(lio_cmd_cache, cmd);
+}
+
+static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+                             bool check_queues)
+{
+       struct iscsi_conn *conn = cmd->conn;
+
+       if (scsi_cmd) {
+               if (cmd->data_direction == DMA_TO_DEVICE) {
+                       iscsit_stop_dataout_timer(cmd);
+                       iscsit_free_r2ts_from_list(cmd);
+               }
+               if (cmd->data_direction == DMA_FROM_DEVICE)
+                       iscsit_free_all_datain_reqs(cmd);
+       }
+
+       if (conn && check_queues) {
                iscsit_remove_cmd_from_immediate_queue(cmd, conn);
                iscsit_remove_cmd_from_response_queue(cmd, conn);
        }
-
-       kmem_cache_free(lio_cmd_cache, cmd);
 }
 
-void iscsit_free_cmd(struct iscsi_cmd *cmd)
+void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
 {
+       struct se_cmd *se_cmd = NULL;
+       int rc;
        /*
         * Determine if a struct se_cmd is associated with
         * this struct iscsi_cmd.
         */
        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
-               if (cmd->data_direction == DMA_TO_DEVICE)
-                       iscsit_stop_dataout_timer(cmd);
+               se_cmd = &cmd->se_cmd;
+               __iscsit_free_cmd(cmd, true, shutdown);
                /*
                 * Fallthrough
                 */
        case ISCSI_OP_SCSI_TMFUNC:
-               transport_generic_free_cmd(&cmd->se_cmd, 1);
+               rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
+               if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
+                       __iscsit_free_cmd(cmd, true, shutdown);
+                       target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+               }
                break;
        case ISCSI_OP_REJECT:
                /*
@@ -718,11 +734,19 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd)
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
-                       transport_generic_free_cmd(&cmd->se_cmd, 1);
+                       se_cmd = &cmd->se_cmd;
+                       __iscsit_free_cmd(cmd, true, shutdown);
+
+                       rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
+                       if (!rc && shutdown && se_cmd->se_sess) {
+                               __iscsit_free_cmd(cmd, true, shutdown);
+                               target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+                       }
                        break;
                }
                /* Fall-through */
        default:
+               __iscsit_free_cmd(cmd, false, shutdown);
                cmd->release_cmd(cmd);
                break;
        }
index 4f8e01a47081ff72328130a62278d421ec73ed08..a4422659d04944f58c85670bfd08c646c1bcf5ff 100644 (file)
@@ -29,7 +29,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
 extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void iscsit_free_cmd(struct iscsi_cmd *);
+extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
 extern int iscsit_check_session_usage_count(struct iscsi_session *);
 extern void iscsit_dec_session_usage_count(struct iscsi_session *);
 extern void iscsit_inc_session_usage_count(struct iscsi_session *);
index 43b7ac6c5b1c80e5132bb793d4a447abf11f443c..4a8bd36d39588b24d8de02f495e4ca166c022795 100644 (file)
@@ -1584,6 +1584,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
        .store  = target_core_store_dev_udev_path,
 };
 
+static ssize_t target_core_show_dev_enable(void *p, char *page)
+{
+       struct se_device *dev = p;
+
+       return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+}
+
 static ssize_t target_core_store_dev_enable(
        void *p,
        const char *page,
@@ -1609,8 +1616,8 @@ static ssize_t target_core_store_dev_enable(
 static struct target_core_configfs_attribute target_core_attr_dev_enable = {
        .attr   = { .ca_owner = THIS_MODULE,
                    .ca_name = "enable",
-                   .ca_mode = S_IWUSR },
-       .show   = NULL,
+                   .ca_mode =  S_IRUGO | S_IWUSR },
+       .show   = target_core_show_dev_enable,
        .store  = target_core_store_dev_enable,
 };
 
index 2e4d655471bc0f994c5d24df4b0c43f61fefac34..4630481b60438db290caf48ed8fc60c7f224938c 100644 (file)
@@ -68,7 +68,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                struct se_dev_entry *deve = se_cmd->se_deve;
 
                deve->total_cmds++;
-               deve->total_bytes += se_cmd->data_length;
 
                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
@@ -85,8 +84,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                        deve->read_bytes += se_cmd->data_length;
 
-               deve->deve_cmds++;
-
                se_lun = deve->se_lun;
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
@@ -275,17 +272,6 @@ int core_free_device_list_for_node(
        return 0;
 }
 
-void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
-{
-       struct se_dev_entry *deve;
-       unsigned long flags;
-
-       spin_lock_irqsave(&se_nacl->device_list_lock, flags);
-       deve = se_nacl->device_list[se_cmd->orig_fe_lun];
-       deve->deve_cmds--;
-       spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
-}
-
 void core_update_device_list_access(
        u32 mapped_lun,
        u32 lun_access,
index 58ed683e04aefc942b7456fae4b9c6ba10f05a00..b11890d85120977d6e380db19192143ddab6251c 100644 (file)
@@ -153,10 +153,7 @@ static int fd_configure_device(struct se_device *dev)
                struct request_queue *q = bdev_get_queue(inode->i_bdev);
                unsigned long long dev_size;
 
-               dev->dev_attrib.hw_block_size =
-                       bdev_logical_block_size(inode->i_bdev);
-               dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
-
+               fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
                /*
                 * Determine the number of bytes from i_size_read() minus
                 * one (1) logical sector from underlying struct block_device
@@ -203,9 +200,7 @@ static int fd_configure_device(struct se_device *dev)
                        goto fail;
                }
 
-               dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
-               dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
-
+               fd_dev->fd_block_size = FD_BLOCKSIZE;
                /*
                 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
                 */
@@ -224,8 +219,8 @@ static int fd_configure_device(struct se_device *dev)
                dev->dev_attrib.max_write_same_len = 0x1000;
        }
 
-       fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
-
+       dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
+       dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
        dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
        if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
@@ -699,11 +694,12 @@ static sector_t fd_get_blocks(struct se_device *dev)
         * to handle underlying block_device resize operations.
         */
        if (S_ISBLK(i->i_mode))
-               dev_size = (i_size_read(i) - fd_dev->fd_block_size);
+               dev_size = i_size_read(i);
        else
                dev_size = fd_dev->fd_dev_size;
 
-       return div_u64(dev_size, dev->dev_attrib.block_size);
+       return div_u64(dev_size - dev->dev_attrib.block_size,
+                      dev->dev_attrib.block_size);
 }
 
 static struct sbc_ops fd_sbc_ops = {
index 07f5f94634bb63ea217250d90e37d7030da93136..aa1620abec6dc0b1ccb5a78305ca2ed41800ac5d 100644 (file)
@@ -615,6 +615,8 @@ iblock_execute_rw(struct se_cmd *cmd)
                                rw = WRITE_FUA;
                        else if (!(q->flush_flags & REQ_FLUSH))
                                rw = WRITE_FUA;
+                       else
+                               rw = WRITE;
                } else {
                        rw = WRITE;
                }
index 853bab60e362ebd8371f50d7a52504ec25fb9fa4..18d49df4d0ac59435c9b50ef87fb61723415c1c5 100644 (file)
@@ -8,7 +8,6 @@ extern struct t10_alua_lu_gp *default_lu_gp;
 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
 int    core_free_device_list_for_node(struct se_node_acl *,
                struct se_portal_group *);
-void   core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
 void   core_update_device_list_access(u32, u32, struct se_node_acl *);
 int    core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
                u32, u32, struct se_node_acl *, struct se_portal_group *);
index e0b3c379aa148c75d4364dccbb47720c0db1b731..0921a64b555028997691fb28ad7ab84294b10a0d 100644 (file)
@@ -291,6 +291,11 @@ rd_execute_rw(struct se_cmd *cmd)
        u32 src_len;
        u64 tmp;
 
+       if (dev->rd_flags & RDF_NULLIO) {
+               target_complete_cmd(cmd, SAM_STAT_GOOD);
+               return 0;
+       }
+
        tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        rd_offset = do_div(tmp, PAGE_SIZE);
        rd_page = tmp;
@@ -373,11 +378,12 @@ rd_execute_rw(struct se_cmd *cmd)
 }
 
 enum {
-       Opt_rd_pages, Opt_err
+       Opt_rd_pages, Opt_rd_nullio, Opt_err
 };
 
 static match_table_t tokens = {
        {Opt_rd_pages, "rd_pages=%d"},
+       {Opt_rd_nullio, "rd_nullio=%d"},
        {Opt_err, NULL}
 };
 
@@ -408,6 +414,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                                " Count: %u\n", rd_dev->rd_page_count);
                        rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                        break;
+               case Opt_rd_nullio:
+                       match_int(args, &arg);
+                       if (arg != 1)
+                               break;
+
+                       pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
+                       rd_dev->rd_flags |= RDF_NULLIO;
+                       break;
                default:
                        break;
                }
@@ -424,8 +438,9 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
                        rd_dev->rd_dev_id);
        bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
-                       "  SG_table_count: %u\n", rd_dev->rd_page_count,
-                       PAGE_SIZE, rd_dev->sg_table_count);
+                       "  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
+                       PAGE_SIZE, rd_dev->sg_table_count,
+                       !!(rd_dev->rd_flags & RDF_NULLIO));
        return bl;
 }
 
index 933b38b6e56373f282c96a2d3af5677dd6ba8eca..1789d1e14395e0c631d56485972a5ad65b5799a1 100644 (file)
@@ -22,6 +22,7 @@ struct rd_dev_sg_table {
 } ____cacheline_aligned;
 
 #define RDF_HAS_PAGE_COUNT     0x01
+#define RDF_NULLIO             0x02
 
 struct rd_dev {
        struct se_device dev;
index f8388b4024aafa56e647c3e90bc99895df6c5e2c..21e315874a5472503dfe1ba010dea1236993b553 100644 (file)
@@ -65,7 +65,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
                struct se_device *dev);
 static int transport_generic_get_mem(struct se_cmd *cmd);
-static void transport_put_cmd(struct se_cmd *cmd);
+static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
@@ -221,6 +221,7 @@ struct se_session *transport_init_session(void)
        INIT_LIST_HEAD(&se_sess->sess_list);
        INIT_LIST_HEAD(&se_sess->sess_acl_list);
        INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+       INIT_LIST_HEAD(&se_sess->sess_wait_list);
        spin_lock_init(&se_sess->sess_cmd_lock);
        kref_init(&se_sess->sess_kref);
 
@@ -1943,7 +1944,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
  * This routine unconditionally frees a command, and reference counting
  * or list removal must be done in the caller.
  */
-static void transport_release_cmd(struct se_cmd *cmd)
+static int transport_release_cmd(struct se_cmd *cmd)
 {
        BUG_ON(!cmd->se_tfo);
 
@@ -1955,11 +1956,11 @@ static void transport_release_cmd(struct se_cmd *cmd)
         * If this cmd has been setup with target_get_sess_cmd(), drop
         * the kref and call ->release_cmd() in kref callback.
         */
-        if (cmd->check_release != 0) {
-               target_put_sess_cmd(cmd->se_sess, cmd);
-               return;
-       }
+        if (cmd->check_release != 0)
+               return target_put_sess_cmd(cmd->se_sess, cmd);
+
        cmd->se_tfo->release_cmd(cmd);
+       return 1;
 }
 
 /**
@@ -1968,7 +1969,7 @@ static void transport_release_cmd(struct se_cmd *cmd)
  *
  * This routine releases our reference to the command and frees it if possible.
  */
-static void transport_put_cmd(struct se_cmd *cmd)
+static int transport_put_cmd(struct se_cmd *cmd)
 {
        unsigned long flags;
 
@@ -1976,7 +1977,7 @@ static void transport_put_cmd(struct se_cmd *cmd)
        if (atomic_read(&cmd->t_fe_count) &&
            !atomic_dec_and_test(&cmd->t_fe_count)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               return;
+               return 0;
        }
 
        if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
@@ -1986,8 +1987,7 @@ static void transport_put_cmd(struct se_cmd *cmd)
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        transport_free_pages(cmd);
-       transport_release_cmd(cmd);
-       return;
+       return transport_release_cmd(cmd);
 }
 
 void *transport_kmap_data_sg(struct se_cmd *cmd)
@@ -2152,24 +2152,25 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
        }
 }
 
-void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
+       int ret = 0;
+
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
                if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                         transport_wait_for_tasks(cmd);
 
-               transport_release_cmd(cmd);
+               ret = transport_release_cmd(cmd);
        } else {
                if (wait_for_tasks)
                        transport_wait_for_tasks(cmd);
 
-               core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
-
                if (cmd->se_lun)
                        transport_lun_remove_cmd(cmd);
 
-               transport_put_cmd(cmd);
+               ret = transport_put_cmd(cmd);
        }
+       return ret;
 }
 EXPORT_SYMBOL(transport_generic_free_cmd);
 
@@ -2213,21 +2214,19 @@ static void target_release_cmd_kref(struct kref *kref)
 {
        struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
        struct se_session *se_sess = se_cmd->se_sess;
-       unsigned long flags;
 
-       spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (list_empty(&se_cmd->se_cmd_list)) {
-               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               spin_unlock(&se_sess->sess_cmd_lock);
                se_cmd->se_tfo->release_cmd(se_cmd);
                return;
        }
        if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               spin_unlock(&se_sess->sess_cmd_lock);
                complete(&se_cmd->cmd_wait_comp);
                return;
        }
        list_del(&se_cmd->se_cmd_list);
-       spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+       spin_unlock(&se_sess->sess_cmd_lock);
 
        se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2238,7 +2237,8 @@ static void target_release_cmd_kref(struct kref *kref)
  */
 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 {
-       return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+       return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
+                       &se_sess->sess_cmd_lock);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
@@ -2253,11 +2253,14 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
        unsigned long flags;
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-
-       WARN_ON(se_sess->sess_tearing_down);
+       if (se_sess->sess_tearing_down) {
+               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+               return;
+       }
        se_sess->sess_tearing_down = 1;
+       list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
 
-       list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
+       list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
                se_cmd->cmd_wait_set = 1;
 
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -2266,44 +2269,32 @@ EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
 
 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
  * @se_sess:    session to wait for active I/O
- * @wait_for_tasks:    Make extra transport_wait_for_tasks call
  */
-void target_wait_for_sess_cmds(
-       struct se_session *se_sess,
-       int wait_for_tasks)
+void target_wait_for_sess_cmds(struct se_session *se_sess)
 {
        struct se_cmd *se_cmd, *tmp_cmd;
-       bool rc = false;
+       unsigned long flags;
 
        list_for_each_entry_safe(se_cmd, tmp_cmd,
-                               &se_sess->sess_cmd_list, se_cmd_list) {
+                               &se_sess->sess_wait_list, se_cmd_list) {
                list_del(&se_cmd->se_cmd_list);
 
                pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
                        " %d\n", se_cmd, se_cmd->t_state,
                        se_cmd->se_tfo->get_cmd_state(se_cmd));
 
-               if (wait_for_tasks) {
-                       pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
-                               " fabric state: %d\n", se_cmd, se_cmd->t_state,
-                               se_cmd->se_tfo->get_cmd_state(se_cmd));
-
-                       rc = transport_wait_for_tasks(se_cmd);
-
-                       pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
-                               " fabric state: %d\n", se_cmd, se_cmd->t_state,
-                               se_cmd->se_tfo->get_cmd_state(se_cmd));
-               }
-
-               if (!rc) {
-                       wait_for_completion(&se_cmd->cmd_wait_comp);
-                       pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
-                               " fabric state: %d\n", se_cmd, se_cmd->t_state,
-                               se_cmd->se_tfo->get_cmd_state(se_cmd));
-               }
+               wait_for_completion(&se_cmd->cmd_wait_comp);
+               pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+                       " fabric state: %d\n", se_cmd, se_cmd->t_state,
+                       se_cmd->se_tfo->get_cmd_state(se_cmd));
 
                se_cmd->se_tfo->release_cmd(se_cmd);
        }
+
+       spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+       WARN_ON(!list_empty(&se_sess->sess_cmd_list));
+       spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
index 5b4d75fd7b49f3f857e39dacc04488977c3d448b..54ffd64ca3f7560a3b9563bf4087c949a32cf2c6 100644 (file)
@@ -169,21 +169,11 @@ static int armada_thermal_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get platform resource\n");
-               return -ENODEV;
-       }
-
        priv->sensor = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->sensor))
                return PTR_ERR(priv->sensor);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get platform resource\n");
-               return -ENODEV;
-       }
-
        priv->control = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->control))
                return PTR_ERR(priv->control);
index 4b15a5f270dc71a021bde1b6e96e8656720d45bc..a088d1365ca5ef287e46db053ba3625b8c536473 100644 (file)
@@ -149,10 +149,6 @@ static int dove_thermal_probe(struct platform_device *pdev)
                return PTR_ERR(priv->sensor);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get platform resource\n");
-               return -ENODEV;
-       }
        priv->control = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->control))
                return PTR_ERR(priv->control);
index d20ce9e614034ba4077d797b5584cfad4d994727..788b1ddcac6caa11709b0c383bc3d60295bdb8be 100644 (file)
@@ -925,11 +925,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        INIT_WORK(&data->irq_work, exynos_tmu_work);
 
        data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!data->mem) {
-               dev_err(&pdev->dev, "Failed to get platform resource\n");
-               return -ENOENT;
-       }
-
        data->base = devm_ioremap_resource(&pdev->dev, data->mem);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
index 6d0c27cd03da2676b91bfef307482a83be765697..9bffcec5ad82a4bcf0f05e3a89c73cf0b62433dc 100644 (file)
@@ -859,6 +859,7 @@ static int __init ehv_bc_init(void)
  */
 static void __exit ehv_bc_exit(void)
 {
+       platform_driver_unregister(&ehv_bc_tty_driver);
        tty_unregister_driver(ehv_bc_driver);
        put_tty_driver(ehv_bc_driver);
        kfree(bcs);
index 71d6eb2c93b1c6335ba123cc9ac3c17a2af0d805..4c4a236745692ab1ee2e674f5d7dbf0334002dae 100644 (file)
@@ -1618,8 +1618,12 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
                                if (ip->type == PORT_16550A)
                                        me->fifo[p] = 1;
 
-                               opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
-                               opmode &= OP_MODE_MASK;
+                               if (ip->board->chip_flag == MOXA_MUST_MU860_HWID) {
+                                       opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
+                                       opmode &= OP_MODE_MASK;
+                               } else {
+                                       opmode = RS232_MODE;
+                               }
                                me->iftype[p] = opmode;
                                mutex_unlock(&port->mutex);
                        }
@@ -1676,6 +1680,9 @@ static int mxser_ioctl(struct tty_struct *tty,
                int shiftbit;
                unsigned char val, mask;
 
+               if (info->board->chip_flag != MOXA_MUST_MU860_HWID)
+                       return -EFAULT;
+
                p = tty->index % 4;
                if (cmd == MOXA_SET_OP_MODE) {
                        if (get_user(opmode, (int __user *) argp))
index d655416087b7099d7639923c7c0a478c72a5afbe..6c7fe90ad72d48d2834536331e6826ee2719f94f 100644 (file)
@@ -1573,6 +1573,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
                        ldata->real_raw = 0;
        }
        n_tty_set_room(tty);
+       /*
+        * Fix tty hang when I_IXON(tty) is cleared, but the tty
+        * been stopped by STOP_CHAR(tty) before it.
+        */
+       if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
+               start_tty(tty);
+       }
+
        /* The termios change make the tty ready for I/O */
        wake_up_interruptible(&tty->write_wait);
        wake_up_interruptible(&tty->read_wait);
index 82d35c5a58fd0bc16649aacc3c004377ff23de54..354564ea47c504446ba5aeab1153d48edc4acc48 100644 (file)
@@ -150,12 +150,14 @@ static Word_t aiop_intr_bits[AIOP_CTL_SIZE] = {
        AIOP_INTR_BIT_3
 };
 
+#ifdef CONFIG_PCI
 static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = {
        UPCI_AIOP_INTR_BIT_0,
        UPCI_AIOP_INTR_BIT_1,
        UPCI_AIOP_INTR_BIT_2,
        UPCI_AIOP_INTR_BIT_3
 };
+#endif
 
 static Byte_t RData[RDATASIZE] = {
        0x00, 0x09, 0xf6, 0x82,
@@ -227,7 +229,6 @@ static unsigned long nextLineNumber;
 static int __init init_ISA(int i);
 static void rp_wait_until_sent(struct tty_struct *tty, int timeout);
 static void rp_flush_buffer(struct tty_struct *tty);
-static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model);
 static unsigned char GetLineNumber(int ctrl, int aiop, int ch);
 static unsigned char SetLineNumber(int ctrl, int aiop, int ch);
 static void rp_start(struct tty_struct *tty);
@@ -241,11 +242,6 @@ static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags);
 static void sModemReset(CONTROLLER_T * CtlP, int chan, int on);
 static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on);
 static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data);
-static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
-                             ByteIO_t * AiopIOList, int AiopIOListSize,
-                             WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
-                             int PeriodicOnly, int altChanRingIndicator,
-                             int UPCIRingInd);
 static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
                           ByteIO_t * AiopIOList, int AiopIOListSize,
                           int IRQNum, Byte_t Frequency, int PeriodicOnly);
@@ -1775,6 +1771,145 @@ static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = {
 };
 MODULE_DEVICE_TABLE(pci, rocket_pci_ids);
 
+/*  Resets the speaker controller on RocketModem II and III devices */
+static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model)
+{
+       ByteIO_t addr;
+
+       /* RocketModem II speaker control is at the 8th port location of offset 0x40 */
+       if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) {
+               addr = CtlP->AiopIO[0] + 0x4F;
+               sOutB(addr, 0);
+       }
+
+       /* RocketModem III speaker control is at the 1st port location of offset 0x80 */
+       if ((model == MODEL_UPCI_RM3_8PORT)
+           || (model == MODEL_UPCI_RM3_4PORT)) {
+               addr = CtlP->AiopIO[0] + 0x88;
+               sOutB(addr, 0);
+       }
+}
+
+/***************************************************************************
+Function: sPCIInitController
+Purpose:  Initialization of controller global registers and controller
+          structure.
+Call:     sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize,
+                          IRQNum,Frequency,PeriodicOnly)
+          CONTROLLER_T *CtlP; Ptr to controller structure
+          int CtlNum; Controller number
+          ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
+             This list must be in the order the AIOPs will be found on the
+             controller.  Once an AIOP in the list is not found, it is
+             assumed that there are no more AIOPs on the controller.
+          int AiopIOListSize; Number of addresses in AiopIOList
+          int IRQNum; Interrupt Request number.  Can be any of the following:
+                         0: Disable global interrupts
+                         3: IRQ 3
+                         4: IRQ 4
+                         5: IRQ 5
+                         9: IRQ 9
+                         10: IRQ 10
+                         11: IRQ 11
+                         12: IRQ 12
+                         15: IRQ 15
+          Byte_t Frequency: A flag identifying the frequency
+                   of the periodic interrupt, can be any one of the following:
+                      FREQ_DIS - periodic interrupt disabled
+                      FREQ_137HZ - 137 Hertz
+                      FREQ_69HZ - 69 Hertz
+                      FREQ_34HZ - 34 Hertz
+                      FREQ_17HZ - 17 Hertz
+                      FREQ_9HZ - 9 Hertz
+                      FREQ_4HZ - 4 Hertz
+                   If IRQNum is set to 0 the Frequency parameter is
+                   overidden, it is forced to a value of FREQ_DIS.
+          int PeriodicOnly: 1 if all interrupts except the periodic
+                               interrupt are to be blocked.
+                            0 is both the periodic interrupt and
+                               other channel interrupts are allowed.
+                            If IRQNum is set to 0 the PeriodicOnly parameter is
+                               overidden, it is forced to a value of 0.
+Return:   int: Number of AIOPs on the controller, or CTLID_NULL if controller
+               initialization failed.
+
+Comments:
+          If periodic interrupts are to be disabled but AIOP interrupts
+          are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
+
+          If interrupts are to be completely disabled set IRQNum to 0.
+
+          Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
+          invalid combination.
+
+          This function performs initialization of global interrupt modes,
+          but it does not actually enable global interrupts.  To enable
+          and disable global interrupts use functions sEnGlobalInt() and
+          sDisGlobalInt().  Enabling of global interrupts is normally not
+          done until all other initializations are complete.
+
+          Even if interrupts are globally enabled, they must also be
+          individually enabled for each channel that is to generate
+          interrupts.
+
+Warnings: No range checking on any of the parameters is done.
+
+          No context switches are allowed while executing this function.
+
+          After this function all AIOPs on the controller are disabled,
+          they can be enabled with sEnAiop().
+*/
+static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
+                             ByteIO_t * AiopIOList, int AiopIOListSize,
+                             WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
+                             int PeriodicOnly, int altChanRingIndicator,
+                             int UPCIRingInd)
+{
+       int i;
+       ByteIO_t io;
+
+       CtlP->AltChanRingIndicator = altChanRingIndicator;
+       CtlP->UPCIRingInd = UPCIRingInd;
+       CtlP->CtlNum = CtlNum;
+       CtlP->CtlID = CTLID_0001;       /* controller release 1 */
+       CtlP->BusType = isPCI;  /* controller release 1 */
+
+       if (ConfigIO) {
+               CtlP->isUPCI = 1;
+               CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL;
+               CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL;
+               CtlP->AiopIntrBits = upci_aiop_intr_bits;
+       } else {
+               CtlP->isUPCI = 0;
+               CtlP->PCIIO =
+                   (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC);
+               CtlP->AiopIntrBits = aiop_intr_bits;
+       }
+
+       sPCIControllerEOI(CtlP);        /* clear EOI if warm init */
+       /* Init AIOPs */
+       CtlP->NumAiop = 0;
+       for (i = 0; i < AiopIOListSize; i++) {
+               io = AiopIOList[i];
+               CtlP->AiopIO[i] = (WordIO_t) io;
+               CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
+
+               CtlP->AiopID[i] = sReadAiopID(io);      /* read AIOP ID */
+               if (CtlP->AiopID[i] == AIOPID_NULL)     /* if AIOP does not exist */
+                       break;  /* done looking for AIOPs */
+
+               CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
+               sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE);    /* clock prescaler */
+               sOutB(io + _INDX_DATA, sClockPrescale);
+               CtlP->NumAiop++;        /* bump count of AIOPs */
+       }
+
+       if (CtlP->NumAiop == 0)
+               return (-1);
+       else
+               return (CtlP->NumAiop);
+}
+
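As a rough sketch of the calling convention documented above (not part of this commit): the helper name, the four-AIOP count, and the per-AIOP stride below are invented for illustration, and the snippet assumes the rocket driver's existing types and constants (CONTROLLER_T, ByteIO_t, WordIO_t, FREQ_DIS) plus <linux/errno.h>.

static int example_setup_upci(CONTROLLER_T *ctlp, ByteIO_t aiop_base,
                              WordIO_t config_io)
{
        ByteIO_t aiop_io[4];    /* assume at most four AIOPs for the example */
        int i, num_aiops;

        for (i = 0; i < 4; i++)
                aiop_io[i] = aiop_base + i * 0x400;     /* hypothetical stride */

        /*
         * IRQNum = 0 disables global interrupts, so Frequency and
         * PeriodicOnly are forced to FREQ_DIS and 0, as noted above.
         */
        num_aiops = sPCIInitController(ctlp, 0, aiop_io, 4, config_io,
                                       0, FREQ_DIS, 0, 0, 0);
        if (num_aiops <= 0)
                return -ENODEV; /* no AIOPs found: initialization failed */

        /*
         * All AIOPs are still disabled here; per the comments above, the
         * driver would enable each one with sEnAiop() before use.
         */
        return num_aiops;
}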
 /*
  *  Called when a PCI card is found.  Retrieves and stores model information,
  *  init's aiopic and serial port hardware.
@@ -2519,147 +2654,6 @@ static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
                return (CtlP->NumAiop);
 }
 
-#ifdef CONFIG_PCI
-/***************************************************************************
-Function: sPCIInitController
-Purpose:  Initialization of controller global registers and controller
-          structure.
-Call:     sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize,
-                          IRQNum,Frequency,PeriodicOnly)
-          CONTROLLER_T *CtlP; Ptr to controller structure
-          int CtlNum; Controller number
-          ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
-             This list must be in the order the AIOPs will be found on the
-             controller.  Once an AIOP in the list is not found, it is
-             assumed that there are no more AIOPs on the controller.
-          int AiopIOListSize; Number of addresses in AiopIOList
-          int IRQNum; Interrupt Request number.  Can be any of the following:
-                         0: Disable global interrupts
-                         3: IRQ 3
-                         4: IRQ 4
-                         5: IRQ 5
-                         9: IRQ 9
-                         10: IRQ 10
-                         11: IRQ 11
-                         12: IRQ 12
-                         15: IRQ 15
-          Byte_t Frequency: A flag identifying the frequency
-                   of the periodic interrupt, can be any one of the following:
-                      FREQ_DIS - periodic interrupt disabled
-                      FREQ_137HZ - 137 Hertz
-                      FREQ_69HZ - 69 Hertz
-                      FREQ_34HZ - 34 Hertz
-                      FREQ_17HZ - 17 Hertz
-                      FREQ_9HZ - 9 Hertz
-                      FREQ_4HZ - 4 Hertz
-                   If IRQNum is set to 0 the Frequency parameter is
-                   overidden, it is forced to a value of FREQ_DIS.
-          int PeriodicOnly: 1 if all interrupts except the periodic
-                               interrupt are to be blocked.
-                            0 is both the periodic interrupt and
-                               other channel interrupts are allowed.
-                            If IRQNum is set to 0 the PeriodicOnly parameter is
-                               overidden, it is forced to a value of 0.
-Return:   int: Number of AIOPs on the controller, or CTLID_NULL if controller
-               initialization failed.
-
-Comments:
-          If periodic interrupts are to be disabled but AIOP interrupts
-          are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
-
-          If interrupts are to be completely disabled set IRQNum to 0.
-
-          Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
-          invalid combination.
-
-          This function performs initialization of global interrupt modes,
-          but it does not actually enable global interrupts.  To enable
-          and disable global interrupts use functions sEnGlobalInt() and
-          sDisGlobalInt().  Enabling of global interrupts is normally not
-          done until all other initializations are complete.
-
-          Even if interrupts are globally enabled, they must also be
-          individually enabled for each channel that is to generate
-          interrupts.
-
-Warnings: No range checking on any of the parameters is done.
-
-          No context switches are allowed while executing this function.
-
-          After this function all AIOPs on the controller are disabled,
-          they can be enabled with sEnAiop().
-*/
-static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
-                             ByteIO_t * AiopIOList, int AiopIOListSize,
-                             WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
-                             int PeriodicOnly, int altChanRingIndicator,
-                             int UPCIRingInd)
-{
-       int i;
-       ByteIO_t io;
-
-       CtlP->AltChanRingIndicator = altChanRingIndicator;
-       CtlP->UPCIRingInd = UPCIRingInd;
-       CtlP->CtlNum = CtlNum;
-       CtlP->CtlID = CTLID_0001;       /* controller release 1 */
-       CtlP->BusType = isPCI;  /* controller release 1 */
-
-       if (ConfigIO) {
-               CtlP->isUPCI = 1;
-               CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL;
-               CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL;
-               CtlP->AiopIntrBits = upci_aiop_intr_bits;
-       } else {
-               CtlP->isUPCI = 0;
-               CtlP->PCIIO =
-                   (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC);
-               CtlP->AiopIntrBits = aiop_intr_bits;
-       }
-
-       sPCIControllerEOI(CtlP);        /* clear EOI if warm init */
-       /* Init AIOPs */
-       CtlP->NumAiop = 0;
-       for (i = 0; i < AiopIOListSize; i++) {
-               io = AiopIOList[i];
-               CtlP->AiopIO[i] = (WordIO_t) io;
-               CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
-
-               CtlP->AiopID[i] = sReadAiopID(io);      /* read AIOP ID */
-               if (CtlP->AiopID[i] == AIOPID_NULL)     /* if AIOP does not exist */
-                       break;  /* done looking for AIOPs */
-
-               CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
-               sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE);    /* clock prescaler */
-               sOutB(io + _INDX_DATA, sClockPrescale);
-               CtlP->NumAiop++;        /* bump count of AIOPs */
-       }
-
-       if (CtlP->NumAiop == 0)
-               return (-1);
-       else
-               return (CtlP->NumAiop);
-}
-
-/*  Resets the speaker controller on RocketModem II and III devices */
-static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model)
-{
-       ByteIO_t addr;
-
-       /* RocketModem II speaker control is at the 8th port location of offset 0x40 */
-       if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) {
-               addr = CtlP->AiopIO[0] + 0x4F;
-               sOutB(addr, 0);
-       }
-
-       /* RocketModem III speaker control is at the 1st port location of offset 0x80 */
-       if ((model == MODEL_UPCI_RM3_8PORT)
-           || (model == MODEL_UPCI_RM3_4PORT)) {
-               addr = CtlP->AiopIO[0] + 0x88;
-               sOutB(addr, 0);
-       }
-}
-#endif
-
 /***************************************************************************
 Function: sReadAiopID
 Purpose:  Read the AIOP identification number directly from an AIOP.
index ef2e08e9b5901fdd9267a66896da10f829f931ac..5dc9c4bfa66e4686d5360ab33da61eff1109715a 100644 (file)
@@ -14,7 +14,6 @@
  * 2.4/2.5 port                 David McCullough
  */
 
-#include <asm/dbg.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/serial.h>
index 46528d57be72b710513a65ad2494ae839774981a..86c00b1c55836b3c52801c9d79bccef489c728d2 100644 (file)
@@ -2755,7 +2755,7 @@ static void __init serial8250_isa_init_ports(void)
        if (nr_uarts > UART_NR)
                nr_uarts = UART_NR;
 
-       for (i = 0; i < UART_NR; i++) {
+       for (i = 0; i < nr_uarts; i++) {
                struct uart_8250_port *up = &serial8250_ports[i];
                struct uart_port *port = &up->port;
 
@@ -2916,7 +2916,7 @@ static int __init serial8250_console_setup(struct console *co, char *options)
         * if so, search for the first available port that does have
         * console support.
         */
-       if (co->index >= UART_NR)
+       if (co->index >= nr_uarts)
                co->index = 0;
        port = &serial8250_ports[co->index].port;
        if (!port->iobase && !port->membase)
@@ -2957,7 +2957,7 @@ int serial8250_find_port(struct uart_port *p)
        int line;
        struct uart_port *port;
 
-       for (line = 0; line < UART_NR; line++) {
+       for (line = 0; line < nr_uarts; line++) {
                port = &serial8250_ports[line].port;
                if (uart_match_port(p, port))
                        return line;
@@ -3110,7 +3110,7 @@ static int serial8250_remove(struct platform_device *dev)
 {
        int i;
 
-       for (i = 0; i < UART_NR; i++) {
+       for (i = 0; i < nr_uarts; i++) {
                struct uart_8250_port *up = &serial8250_ports[i];
 
                if (up->port.dev == &dev->dev)
@@ -3178,7 +3178,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
        /*
         * First, find a port entry which matches.
         */
-       for (i = 0; i < UART_NR; i++)
+       for (i = 0; i < nr_uarts; i++)
                if (uart_match_port(&serial8250_ports[i].port, port))
                        return &serial8250_ports[i];
 
@@ -3187,7 +3187,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
         * free entry.  We look for one which hasn't been previously
         * used (indicated by zero iobase).
         */
-       for (i = 0; i < UART_NR; i++)
+       for (i = 0; i < nr_uarts; i++)
                if (serial8250_ports[i].port.type == PORT_UNKNOWN &&
                    serial8250_ports[i].port.iobase == 0)
                        return &serial8250_ports[i];
@@ -3196,7 +3196,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
         * That also failed.  Last resort is to find any entry which
         * doesn't have a real port associated with it.
         */
-       for (i = 0; i < UART_NR; i++)
+       for (i = 0; i < nr_uarts; i++)
                if (serial8250_ports[i].port.type == PORT_UNKNOWN)
                        return &serial8250_ports[i];
 
index beaa283f5cc6f75c24484b11beb73704f25a6ccd..d07b6af3a9379db82927677a5915d9bae01d7874 100644 (file)
@@ -338,7 +338,8 @@ static int dw8250_runtime_suspend(struct device *dev)
 {
        struct dw8250_data *data = dev_get_drvdata(dev);
 
-       clk_disable_unprepare(data->clk);
+       if (!IS_ERR(data->clk))
+               clk_disable_unprepare(data->clk);
 
        return 0;
 }
@@ -347,7 +348,8 @@ static int dw8250_runtime_resume(struct device *dev)
 {
        struct dw8250_data *data = dev_get_drvdata(dev);
 
-       clk_prepare_enable(data->clk);
+       if (!IS_ERR(data->clk))
+               clk_prepare_enable(data->clk);
 
        return 0;
 }
@@ -367,6 +369,7 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
 static const struct acpi_device_id dw8250_acpi_match[] = {
        { "INT33C4", 0 },
        { "INT33C5", 0 },
+       { "80860F0A", 0 },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
index 8ab70a6209199145f840b9d0cff0b255700f454c..e2774f9ecd59f16915e0647028e643e616b9c4cb 100644 (file)
@@ -332,7 +332,7 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *
                dmaengine_slave_config(chan, &rx_conf);
                uap->dmarx.chan = chan;
 
-               if (plat->dma_rx_poll_enable) {
+               if (plat && plat->dma_rx_poll_enable) {
                        /* Set poll rate if specified. */
                        if (plat->dma_rx_poll_rate) {
                                uap->dmarx.auto_poll_rate = false;
index 52a3ecd404219c7365850debe309d06b99a2d8c8..6fa2ae77fffde5a2de2fd14f8568da7fc55f8d1e 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 
-#include <bcm63xx_clk.h>
 #include <bcm63xx_irq.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_io.h>
index 147c9e1935951dcca13a7df60b2d02e999663a30..8cdfbd365892146611b772bbfb217a8f48938c3a 100644 (file)
@@ -761,6 +761,8 @@ static int imx_startup(struct uart_port *port)
 
        temp = readl(sport->port.membase + UCR2);
        temp |= (UCR2_RXEN | UCR2_TXEN);
+       if (!sport->have_rtscts)
+               temp |= UCR2_IRTS;
        writel(temp, sport->port.membase + UCR2);
 
        if (USE_IRDA(sport)) {
index e956377a38fe53e299766724c278a05a35d802c4..65be0c00c4bff3f802549a23dfe34515a5a03b54 100644 (file)
@@ -707,8 +707,10 @@ static int __init mcf_init(void)
        if (rc)
                return rc;
        rc = platform_driver_register(&mcf_platform_driver);
-       if (rc)
+       if (rc) {
+               uart_unregister_driver(&mcf_driver);
                return rc;
+       }
        return 0;
 }
 
index 018bad922554fc45b9ad711bcab2a106fc683b6e..f51b280f3bf280b882130ec92b499be65937eed2 100644 (file)
@@ -1497,18 +1497,23 @@ mpc52xx_uart_init(void)
        if (psc_ops && psc_ops->fifoc_init) {
                ret = psc_ops->fifoc_init();
                if (ret)
-                       return ret;
+                       goto err_init;
        }
 
        ret = platform_driver_register(&mpc52xx_uart_of_driver);
        if (ret) {
                printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
                       __FILE__, ret);
-               uart_unregister_driver(&mpc52xx_uart_driver);
-               return ret;
+               goto err_reg;
        }
 
        return 0;
+err_reg:
+       if (psc_ops && psc_ops->fifoc_uninit)
+               psc_ops->fifoc_uninit();
+err_init:
+       uart_unregister_driver(&mpc52xx_uart_driver);
+       return ret;
 }
 
 static void __exit
index 77287c54f331682425403e045b0548c7f460590c..549c70a2a63e8397521b63fd6120e7dad9cfb079 100644 (file)
@@ -199,7 +199,7 @@ static void nwpserial_shutdown(struct uart_port *port)
        dcr_write(up->dcr_host, UART_IER, up->ier);
 
        /* free irq */
-       free_irq(up->port.irq, port);
+       free_irq(up->port.irq, up);
 }
 
 static int nwpserial_verify_port(struct uart_port *port,
index 30d4f7a783cd1f0bbfdf6ebaf491f2400153eaf0..f0b9f6b52b32cd887ad6b86586f97aa9eec9648c 100644 (file)
@@ -202,26 +202,6 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
        return pdata->get_context_loss_count(up->dev);
 }
 
-static void serial_omap_set_forceidle(struct uart_omap_port *up)
-{
-       struct omap_uart_port_info *pdata = up->dev->platform_data;
-
-       if (!pdata || !pdata->set_forceidle)
-               return;
-
-       pdata->set_forceidle(up->dev);
-}
-
-static void serial_omap_set_noidle(struct uart_omap_port *up)
-{
-       struct omap_uart_port_info *pdata = up->dev->platform_data;
-
-       if (!pdata || !pdata->set_noidle)
-               return;
-
-       pdata->set_noidle(up->dev);
-}
-
 static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
 {
        struct omap_uart_port_info *pdata = up->dev->platform_data;
@@ -298,8 +278,6 @@ static void serial_omap_stop_tx(struct uart_port *port)
                serial_out(up, UART_IER, up->ier);
        }
 
-       serial_omap_set_forceidle(up);
-
        pm_runtime_mark_last_busy(up->dev);
        pm_runtime_put_autosuspend(up->dev);
 }
@@ -364,7 +342,6 @@ static void serial_omap_start_tx(struct uart_port *port)
 
        pm_runtime_get_sync(up->dev);
        serial_omap_enable_ier_thri(up);
-       serial_omap_set_noidle(up);
        pm_runtime_mark_last_busy(up->dev);
        pm_runtime_put_autosuspend(up->dev);
 }
index 074b9194144fdefa1e09536a96626129624a64e0..0c8a9fa2be6cee12182bb8e32e0ed37be1c8fdab 100644 (file)
@@ -1166,6 +1166,18 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
                ourport->tx_irq = ret;
 
        ourport->clk    = clk_get(&platdev->dev, "uart");
+       if (IS_ERR(ourport->clk)) {
+               pr_err("%s: Controller clock not found\n",
+                               dev_name(&platdev->dev));
+               return PTR_ERR(ourport->clk);
+       }
+
+       ret = clk_prepare_enable(ourport->clk);
+       if (ret) {
+               pr_err("uart: clock failed to prepare+enable: %d\n", ret);
+               clk_put(ourport->clk);
+               return ret;
+       }
 
        /* Keep all interrupts masked and cleared */
        if (s3c24xx_serial_has_interrupt_mask(port)) {
@@ -1180,6 +1192,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
 
        /* reset the fifos (and setup the uart) */
        s3c24xx_serial_resetport(port, cfg);
+       clk_disable_unprepare(ourport->clk);
        return 0;
 }
 
@@ -1803,6 +1816,7 @@ static int __init s3c24xx_serial_modinit(void)
 
 static void __exit s3c24xx_serial_modexit(void)
 {
+       platform_driver_unregister(&samsung_serial_driver);
        uart_unregister_driver(&s3c24xx_uart_drv);
 }
 
index 6953dc82850cb278fd99da209e560021d4d0c1e8..a4fdce74f883e4f5868f3efb975ec51347ef2c47 100644 (file)
@@ -60,24 +60,22 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf)
                tty_audit_buf_free(buf);
 }
 
-static void tty_audit_log(const char *description, struct task_struct *tsk,
-                         kuid_t loginuid, unsigned sessionid, int major,
-                         int minor, unsigned char *data, size_t size)
+static void tty_audit_log(const char *description, int major, int minor,
+                         unsigned char *data, size_t size)
 {
        struct audit_buffer *ab;
+       struct task_struct *tsk = current;
+       uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
+       uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
+       u32 sessionid = audit_get_sessionid(tsk);
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
        if (ab) {
                char name[sizeof(tsk->comm)];
-               kuid_t uid = task_uid(tsk);
-
-               audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u "
-                                "major=%d minor=%d comm=", description,
-                                tsk->pid,
-                                from_kuid(&init_user_ns, uid),
-                                from_kuid(&init_user_ns, loginuid),
-                                sessionid,
-                                major, minor);
+
+               audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
+                                " minor=%d comm=", description, tsk->pid, uid,
+                                loginuid, sessionid, major, minor);
                get_task_comm(name, tsk);
                audit_log_untrustedstring(ab, name);
                audit_log_format(ab, " data=");
@@ -90,11 +88,9 @@ static void tty_audit_log(const char *description, struct task_struct *tsk,
  *     tty_audit_buf_push      -       Push buffered data out
  *
  *     Generate an audit message from the contents of @buf, which is owned by
- *     @tsk with @loginuid.  @buf->mutex must be locked.
+ *     the current task.  @buf->mutex must be locked.
  */
-static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid,
-                              unsigned int sessionid,
-                              struct tty_audit_buf *buf)
+static void tty_audit_buf_push(struct tty_audit_buf *buf)
 {
        if (buf->valid == 0)
                return;
@@ -102,24 +98,10 @@ static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid,
                buf->valid = 0;
                return;
        }
-       tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor,
-                     buf->data, buf->valid);
+       tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid);
        buf->valid = 0;
 }
 
-/**
- *     tty_audit_buf_push_current      -       Push buffered data out
- *
- *     Generate an audit message from the contents of @buf, which is owned by
- *     the current task.  @buf->mutex must be locked.
- */
-static void tty_audit_buf_push_current(struct tty_audit_buf *buf)
-{
-       kuid_t auid = audit_get_loginuid(current);
-       unsigned int sessionid = audit_get_sessionid(current);
-       tty_audit_buf_push(current, auid, sessionid, buf);
-}
-
 /**
  *     tty_audit_exit  -       Handle a task exit
  *
@@ -130,15 +112,13 @@ void tty_audit_exit(void)
 {
        struct tty_audit_buf *buf;
 
-       spin_lock_irq(&current->sighand->siglock);
        buf = current->signal->tty_audit_buf;
        current->signal->tty_audit_buf = NULL;
-       spin_unlock_irq(&current->sighand->siglock);
        if (!buf)
                return;
 
        mutex_lock(&buf->mutex);
-       tty_audit_buf_push_current(buf);
+       tty_audit_buf_push(buf);
        mutex_unlock(&buf->mutex);
 
        tty_audit_buf_put(buf);
@@ -151,9 +131,8 @@ void tty_audit_exit(void)
  */
 void tty_audit_fork(struct signal_struct *sig)
 {
-       spin_lock_irq(&current->sighand->siglock);
        sig->audit_tty = current->signal->audit_tty;
-       spin_unlock_irq(&current->sighand->siglock);
+       sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd;
 }
 
 /**
@@ -163,20 +142,21 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch)
 {
        struct tty_audit_buf *buf;
        int major, minor, should_audit;
+       unsigned long flags;
 
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        should_audit = current->signal->audit_tty;
        buf = current->signal->tty_audit_buf;
        if (buf)
                atomic_inc(&buf->count);
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
        major = tty->driver->major;
        minor = tty->driver->minor_start + tty->index;
        if (buf) {
                mutex_lock(&buf->mutex);
                if (buf->major == major && buf->minor == minor)
-                       tty_audit_buf_push_current(buf);
+                       tty_audit_buf_push(buf);
                mutex_unlock(&buf->mutex);
                tty_audit_buf_put(buf);
        }
@@ -187,24 +167,20 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch)
 
                auid = audit_get_loginuid(current);
                sessionid = audit_get_sessionid(current);
-               tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major,
-                             minor, &ch, 1);
+               tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1);
        }
 }
 
 /**
- * tty_audit_push_task -       Flush task's pending audit data
- * @tsk:               task pointer
- * @loginuid:          sender login uid
- * @sessionid:         sender session id
+ * tty_audit_push_current -    Flush current's pending audit data
  *
- * Called with a ref on @tsk held. Try to lock sighand and get a
- * reference to the tty audit buffer if available.
+ * Try to lock sighand and get a reference to the tty audit buffer if available.
  * Flush the buffer or return an appropriate error code.
  */
-int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid)
+int tty_audit_push_current(void)
 {
        struct tty_audit_buf *buf = ERR_PTR(-EPERM);
+       struct task_struct *tsk = current;
        unsigned long flags;
 
        if (!lock_task_sighand(tsk, &flags))
@@ -225,7 +201,7 @@ int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid)
                return PTR_ERR(buf);
 
        mutex_lock(&buf->mutex);
-       tty_audit_buf_push(tsk, loginuid, sessionid, buf);
+       tty_audit_buf_push(buf);
        mutex_unlock(&buf->mutex);
 
        tty_audit_buf_put(buf);
@@ -243,10 +219,11 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
                unsigned icanon)
 {
        struct tty_audit_buf *buf, *buf2;
+       unsigned long flags;
 
        buf = NULL;
        buf2 = NULL;
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        if (likely(!current->signal->audit_tty))
                goto out;
        buf = current->signal->tty_audit_buf;
@@ -254,7 +231,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
                atomic_inc(&buf->count);
                goto out;
        }
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
        buf2 = tty_audit_buf_alloc(tty->driver->major,
                                   tty->driver->minor_start + tty->index,
@@ -264,7 +241,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
                return NULL;
        }
 
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        if (!current->signal->audit_tty)
                goto out;
        buf = current->signal->tty_audit_buf;
@@ -276,7 +253,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
        atomic_inc(&buf->count);
        /* Fall through */
  out:
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
        if (buf2)
                tty_audit_buf_free(buf2);
        return buf;
@@ -292,10 +269,18 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
 {
        struct tty_audit_buf *buf;
        int major, minor;
+       int audit_log_tty_passwd;
+       unsigned long flags;
 
        if (unlikely(size == 0))
                return;
 
+       spin_lock_irqsave(&current->sighand->siglock, flags);
+       audit_log_tty_passwd = current->signal->audit_tty_log_passwd;
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
+       if (!audit_log_tty_passwd && icanon && !L_ECHO(tty))
+               return;
+
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY
            && tty->driver->subtype == PTY_TYPE_MASTER)
                return;
@@ -309,7 +294,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
        minor = tty->driver->minor_start + tty->index;
        if (buf->major != major || buf->minor != minor
            || buf->icanon != icanon) {
-               tty_audit_buf_push_current(buf);
+               tty_audit_buf_push(buf);
                buf->major = major;
                buf->minor = minor;
                buf->icanon = icanon;
@@ -325,7 +310,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
                data += run;
                size -= run;
                if (buf->valid == N_TTY_BUF_SIZE)
-                       tty_audit_buf_push_current(buf);
+                       tty_audit_buf_push(buf);
        } while (size != 0);
        mutex_unlock(&buf->mutex);
        tty_audit_buf_put(buf);
@@ -339,16 +324,17 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
 void tty_audit_push(struct tty_struct *tty)
 {
        struct tty_audit_buf *buf;
+       unsigned long flags;
 
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        if (likely(!current->signal->audit_tty)) {
-               spin_unlock_irq(&current->sighand->siglock);
+               spin_unlock_irqrestore(&current->sighand->siglock, flags);
                return;
        }
        buf = current->signal->tty_audit_buf;
        if (buf)
                atomic_inc(&buf->count);
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
        if (buf) {
                int major, minor;
@@ -357,7 +343,7 @@ void tty_audit_push(struct tty_struct *tty)
                minor = tty->driver->minor_start + tty->index;
                mutex_lock(&buf->mutex);
                if (buf->major == major && buf->minor == minor)
-                       tty_audit_buf_push_current(buf);
+                       tty_audit_buf_push(buf);
                mutex_unlock(&buf->mutex);
                tty_audit_buf_put(buf);
        }
index fbd447b390f775fa8182a290266d3d4f41f46e2c..740202d8a5c4b732ab9c98b188c995d3a0f78587 100644 (file)
@@ -779,7 +779,6 @@ int vc_allocate(unsigned int currcons)      /* return 0 on success */
                con_set_default_unimap(vc);
            vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
            if (!vc->vc_screenbuf) {
-               tty_port_destroy(&vc->port);
                kfree(vc);
                vc_cons[currcons].d = NULL;
                return -ENOMEM;
@@ -986,26 +985,25 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws)
        return ret;
 }
 
-void vc_deallocate(unsigned int currcons)
+struct vc_data *vc_deallocate(unsigned int currcons)
 {
+       struct vc_data *vc = NULL;
+
        WARN_CONSOLE_UNLOCKED();
 
        if (vc_cons_allocated(currcons)) {
-               struct vc_data *vc = vc_cons[currcons].d;
-               struct vt_notifier_param param = { .vc = vc };
+               struct vt_notifier_param param;
 
+               param.vc = vc = vc_cons[currcons].d;
                atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
                vcs_remove_sysfs(currcons);
                vc->vc_sw->con_deinit(vc);
                put_pid(vc->vt_pid);
                module_put(vc->vc_sw->owner);
                kfree(vc->vc_screenbuf);
-               if (currcons >= MIN_NR_CONSOLES) {
-                       tty_port_destroy(&vc->port);
-                       kfree(vc);
-               }
                vc_cons[currcons].d = NULL;
        }
+       return vc;
 }
 
 /*
index 98ff1735eafc0841428a2e2fa2257909073c775e..fc2c06c66e89d5e7536c5bb5387bc0fd9a837086 100644 (file)
@@ -283,6 +283,51 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_
        return 0;
 }
 
+/* deallocate a single console, if possible (leave 0) */
+static int vt_disallocate(unsigned int vc_num)
+{
+       struct vc_data *vc = NULL;
+       int ret = 0;
+
+       if (!vc_num)
+               return 0;
+
+       console_lock();
+       if (VT_BUSY(vc_num))
+               ret = -EBUSY;
+       else
+               vc = vc_deallocate(vc_num);
+       console_unlock();
+
+       if (vc && vc_num >= MIN_NR_CONSOLES) {
+               tty_port_destroy(&vc->port);
+               kfree(vc);
+       }
+
+       return ret;
+}
+
+/* deallocate all unused consoles, but leave 0 */
+static void vt_disallocate_all(void)
+{
+       struct vc_data *vc[MAX_NR_CONSOLES];
+       int i;
+
+       console_lock();
+       for (i = 1; i < MAX_NR_CONSOLES; i++)
+               if (!VT_BUSY(i))
+                       vc[i] = vc_deallocate(i);
+               else
+                       vc[i] = NULL;
+       console_unlock();
+
+       for (i = 1; i < MAX_NR_CONSOLES; i++) {
+               if (vc[i] && i >= MIN_NR_CONSOLES) {
+                       tty_port_destroy(&vc[i]->port);
+                       kfree(vc[i]);
+               }
+       }
+}
 
 
 /*
@@ -769,24 +814,10 @@ int vt_ioctl(struct tty_struct *tty,
                        ret = -ENXIO;
                        break;
                }
-               if (arg == 0) {
-                   /* deallocate all unused consoles, but leave 0 */
-                       console_lock();
-                       for (i=1; i<MAX_NR_CONSOLES; i++)
-                               if (! VT_BUSY(i))
-                                       vc_deallocate(i);
-                       console_unlock();
-               } else {
-                       /* deallocate a single console, if possible */
-                       arg--;
-                       if (VT_BUSY(arg))
-                               ret = -EBUSY;
-                       else if (arg) {                       /* leave 0 */
-                               console_lock();
-                               vc_deallocate(arg);
-                               console_unlock();
-                       }
-               }
+               if (arg == 0)
+                       vt_disallocate_all();
+               else
+                       ret = vt_disallocate(--arg);
                break;
 
        case VT_RESIZE:
index e92eeaf251fe33c8c12f693b54db8b75258643a2..5295be0342c1acc88f5698bbdc29a16268ad551a 100644 (file)
@@ -45,6 +45,7 @@ config UIO_PDRV_GENIRQ
 
 config UIO_DMEM_GENIRQ
        tristate "Userspace platform driver with generic irq and dynamic memory"
+       depends on HAS_DMA
        help
          Platform driver for Userspace I/O devices, including generic
          interrupt handling code. Shared interrupts are not supported.
index b7eb86ad6bf2c2538e1a5d08d358c9b29f432e76..8a7eb77233b4a7b275e042dfeb965be2f45393f8 100644 (file)
@@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
 {
        int ret, len;
        __le32 *buf;
-       int offb, offd;
+       int offb;
+       unsigned int offd;
        const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;
        int buflen =  ((size - 1) / stride + 1 + size * 2) * 4;
 
index 608a2aeb400c929726c4a9581dfad1e09eeed179..b2df442eb3e5b9537772569cfd321e32e5ef2ce5 100644 (file)
@@ -20,7 +20,7 @@ config USB_CHIPIDEA_UDC
 config USB_CHIPIDEA_HOST
        bool "ChipIdea host controller"
        depends on USB=y || USB=USB_CHIPIDEA
-       depends on USB_EHCI_HCD
+       depends on USB_EHCI_HCD=y
        select USB_EHCI_ROOT_HUB_TT
        help
          Say Y here to enable host controller functionality of the
index 8faec9dbbb84326d436ffc568451b322eb95a1a3..73f9d5f15adb6706b30a48b4c34963027d2565a0 100644 (file)
@@ -173,17 +173,10 @@ static int ci13xxx_imx_probe(struct platform_device *pdev)
 
        ci13xxx_imx_platdata.phy = data->phy;
 
-       if (!pdev->dev.dma_mask) {
-               pdev->dev.dma_mask = devm_kzalloc(&pdev->dev,
-                                     sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
-               if (!pdev->dev.dma_mask) {
-                       ret = -ENOMEM;
-                       dev_err(&pdev->dev, "Failed to alloc dma_mask!\n");
-                       goto err;
-               }
-               *pdev->dev.dma_mask = DMA_BIT_MASK(32);
-               dma_set_coherent_mask(&pdev->dev, *pdev->dev.dma_mask);
-       }
+       if (!pdev->dev.dma_mask)
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        if (usbmisc_ops && usbmisc_ops->init) {
                ret = usbmisc_ops->init(&pdev->dev);
index 450107e5f657a32a3b7d5b0fd1e310a99e77ec66..49b098bedf9b732ae5c99fdf3020903c24b261e4 100644 (file)
@@ -370,11 +370,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "missing resource\n");
-               return -ENODEV;
-       }
-
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
index 8772b3659296b90b2c009774fd3bba8c0498236e..db535b0aa172c85b3787a476b8fe3adeefbe3e6f 100644 (file)
@@ -51,7 +51,7 @@ config USB_DYNAMIC_MINORS
 
 config USB_OTG
        bool "OTG support"
-       depends on USB_SUSPEND
+       depends on PM_RUNTIME
        default n
        help
          The most notable feature of USB OTG is support for a
index caefc800f2986defa9f66973d071fc7140d632d4..c88c4fb9459dfbc94119e8a0590a924fa5868f0c 100644 (file)
@@ -1287,9 +1287,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
                        goto error;
                }
                for (totlen = u = 0; u < uurb->number_of_packets; u++) {
-                       /* arbitrary limit,
-                        * sufficient for USB 2.0 high-bandwidth iso */
-                       if (isopkt[u].length > 8192) {
+                       /*
+                        * arbitrary limit needed for USB 3.0
+                        * bMaxBurst (0~15 allowed, 1~16 packets)
+                        * bmAttributes (bit 1:0, mult 0~2, 1~3 packets)
+                        * sizemax: 1024 * 16 * 3 = 49152
+                        */
+                       if (isopkt[u].length > 49152) {
                                ret = -EINVAL;
                                goto error;
                        }
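The 49152-byte cap in the new comment is just the SuperSpeed worst case: 1024-byte packets, up to 16 packets per burst (bMaxBurst 0..15) and up to 3 bursts per service interval (mult 0..2). A standalone sketch of that arithmetic, for illustration only (not driver code):

#include <stdio.h>

/* Worst-case bytes per service interval for a SuperSpeed isochronous
 * endpoint: 1024-byte packets, (bMaxBurst + 1) packets per burst with
 * bMaxBurst in 0..15, and (mult + 1) bursts with mult in 0..2. */
static unsigned int ss_isoc_max_bytes(unsigned int bMaxBurst, unsigned int mult)
{
        return 1024 * (bMaxBurst + 1) * (mult + 1);
}

int main(void)
{
        printf("%u\n", ss_isoc_max_bytes(15, 2));       /* prints 49152 */
        return 0;
}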
index ab5638d9c707c0524eb37a58fc1ce3e95d1a7ae9..a63598895077ff8c984c0ed473d5ee90afa52086 100644 (file)
@@ -88,6 +88,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Edirol SD-20 */
        { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Alcor Micro Corp. Hub */
+       { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* appletouch */
        { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
index ea5ee9c21c3514cc7a0fe87ff2f6d15bcc206eb2..757aa18027d05ca0eb198d97d92f1710e8e58def 100644 (file)
@@ -19,21 +19,21 @@ choice
 
 config USB_DWC3_HOST
        bool "Host only mode"
-       depends on USB
+       depends on USB=y || USB=USB_DWC3
        help
          Select this when you want to use DWC3 in host mode only;
          the gadget feature will then be unavailable.
 
 config USB_DWC3_GADGET
        bool "Gadget only mode"
-       depends on USB_GADGET
+       depends on USB_GADGET=y || USB_GADGET=USB_DWC3
        help
          Select this when you want to use DWC3 in gadget mode only;
          the host feature will then be unavailable.
 
 config USB_DWC3_DUAL_ROLE
        bool "Dual Role mode"
-       depends on (USB && USB_GADGET)
+       depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
        help
          This is the default mode of working of DWC3 controller where
          both host and gadget features are enabled.
index a8afe6e2662186e50e45cf578d967c901c9a7456..8ce9d7fd6cfc30051462373d3c8d1f2b681dfcb3 100644 (file)
@@ -95,8 +95,6 @@ static int dwc3_exynos_remove_child(struct device *dev, void *unused)
        return 0;
 }
 
-static u64 dwc3_exynos_dma_mask = DMA_BIT_MASK(32);
-
 static int dwc3_exynos_probe(struct platform_device *pdev)
 {
        struct dwc3_exynos      *exynos;
@@ -118,7 +116,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
         * Once we move to full device tree support this will vanish off.
         */
        if (!dev->dma_mask)
-               dev->dma_mask = &dwc3_exynos_dma_mask;
+               dev->dma_mask = &dev->coherent_dma_mask;
+       if (!dev->coherent_dma_mask)
+               dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
        platform_set_drvdata(pdev, exynos);
 
@@ -164,9 +164,9 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
 {
        struct dwc3_exynos      *exynos = platform_get_drvdata(pdev);
 
+       device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
        platform_device_unregister(exynos->usb2_phy);
        platform_device_unregister(exynos->usb3_phy);
-       device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
 
        clk_disable_unprepare(exynos->clk);
 
index 227d4a7acad7595d4774e9fc335a790c742087be..eba9e2baf32b3d23a78977342286a2a8be5506aa 100644 (file)
@@ -196,9 +196,9 @@ static void dwc3_pci_remove(struct pci_dev *pci)
 {
        struct dwc3_pci *glue = pci_get_drvdata(pci);
 
+       platform_device_unregister(glue->dwc3);
        platform_device_unregister(glue->usb2_phy);
        platform_device_unregister(glue->usb3_phy);
-       platform_device_unregister(glue->dwc3);
        pci_set_drvdata(pci, NULL);
        pci_disable_device(pci);
 }
index 2b6e7e0012071259464c1db3acf6c54b55e5971e..b5e5b35df49c8ff49ca5817ced0838e79dad9ccf 100644 (file)
@@ -1706,11 +1706,19 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
                dep = dwc->eps[epnum];
                if (!dep)
                        continue;
-
-               dwc3_free_trb_pool(dep);
-
-               if (epnum != 0 && epnum != 1)
+               /*
+                * Physical endpoints 0 and 1 are special; they form the
+                * bi-directional USB endpoint 0.
+                *
+                * For those two physical endpoints, we don't allocate a TRB
+                * pool nor do we add them to the endpoints list. Due to that, we
+                * shouldn't do these two operations, otherwise we would end up
+                * with all sorts of bugs when removing dwc3.ko.
+                */
+               if (epnum != 0 && epnum != 1) {
+                       dwc3_free_trb_pool(dep);
                        list_del(&dep->endpoint.ep_list);
+               }
 
                kfree(dep);
        }
index 83300d94a8933ff97a52992466799874cdeacb20..f41aa0d0c414312163bf2076935fb96adccd7036 100644 (file)
@@ -146,7 +146,6 @@ config USB_LPC32XX
        depends on ARCH_LPC32XX
        depends on USB_PHY
        select USB_ISP1301
-       select USB_OTG_UTILS
        help
           This option selects the USB device controller in the LPC32xx SoC.
 
index f2a970f75bfa949b2c28db72c183e00790fea280..5a5128a226f737d2ec74915cde9f23d5d74e0470 100644 (file)
@@ -1992,8 +1992,6 @@ static int __init usba_udc_probe(struct platform_device *pdev)
 err_get_hclk:
        clk_put(pclk);
 
-       platform_set_drvdata(pdev, NULL);
-
        return ret;
 }
 
index 6e6518264c42253b810bb41d93fbcd46da7a4906..fd24cb4540a49d6b0382b932ff99eae2dc9c309f 100644 (file)
@@ -2334,21 +2334,11 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "error finding USBD resource\n");
-               return -ENXIO;
-       }
-
        udc->usbd_regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(udc->usbd_regs))
                return PTR_ERR(udc->usbd_regs);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res) {
-               dev_err(dev, "error finding IUDMA resource\n");
-               return -ENXIO;
-       }
-
        udc->iudma_regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(udc->iudma_regs))
                return PTR_ERR(udc->iudma_regs);
@@ -2420,7 +2410,6 @@ static int bcm63xx_udc_remove(struct platform_device *pdev)
        usb_del_gadget_udc(&udc->gadget);
        BUG_ON(udc->driver);
 
-       platform_set_drvdata(pdev, NULL);
        bcm63xx_uninit_udc_hw(udc);
 
        return 0;
index 3d5cfc9c2c78a51950701b68bb94bfbb25f2f25c..80e7f75a56c7efa43b33a752671eb8c263bb1bf9 100644 (file)
@@ -821,8 +821,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
                gi->gstrings[i] = NULL;
                s = usb_gstrings_attach(&gi->cdev, gi->gstrings,
                                USB_GADGET_FIRST_AVAIL_IDX);
-               if (IS_ERR(s))
+               if (IS_ERR(s)) {
+                       ret = PTR_ERR(s);
                        goto err_comp_cleanup;
+               }
 
                gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id;
                gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id;
@@ -847,8 +849,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
                        }
                        cfg->gstrings[i] = NULL;
                        s = usb_gstrings_attach(&gi->cdev, cfg->gstrings, 1);
-                       if (IS_ERR(s))
+                       if (IS_ERR(s)) {
+                               ret = PTR_ERR(s);
                                goto err_comp_cleanup;
+                       }
                        c->iConfiguration = s[0].id;
                }
 
index a792e322f4f1b84c58e20538b48d1e7beea4082f..c588e8e486e590843ed2714403f282a7b3748a68 100644 (file)
@@ -1001,7 +1001,6 @@ static int dummy_udc_remove(struct platform_device *pdev)
        struct dummy    *dum = platform_get_drvdata(pdev);
 
        usb_del_gadget_udc(&dum->gadget);
-       platform_set_drvdata(pdev, NULL);
        device_remove_file(&dum->gadget.dev, &dev_attr_function);
        return 0;
 }
@@ -2661,8 +2660,10 @@ static int __init init(void)
        }
        for (i = 0; i < mod_data.num; i++) {
                dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL);
-               if (!dum[i])
+               if (!dum[i]) {
+                       retval = -ENOMEM;
                        goto err_add_pdata;
+               }
                retval = platform_device_add_data(the_hcd_pdev[i], &dum[i],
                                sizeof(void *));
                if (retval)
index d893d69290794efa9c8d1e064fe1abd62077ff6a..abf8a31ae146028a89130835d600b4a7dfa00486 100644 (file)
@@ -816,6 +816,7 @@ ecm_unbind(struct usb_configuration *c, struct usb_function *f)
  * @c: the configuration to support the network link
  * @ethaddr: a buffer in which the ethernet address of the host
  *     side of the link was recorded
+ * @dev: eth_dev structure
  * Context: single threaded during gadget setup
  *
  * Returns zero on success, else negative errno.
index 185d6f5e4e4d15a933aeaa5b5e8ab0edc2c642b5..7be04b3424941f73c96d7a6de51d53101efe0f92 100644 (file)
@@ -373,6 +373,7 @@ geth_unbind(struct usb_configuration *c, struct usb_function *f)
  * @c: the configuration to support the network link
  * @ethaddr: a buffer in which the ethernet address of the host
  *     side of the link was recorded
+ * @dev: eth_dev structure
  * Context: single threaded during gadget setup
  *
  * Returns zero on success, else negative errno.
index c7468b6c07b0057041f63328da8cf075f6ada80b..03c1fb686644e02d34b5b0f4a9c8e847d6a6de27 100644 (file)
@@ -456,8 +456,6 @@ static int snd_uac2_remove(struct platform_device *pdev)
 {
        struct snd_card *card = platform_get_drvdata(pdev);
 
-       platform_set_drvdata(pdev, NULL);
-
        if (card)
                return snd_card_free(card);
 
index cec8871b77f9d9acd91b2c845175b6f9150e8571..b8632d40f8bffcd3f653a99c12f6d6b02a8cd11d 100644 (file)
@@ -1461,8 +1461,10 @@ static int __init fusb300_probe(struct platform_device *pdev)
 
        fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep,
                                GFP_KERNEL);
-       if (fusb300->ep0_req == NULL)
+       if (fusb300->ep0_req == NULL) {
+               ret = -ENOMEM;
                goto clean_up3;
+       }
 
        init_controller(fusb300);
        ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget);
index b5cebd6b0d7af170128ede0f90754052b694aeb2..9b2d24e4c95f0c27f3e98c0d2dbf3aa6bfad4b83 100644 (file)
@@ -1511,8 +1511,6 @@ static int __exit imx_udc_remove(struct platform_device *pdev)
        if (pdata->exit)
                pdata->exit(&pdev->dev);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 866ef09992478e18c114592305297efe640d225f..51cfe72da5bb88d6fb0a21a1d1d2753fa2e6c115 100644 (file)
@@ -1660,8 +1660,10 @@ static int __init m66592_probe(struct platform_device *pdev)
        m66592->epaddr2ep[0] = &m66592->ep[0];
 
        m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL);
-       if (m66592->ep0_req == NULL)
+       if (m66592->ep0_req == NULL) {
+               ret = -ENOMEM;
                goto clean_up3;
+       }
        m66592->ep0_req->complete = nop_completion;
 
        init_controller(m66592);
index ef47495dec8f5e638e97ee4171b0a54b22dc2ba4..95c531d5aa4fa2dd9fcf4d038c5426254cb16629 100644 (file)
@@ -2236,7 +2236,6 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev)
                dev->transceiver = NULL;
        }
 
-       platform_set_drvdata(pdev, NULL);
        the_controller = NULL;
        return 0;
 }
index 0b742d171843fc8579691930791d0926a7f3d958..7ff7d9cf2061d28ebcd2b4a2935678b0f1460144 100644 (file)
@@ -1977,8 +1977,10 @@ static int __init r8a66597_probe(struct platform_device *pdev)
 
        r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
                                                        GFP_KERNEL);
-       if (r8a66597->ep0_req == NULL)
+       if (r8a66597->ep0_req == NULL) {
+               ret = -ENOMEM;
                goto clean_up3;
+       }
        r8a66597->ep0_req->complete = nop_completion;
 
        ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
index a3cdc32115d52c646f2ae2b89ded1fcf378a9d70..af22f24046b239dcc9dda8553c17377326c62ded 100644 (file)
@@ -437,7 +437,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
        if (hs_req->req.length == 0)
                return;
 
-       usb_gadget_unmap_request(&hsotg->gadget, hs_req, hs_ep->dir_in);
+       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
 }
 
 /**
index d0e75e1b3ccb9323de0a29622586894710fd2679..09c4f70c93c4cb13639cf89ed7d423c2231df621 100644 (file)
@@ -1851,6 +1851,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
                irq = gpio_to_irq(udc_info->vbus_pin);
                if (irq < 0) {
                        dev_err(dev, "no irq for gpio vbus pin\n");
+                       retval = irq;
                        goto err_gpio_claim;
                }
 
@@ -1948,8 +1949,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
        iounmap(base_addr);
        release_mem_region(rsrc_start, rsrc_len);
 
-       platform_set_drvdata(pdev, NULL);
-
        if (!IS_ERR(udc_clock) && udc_clock != NULL) {
                clk_disable(udc_clock);
                clk_put(udc_clock);
index 2cd6262e8b7149a23d8458e76e5ef6ef3b919f1c..0deb9d6cde26245fd1c52c0b541eecd100aedce6 100644 (file)
@@ -284,12 +284,16 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
        ss_opts->bulk_buflen = gzero_options.bulk_buflen;
 
        func_ss = usb_get_function(func_inst_ss);
-       if (IS_ERR(func_ss))
+       if (IS_ERR(func_ss)) {
+               status = PTR_ERR(func_ss);
                goto err_put_func_inst_ss;
+       }
 
        func_inst_lb = usb_get_function_instance("Loopback");
-       if (IS_ERR(func_inst_lb))
+       if (IS_ERR(func_inst_lb)) {
+               status = PTR_ERR(func_inst_lb);
                goto err_put_func_ss;
+       }
 
        lb_opts = container_of(func_inst_lb, struct f_lb_opts, func_inst);
        lb_opts->bulk_buflen = gzero_options.bulk_buflen;
index de94f2699063b12dd3c0cd1613a9cd2e37cd00c6..344d5e2f87d73a7234452a2b698f63b4959494e5 100644 (file)
@@ -507,7 +507,7 @@ endif # USB_OHCI_HCD
 
 config USB_UHCI_HCD
        tristate "UHCI HCD (most Intel and VIA) support"
-       depends on PCI || SPARC_LEON || ARCH_VT8500
+       depends on PCI || USB_UHCI_SUPPORT_NON_PCI_HC
        ---help---
          The Universal Host Controller Interface is a standard by Intel for
          accessing the USB hardware in the PC (which is also called the USB
@@ -524,26 +524,19 @@ config USB_UHCI_HCD
 
 config USB_UHCI_SUPPORT_NON_PCI_HC
        bool
-       depends on USB_UHCI_HCD
-       default y if (SPARC_LEON || ARCH_VT8500)
+       default y if (SPARC_LEON || USB_UHCI_PLATFORM)
 
 config USB_UHCI_PLATFORM
-       bool "Generic UHCI Platform Driver support"
-       depends on USB_UHCI_SUPPORT_NON_PCI_HC
+       bool
        default y if ARCH_VT8500
-       ---help---
-         Enable support for generic UHCI platform devices that require no
-         additional configuration.
 
 config USB_UHCI_BIG_ENDIAN_MMIO
        bool
-       depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON
-       default y
+       default y if SPARC_LEON
 
 config USB_UHCI_BIG_ENDIAN_DESC
        bool
-       depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON
-       default y
+       default y if SPARC_LEON
 
 config USB_FHCI_HCD
        tristate "Freescale QE USB Host Controller support"
index 66420097c24234eb3db9d09a656345cd80edcc9f..02f4611faa62c571a3a59ff4b33a35ead1966caf 100644 (file)
@@ -63,8 +63,6 @@ static void atmel_stop_ehci(struct platform_device *pdev)
 
 /*-------------------------------------------------------------------------*/
 
-static u64 at91_ehci_dma_mask = DMA_BIT_MASK(32);
-
 static int ehci_atmel_drv_probe(struct platform_device *pdev)
 {
        struct usb_hcd *hcd;
@@ -93,7 +91,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &at91_ehci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
        if (!hcd) {
index 312fc10da3c7aade0225cd126f28e372f1906058..246e124e6ac55c3dc66ba822a9b3f8ba5bebdd6a 100644 (file)
@@ -1286,23 +1286,6 @@ MODULE_LICENSE ("GPL");
 #define        PLATFORM_DRIVER         ehci_hcd_sead3_driver
 #endif
 
-#if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \
-       !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_HCD_OMAP) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_HCD_ORION) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_HCD_SPEAR) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_S5P) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_HCD_AT91) && \
-       !IS_ENABLED(CONFIG_USB_EHCI_MSM) && \
-       !defined(PLATFORM_DRIVER) && \
-       !defined(PS3_SYSTEM_BUS_DRIVER) && \
-       !defined(OF_PLATFORM_DRIVER) && \
-       !defined(XILINX_OF_PLATFORM_DRIVER)
-#error "missing bus glue for ehci-hcd"
-#endif
-
 static int __init ehci_hcd_init(void)
 {
        int retval = 0;
index 3d1491b5f3605e68b0a72e1d5786233f5ad94d7b..16d7150e855722be6bc143c504d31120adf53f83 100644 (file)
@@ -90,8 +90,6 @@ static const struct ehci_driver_overrides ehci_omap_overrides __initdata = {
        .extra_priv_size = sizeof(struct omap_hcd),
 };
 
-static u64 omap_ehci_dma_mask = DMA_BIT_MASK(32);
-
 /**
  * ehci_hcd_omap_probe - initialize TI-based HCDs
  *
@@ -146,8 +144,10 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &omap_ehci_dma_mask;
+       if (!dev->dma_mask)
+               dev->dma_mask = &dev->coherent_dma_mask;
+       if (!dev->coherent_dma_mask)
+               dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
        hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
                        dev_name(dev));
index 54c57948515051867df3dc8d218699ba6806e5ff..efbc588b48c50acbc2225c6be3f63ce1b1f35ffd 100644 (file)
@@ -137,8 +137,6 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
        }
 }
 
-static u64 ehci_orion_dma_mask = DMA_BIT_MASK(32);
-
 static int ehci_orion_drv_probe(struct platform_device *pdev)
 {
        struct orion_ehci_data *pd = pdev->dev.platform_data;
@@ -183,7 +181,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
         * now. Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &ehci_orion_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        if (!request_mem_region(res->start, resource_size(res),
                                ehci_orion_hc_driver.description)) {
index 635775278c7fec525a1a3566fce05bb77f3ad77e..379037f51a2fc41b7e7aaaf112a759ce70615ac5 100644 (file)
@@ -71,8 +71,6 @@ static void s5p_setup_vbus_gpio(struct platform_device *pdev)
                dev_err(dev, "can't request ehci vbus gpio %d", gpio);
 }
 
-static u64 ehci_s5p_dma_mask = DMA_BIT_MASK(32);
-
 static int s5p_ehci_probe(struct platform_device *pdev)
 {
        struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
@@ -90,7 +88,7 @@ static int s5p_ehci_probe(struct platform_device *pdev)
         * Once we move to full device tree support this will vanish off.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &ehci_s5p_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
@@ -107,6 +105,7 @@ static int s5p_ehci_probe(struct platform_device *pdev)
        if (IS_ERR(phy)) {
                /* Fallback to pdata */
                if (!pdata) {
+                       usb_put_hcd(hcd);
                        dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
                        return -EPROBE_DEFER;
                } else {
index acff5b8f6e89dd89edf37d30377c2b1b86307c68..f80d0330d548d36564e9332d60392c9671360263 100644 (file)
@@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
 }
 
 static const unsigned char
-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
+max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
 
 /* carryover low/fullspeed bandwidth that crosses uframe boundries */
 static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
@@ -646,6 +646,10 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
        /* reschedule QH iff another request is queued */
        if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
                rc = qh_schedule(ehci, qh);
+               if (rc == 0) {
+                       qh_refresh(ehci, qh);
+                       qh_link_periodic(ehci, qh);
+               }
 
                /* An error here likely indicates handshake failure
                 * or no space left in the schedule.  Neither fault
@@ -653,9 +657,10 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
                 *
                 * FIXME kill the now-dysfunctional queued urbs
                 */
-               if (rc != 0)
+               else {
                        ehci_err(ehci, "can't reschedule qh %p, err %d\n",
                                        qh, rc);
+               }
        }
 
        /* maybe turn off periodic schedule */
index 61ecfb3d52f5d102f78d058fa5c032d37e0e7742..bd3e5cbc6240316dc5947f6f38dbc6a6c7492c51 100644 (file)
@@ -58,8 +58,6 @@ static int ehci_spear_drv_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend,
                ehci_spear_drv_resume);
 
-static u64 spear_ehci_dma_mask = DMA_BIT_MASK(32);
-
 static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
 {
        struct usb_hcd *hcd ;
@@ -84,7 +82,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &spear_ehci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        usbh_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(usbh_clk)) {
index e3eddc31ac83fd4f2065a5815967700fa6932ef7..59d111bf44a9d961b45fc85b99d659a8a67230b4 100644 (file)
@@ -637,8 +637,6 @@ static void tegra_ehci_set_phcd(struct usb_phy *x, bool enable)
        writel(val, base + TEGRA_USB_PORTSC1);
 }
 
-static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
-
 static int tegra_ehci_probe(struct platform_device *pdev)
 {
        struct resource *res;
@@ -661,7 +659,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &tegra_ehci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        setup_vbus_gpio(pdev, pdata);
 
index 125e261f5bfca15be3d658f4cd976434c58b4912..2facee53eab16103af0a3abc0d67712f8ecc991f 100644 (file)
@@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
        int retval = 1;
        unsigned long flags;
 
-       /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+       /* if !PM_RUNTIME, root hub timers won't get shut down ... */
        if (!HC_IS_RUNNING(hcd->state))
                return 0;
 
index bbb791bd7617d6db99e064713ea95c527a83bfbf..a13709ee4e5d933bf4514df16296d66c7700ef17 100644 (file)
@@ -373,8 +373,10 @@ static int isp1760_plat_probe(struct platform_device *pdev)
        irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!irq_res) {
                pr_warning("isp1760: IRQ resource not available\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto cleanup;
        }
+
        irqflags |= irq_res->flags & IRQF_TRIGGER_MASK;
 
        if (priv) {
index a0cb44f0e72420ad2c777f09818229a2a3f7c3c6..2ee1496dbc1d37c1f778d847712f78437abe21a4 100644 (file)
@@ -504,8 +504,6 @@ static const struct of_device_id at91_ohci_dt_ids[] = {
 
 MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
 
-static u64 at91_ohci_dma_mask = DMA_BIT_MASK(32);
-
 static int ohci_at91_of_init(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -522,7 +520,9 @@ static int ohci_at91_of_init(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &at91_ohci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
index 07592c00af26ba166ad75280296c5364b19af9f2..b0b542c14e3132a9f35eceb86ef1255b676e13ca 100644 (file)
@@ -98,8 +98,6 @@ static const struct hc_driver exynos_ohci_hc_driver = {
        .start_port_reset       = ohci_start_port_reset,
 };
 
-static u64 ohci_exynos_dma_mask = DMA_BIT_MASK(32);
-
 static int exynos_ohci_probe(struct platform_device *pdev)
 {
        struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
@@ -117,7 +115,7 @@ static int exynos_ohci_probe(struct platform_device *pdev)
         * Once we move to full device tree support this will vanish off.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &ohci_exynos_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
index 9e6de9586ae4bf82afb01f4c70464ffbbcd01117..fc627fd5411670369d89ba4aec7428b55e2fdef0 100644 (file)
@@ -233,14 +233,14 @@ static int ohci_urb_enqueue (
                        urb->start_frame = frame;
                }
        } else if (ed->type == PIPE_ISOCHRONOUS) {
-               u16     next = ohci_frame_no(ohci) + 2;
+               u16     next = ohci_frame_no(ohci) + 1;
                u16     frame = ed->last_iso + ed->interval;
 
                /* Behind the scheduling threshold? */
                if (unlikely(tick_before(frame, next))) {
 
                        /* USB_ISO_ASAP: Round up to the first available slot */
-                       if (urb->transfer_flags & URB_ISO_ASAP)
+                       if (urb->transfer_flags & URB_ISO_ASAP) {
                                frame += (next - frame + ed->interval - 1) &
                                                -ed->interval;
 
@@ -248,21 +248,25 @@ static int ohci_urb_enqueue (
                         * Not ASAP: Use the next slot in the stream.  If
                         * the entire URB falls before the threshold, fail.
                         */
-                       else if (tick_before(frame + ed->interval *
+                       } else {
+                               if (tick_before(frame + ed->interval *
                                        (urb->number_of_packets - 1), next)) {
-                               retval = -EXDEV;
-                               usb_hcd_unlink_urb_from_ep(hcd, urb);
-                               goto fail;
-                       }
+                                       retval = -EXDEV;
+                                       usb_hcd_unlink_urb_from_ep(hcd, urb);
+                                       goto fail;
+                               }
 
-                       /*
-                        * Some OHCI hardware doesn't handle late TDs
-                        * correctly.  After retiring them it proceeds to
-                        * the next ED instead of the next TD.  Therefore
-                        * we have to omit the late TDs entirely.
-                        */
-                       urb_priv->td_cnt = DIV_ROUND_UP(next - frame,
-                                       ed->interval);
+                               /*
+                                * Some OHCI hardware doesn't handle late TDs
+                                * correctly.  After retiring them it proceeds
+                                * to the next ED instead of the next TD.
+                                * Therefore we have to omit the late TDs
+                                * entirely.
+                                */
+                               urb_priv->td_cnt = DIV_ROUND_UP(
+                                               (u16) (next - frame),
+                                               ed->interval);
+                       }
                }
                urb->start_frame = frame;
        }
index f4988fbe78e79988c59390b216f75b8c0ff8c8f7..5d7eb72c506403b6e3022f14b09aa47e2c8a97c1 100644 (file)
@@ -223,8 +223,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
 
        isp1301_i2c_client = isp1301_get_client(isp1301_node);
        if (!isp1301_i2c_client) {
-               ret = -EPROBE_DEFER;
-               goto out;
+               return -EPROBE_DEFER;
        }
 
        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
@@ -234,7 +233,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        if (usb_disabled()) {
                dev_err(&pdev->dev, "USB is disabled\n");
                ret = -ENODEV;
-               goto out;
+               goto fail_disable;
        }
 
        /* Enable AHB slave USB clock, needed for further USB clock control */
@@ -245,19 +244,19 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        if (IS_ERR(usb_pll_clk)) {
                dev_err(&pdev->dev, "failed to acquire USB PLL\n");
                ret = PTR_ERR(usb_pll_clk);
-               goto out1;
+               goto fail_pll;
        }
 
        ret = clk_enable(usb_pll_clk);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to start USB PLL\n");
-               goto out2;
+               goto fail_pllen;
        }
 
        ret = clk_set_rate(usb_pll_clk, 48000);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to set USB clock rate\n");
-               goto out3;
+               goto fail_rate;
        }
 
        /* Enable USB device clock */
@@ -265,13 +264,13 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        if (IS_ERR(usb_dev_clk)) {
                dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
                ret = PTR_ERR(usb_dev_clk);
-               goto out4;
+               goto fail_dev;
        }
 
        ret = clk_enable(usb_dev_clk);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
-               goto out5;
+               goto fail_deven;
        }
 
        /* Enable USB otg clocks */
@@ -279,7 +278,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        if (IS_ERR(usb_otg_clk)) {
                dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
                ret = PTR_ERR(usb_otg_clk);
-               goto out6;
+               goto fail_otg;
        }
 
        __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL);
@@ -287,7 +286,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        ret = clk_enable(usb_otg_clk);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
-               goto out7;
+               goto fail_otgen;
        }
 
        isp1301_configure();
@@ -296,20 +295,14 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        if (!hcd) {
                dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
                ret = -ENOMEM;
-               goto out8;
+               goto fail_hcd;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get MEM resource\n");
-               ret =  -ENOMEM;
-               goto out8;
-       }
-
        hcd->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(hcd->regs)) {
                ret = PTR_ERR(hcd->regs);
-               goto out8;
+               goto fail_resource;
        }
        hcd->rsrc_start = res->start;
        hcd->rsrc_len = resource_size(res);
@@ -317,7 +310,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = -ENXIO;
-               goto out8;
+               goto fail_resource;
        }
 
        nxp_start_hc();
@@ -331,23 +324,24 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
                return ret;
 
        nxp_stop_hc();
-out8:
+fail_resource:
        usb_put_hcd(hcd);
-out7:
+fail_hcd:
        clk_disable(usb_otg_clk);
-out6:
+fail_otgen:
        clk_put(usb_otg_clk);
-out5:
+fail_otg:
        clk_disable(usb_dev_clk);
-out4:
+fail_deven:
        clk_put(usb_dev_clk);
-out3:
+fail_dev:
+fail_rate:
        clk_disable(usb_pll_clk);
-out2:
+fail_pllen:
        clk_put(usb_pll_clk);
-out1:
+fail_pll:
+fail_disable:
        isp1301_i2c_client = NULL;
-out:
        return ret;
 }
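The relabelling from out1..out8 to fail_* in the probe above keeps the unwind ladder readable: each label names the step that failed and releases, in reverse order, only what was acquired before it. A minimal standalone sketch of the idiom, with invented resources rather than the driver's clocks and HCD:

#include <stdlib.h>

/* Hypothetical resources, only to show the shape of the unwind ladder. */
static void *get_a(void) { return malloc(1); }
static void *get_b(void) { return NULL; }       /* simulate a failure */
static void put(void *p) { free(p); }

static int probe(void **out_a, void **out_b)
{
        void *a, *b;
        int ret;

        a = get_a();
        if (!a)
                return -1;

        b = get_b();
        if (!b) {
                ret = -1;
                goto fail_b;    /* label names the step that failed */
        }

        *out_a = a;
        *out_b = b;
        return 0;

fail_b:                         /* unwind in reverse order of acquisition */
        put(a);
        return ret;
}

int main(void)
{
        void *a, *b;

        return probe(&a, &b) ? 1 : 0;
}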
 
index ddfc31427bc09e1fc95c63ea40f006ad00631e66..8663851c8d8eac79b5d358d9e6f81f6116d5fc41 100644 (file)
@@ -114,8 +114,6 @@ static const struct hc_driver ohci_omap3_hc_driver = {
 
 /*-------------------------------------------------------------------------*/
 
-static u64 omap_ohci_dma_mask = DMA_BIT_MASK(32);
-
 /*
  * configure so an HC device and id are always provided
  * always called with process context; sleeping is OK
@@ -168,8 +166,10 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &omap_ohci_dma_mask;
+       if (!dev->dma_mask)
+               dev->dma_mask = &dev->coherent_dma_mask;
+       if (!dev->coherent_dma_mask)
+               dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
        hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
                        dev_name(dev));
index efe71f3ca4772068c557b3063f33cd247a0e19df..279b2ef1741149fe05deda764c09151763f56f1d 100644 (file)
@@ -282,8 +282,6 @@ static const struct of_device_id pxa_ohci_dt_ids[] = {
 
 MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids);
 
-static u64 pxa_ohci_dma_mask = DMA_BIT_MASK(32);
-
 static int ohci_pxa_of_init(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -298,7 +296,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pxa_ohci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
index 9020bf0e2eca13f8837aa46ab8c5ee55969c8b49..3e19e0170d1195711b4f235fd4be043fb234d65e 100644 (file)
@@ -91,8 +91,6 @@ static const struct hc_driver ohci_spear_hc_driver = {
        .start_port_reset       = ohci_start_port_reset,
 };
 
-static u64 spear_ohci_dma_mask = DMA_BIT_MASK(32);
-
 static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
 {
        const struct hc_driver *driver = &ohci_spear_hc_driver;
@@ -114,7 +112,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &spear_ohci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        usbh_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(usbh_clk)) {
index 4f0f0339532f7e2729fe90f6972301ec32116041..0f401dbfaf073bb59f7faad445a19663bded3a0e 100644 (file)
@@ -3084,7 +3084,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
        int ports, i, retval = 1;
        unsigned long flags;
 
-       /* if !USB_SUSPEND, root hub timers won't get shut down ... */
+       /* if !PM_RUNTIME, root hub timers won't get shut down ... */
        if (!HC_IS_RUNNING(hcd->state))
                return 0;
 
index ad4483efb6d602cad2a841d8747b5983c16fc33c..b2ec7fe758ddcf3dba22f922493ca6cebdeaea10 100644 (file)
@@ -22,7 +22,7 @@
  * and usb-storage.
  *
  * TODO:
- * - usb suspend/resume triggered by sl811 (with USB_SUSPEND)
+ * - usb suspend/resume triggered by sl811 (with PM_RUNTIME)
  * - various issues noted in the code
  * - performance work; use both register banks; ...
  * - use urb->iso_frame_desc[] with ISO transfers
index f87bee6d2789256fc8c8dcdde71a6e4510108397..9189bc984c98ce9804d51ec94c4aebe4147b8f3f 100644 (file)
@@ -225,7 +225,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
                /* auto-stop if nothing connected for 1 second */
                if (any_ports_active(uhci))
                        uhci->rh_state = UHCI_RH_RUNNING;
-               else if (time_after_eq(jiffies, uhci->auto_stop_time))
+               else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
+                               !uhci->wait_for_hp)
                        suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
                break;
 
index 8c4dace4b14a5c0455a79b389347cdfab640bd05..f1db61ada6a84c435aedc3967f04f8aab76c1e55 100644 (file)
@@ -60,8 +60,6 @@ static const struct hc_driver uhci_platform_hc_driver = {
        .hub_control =          uhci_hub_control,
 };
 
-static u64 platform_uhci_dma_mask = DMA_BIT_MASK(32);
-
 static int uhci_hcd_platform_probe(struct platform_device *pdev)
 {
        struct usb_hcd *hcd;
@@ -78,7 +76,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
         * Once we have dma capability bindings this can go away.
         */
        if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &platform_uhci_dma_mask;
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       if (!pdev->dev.coherent_dma_mask)
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 
        hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
                        pdev->name);
index f0976d8190bc363b52d908540de8214fbeed43b8..041c6ddb695c8ec6fa17d371fe7904de2e47d5ab 100644 (file)
@@ -1287,7 +1287,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                return -EINVAL;         /* Can't change the period */
 
        } else {
-               next = uhci->frame_number + 2;
+               next = uhci->frame_number + 1;
 
                /* Find the next unused frame */
                if (list_empty(&qh->queue)) {
index 965b539bc47410721c9021f41eb7bf051dcc8feb..fbf75e57628b72e0b7a74237d8a9610826b2ab93 100644 (file)
@@ -1423,15 +1423,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
        ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
 
        /* Set the max packet size and max burst */
+       max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+       max_burst = 0;
        switch (udev->speed) {
        case USB_SPEED_SUPER:
-               max_packet = usb_endpoint_maxp(&ep->desc);
-               ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
                /* dig out max burst from ep companion desc */
-               max_packet = ep->ss_ep_comp.bMaxBurst;
-               ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
+               max_burst = ep->ss_ep_comp.bMaxBurst;
                break;
        case USB_SPEED_HIGH:
+               /* Some devices get this wrong */
+               if (usb_endpoint_xfer_bulk(&ep->desc))
+                       max_packet = 512;
                /* bits 11:12 specify the number of additional transaction
                 * opportunities per microframe (USB 2.0, section 9.6.6)
                 */
@@ -1439,17 +1441,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                                usb_endpoint_xfer_int(&ep->desc)) {
                        max_burst = (usb_endpoint_maxp(&ep->desc)
                                     & 0x1800) >> 11;
-                       ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
                }
-               /* Fall through */
+               break;
        case USB_SPEED_FULL:
        case USB_SPEED_LOW:
-               max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
-               ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
                break;
        default:
                BUG();
        }
+       ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
+                       MAX_BURST(max_burst));
        max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
        ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
 
@@ -1826,6 +1827,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
 
+       if (!xhci->rh_bw)
+               goto no_bw;
+
        num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
        for (i = 0; i < num_ports; i++) {
                struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
@@ -1844,6 +1848,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
                }
        }
 
+no_bw:
        xhci->num_usb2_ports = 0;
        xhci->num_usb3_ports = 0;
        xhci->num_active_eps = 0;
@@ -2255,6 +2260,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        u32 page_size, temp;
        int i;
 
+       INIT_LIST_HEAD(&xhci->lpm_failed_devs);
+       INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+
        page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
        xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
        for (i = 0; i < 16; i++) {
@@ -2333,7 +2341,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
        if (!xhci->cmd_ring)
                goto fail;
-       INIT_LIST_HEAD(&xhci->cancel_cmd_list);
        xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
        xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
                        (unsigned long long)xhci->cmd_ring->first_seg->dma);
@@ -2444,8 +2451,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        if (xhci_setup_port_arrays(xhci, flags))
                goto fail;
 
-       INIT_LIST_HEAD(&xhci->lpm_failed_devs);
-
        /* Enable USB 3.0 device notifications for function remote wake, which
         * is necessary for allowing USB 3.0 devices to do remote wakeup from
         * U3 (device suspend).
index 1a30c380043ce258aa660e67edb38210707fffd6..cc24e39b97d5b8b2bdf7faada7f7767b4f8013c2 100644 (file)
@@ -221,6 +221,14 @@ static void xhci_pci_remove(struct pci_dev *dev)
 static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
+
+       /*
+        * Systems with the TI redriver that loses port status change events
+        * need to have the registers polled during D3, so avoid D3cold.
+        */
+       if (xhci_compliance_mode_recovery_timer_quirk_check())
+               pdev->no_d3cold = true;
 
        return xhci_suspend(xhci);
 }
index b4aa79d154b28b29db4b2a6509e974b74fdd4192..d8f640b12dd9d950e842892858a617b7fa97247e 100644 (file)
@@ -466,7 +466,7 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
  * Systems:
  * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
  */
-static bool compliance_mode_recovery_timer_quirk_check(void)
+bool xhci_compliance_mode_recovery_timer_quirk_check(void)
 {
        const char *dmi_product_name, *dmi_sys_vendor;
 
@@ -517,7 +517,7 @@ int xhci_init(struct usb_hcd *hcd)
        xhci_dbg(xhci, "Finished xhci_init\n");
 
        /* Initializing Compliance Mode Recovery Data If Needed */
-       if (compliance_mode_recovery_timer_quirk_check()) {
+       if (xhci_compliance_mode_recovery_timer_quirk_check()) {
                xhci->quirks |= XHCI_COMP_MODE_QUIRK;
                compliance_mode_recovery_timer_init(xhci);
        }
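xhci_compliance_mode_recovery_timer_quirk_check() is exported so the PCI suspend path above can refuse D3cold on the affected systems; per the comment above, those are Hewlett-Packard Z420, Z620 and Z820 machines whose TI redriver drops port status change events. A standalone approximation of a DMI-style match, taking the strings as parameters instead of reading them from firmware:

#include <stdio.h>
#include <string.h>

/* Takes the DMI strings as parameters; the driver reads them with
 * dmi_get_system_info() instead. */
static int needs_comp_mode_quirk(const char *sys_vendor, const char *product)
{
        if (!sys_vendor || !product)
                return 0;
        if (!strstr(sys_vendor, "Hewlett-Packard"))
                return 0;
        return strstr(product, "Z420") != NULL ||
               strstr(product, "Z620") != NULL ||
               strstr(product, "Z820") != NULL;
}

int main(void)
{
        printf("%d\n", needs_comp_mode_quirk("Hewlett-Packard",
                                             "HP Z620 Workstation"));
        return 0;
}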
@@ -956,6 +956,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
        int                     retval = 0;
+       bool                    comp_timer_running = false;
 
        /* Wait a bit if either of the roothubs need to settle from the
         * transition into bus suspend.
@@ -993,6 +994,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
        /* If restore operation fails, re-initialize the HC during resume */
        if ((temp & STS_SRE) || hibernated) {
+
+               if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+                               !(xhci_all_ports_seen_u0(xhci))) {
+                       del_timer_sync(&xhci->comp_mode_recovery_timer);
+                       xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
+               }
+
                /* Let the USB core know _both_ roothubs lost power. */
                usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
                usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
@@ -1035,6 +1043,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                retval = xhci_init(hcd->primary_hcd);
                if (retval)
                        return retval;
+               comp_timer_running = true;
+
                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
                if (!retval) {
@@ -1076,7 +1086,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
         * to suffer the Compliance Mode issue again. It doesn't matter if
         * ports have entered previously to U0 before system's suspension.
         */
-       if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+       if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
                compliance_mode_recovery_timer_init(xhci);
 
        /* Re-enable port polling. */
index 29c978e37135a9c836808407907803f4770fadae..77600cefcaf1df6ed3209a41c2a00dcdbb31b4dc 100644 (file)
@@ -1853,4 +1853,7 @@ struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
 
+/* xHCI quirks */
+bool xhci_compliance_mode_recovery_timer_quirk_check(void);
+
 #endif /* __LINUX_XHCI_HCD_H */
index 3a18e44e9391850ab7f7ee7858cd6d134b59361e..e1b661d040217a3cdd0e76b325616f85adc04bd6 100644 (file)
@@ -560,6 +560,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)
                if (!config) {
                        dev_err(&pdev->dev,
                                "failed to allocate musb hdrc config\n");
+                       ret = -ENOMEM;
                        goto err2;
                }
 
index 8914dec49f01cfc2e70a89d1e2de1c29a70ef08c..9d3044bdebe54c68ac7d506a2fa593f59287c4c0 100644 (file)
@@ -1232,7 +1232,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
        void __iomem            *mbase = musb->mregs;
        struct dma_channel      *dma;
        bool                    transfer_pending = false;
-       static bool use_sg;
 
        musb_ep_select(mbase, epnum);
        tx_csr = musb_readw(epio, MUSB_TXCSR);
@@ -1463,9 +1462,9 @@ void musb_host_tx(struct musb *musb, u8 epnum)
         * NULL.
         */
        if (!urb->transfer_buffer)
-               use_sg = true;
+               qh->use_sg = true;
 
-       if (use_sg) {
+       if (qh->use_sg) {
                /* sg_miter_start is already done in musb_ep_program */
                if (!sg_miter_next(&qh->sg_miter)) {
                        dev_err(musb->controller, "error: sg list empty\n");
@@ -1484,9 +1483,9 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 
        qh->segsize = length;
 
-       if (use_sg) {
+       if (qh->use_sg) {
                if (offset + length >= urb->transfer_buffer_length)
-                       use_sg = false;
+                       qh->use_sg = false;
        }
 
        musb_ep_select(mbase, epnum);
@@ -1552,7 +1551,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
        bool                    done = false;
        u32                     status;
        struct dma_channel      *dma;
-       static bool use_sg;
        unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
 
        musb_ep_select(mbase, epnum);
@@ -1878,12 +1876,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                         * NULL.
                         */
                        if (!urb->transfer_buffer) {
-                               use_sg = true;
+                               qh->use_sg = true;
                                sg_miter_start(&qh->sg_miter, urb->sg, 1,
                                                sg_flags);
                        }
 
-                       if (use_sg) {
+                       if (qh->use_sg) {
                                if (!sg_miter_next(&qh->sg_miter)) {
                                        dev_err(musb->controller, "error: sg list empty\n");
                                        sg_miter_stop(&qh->sg_miter);
@@ -1913,8 +1911,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
        urb->actual_length += xfer_len;
        qh->offset += xfer_len;
        if (done) {
-               if (use_sg)
-                       use_sg = false;
+               if (qh->use_sg)
+                       qh->use_sg = false;
 
                if (urb->status == -EINPROGRESS)
                        urb->status = status;
index 5a9c8feec10c8bc5814bec18a7018585c25be47a..738f7eb60df96d8aa78859720940af81c007cd3b 100644 (file)
@@ -74,6 +74,7 @@ struct musb_qh {
        u16                     frame;          /* for periodic schedule */
        unsigned                iso_idx;        /* in urb->iso_frame_desc[] */
        struct sg_mapping_iter sg_miter;        /* for highmem in PIO mode */
+       bool                    use_sg;         /* to track urb using sglist */
 };
 
 /* map from control or bulk queue head to the first qh on that ring */
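The musb_host.c hunks replace a function-scoped static bool use_sg, one flag silently shared by every endpoint and URB, with the qh->use_sg field added to struct musb_qh above, so the scatter-gather state travels with the queue it describes. A toy standalone contrast of the two shapes (invented struct, not musb's):

#include <stdbool.h>
#include <stdio.h>

struct qh {
        bool use_sg;    /* per-queue flag: independent for every queue */
        int id;
};

/* Buggy shape: one static flag is shared by every queue that calls this. */
static void start_buggy(struct qh *qh, bool sg)
{
        static bool use_sg;     /* leaks state from one queue into the next */

        if (sg)
                use_sg = true;
        printf("qh %d sees use_sg=%d\n", qh->id, use_sg);
}

/* Fixed shape: the flag lives in the queue it describes. */
static void start_fixed(struct qh *qh, bool sg)
{
        if (sg)
                qh->use_sg = true;
        printf("qh %d sees use_sg=%d\n", qh->id, qh->use_sg);
}

int main(void)
{
        struct qh a = { .id = 1 }, b = { .id = 2 };

        start_buggy(&a, true);          /* qh 1 sees use_sg=1 */
        start_buggy(&b, false);         /* qh 2 also sees use_sg=1: leaked */

        start_fixed(&a, true);          /* qh 1 sees use_sg=1 */
        start_fixed(&b, false);         /* qh 2 sees use_sg=0 */
        return 0;
}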
index 3551f1a30c655efdc2977556e9e4f6c540ea115a..628b93fe5cccf585d21759fa112086a345a2e92a 100644 (file)
@@ -549,7 +549,8 @@ static int omap2430_probe(struct platform_device *pdev)
                glue->control_otghs = omap_get_control_dev();
                if (IS_ERR(glue->control_otghs)) {
                        dev_vdbg(&pdev->dev, "Failed to get control device\n");
-                       return -ENODEV;
+                       ret = PTR_ERR(glue->control_otghs);
+                       goto err2;
                }
        } else {
                glue->control_otghs = ERR_PTR(-ENODEV);
index 371d0e74e9094132eb378366858dedd7e654d89f..7ef3eb8617a6c7adc7775bc76a5fb0fd3d699ef8 100644 (file)
@@ -25,7 +25,7 @@ config AB8500_USB
 
 config FSL_USB2_OTG
        bool "Freescale USB OTG Transceiver Driver"
-       depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND
+       depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
        select USB_OTG
        help
          Enable this to support Freescale USB OTG transceiver.
@@ -139,7 +139,6 @@ config USB_ISP1301
        tristate "NXP ISP1301 USB transceiver support"
        depends on USB || USB_GADGET
        depends on I2C
-       select USB_OTG_UTILS
        help
          Say Y here to add support for the NXP ISP1301 USB transceiver driver.
          This chip is typically used as USB transceiver for USB host, gadget
@@ -162,7 +161,7 @@ config USB_MSM_OTG
 
 config USB_MV_OTG
        tristate "Marvell USB OTG support"
-       depends on USB_EHCI_MV && USB_MV_UDC && USB_SUSPEND
+       depends on USB_EHCI_MV && USB_MV_UDC && PM_RUNTIME
        select USB_OTG
        help
          Say Y here if you want to build Marvell USB OTG transciever
index 4acef26a2ef5c2daa5b7a163e2afd297903e9642..e5eb1b5a04ebb815ffd46be5c1ab48b2fb8ed193 100644 (file)
@@ -892,8 +892,6 @@ static int ab8500_usb_remove(struct platform_device *pdev)
        else if (ab->mode == USB_PERIPHERAL)
                ab8500_usb_peri_phy_dis(ab);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 97b9308507c3dae718828645a79069c6af2de46e..e771bafb9f1da3375f9f0492493c973fe0862484 100644 (file)
@@ -799,6 +799,7 @@ static int fsl_otg_conf(struct platform_device *pdev)
 
        /* initialize the otg structure */
        fsl_otg_tc->phy.label = DRIVER_DESC;
+       fsl_otg_tc->phy.dev = &pdev->dev;
        fsl_otg_tc->phy.set_power = fsl_otg_set_power;
 
        fsl_otg_tc->phy.otg->phy = &fsl_otg_tc->phy;
index 4c76074e518d56f60c192f560dfc7a3d049de301..8443335c2ea0260414ea183fe69c809b5b328d64 100644 (file)
@@ -266,6 +266,7 @@ static int __init gpio_vbus_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, gpio_vbus);
        gpio_vbus->dev = &pdev->dev;
        gpio_vbus->phy.label = "gpio-vbus";
+       gpio_vbus->phy.dev = gpio_vbus->dev;
        gpio_vbus->phy.set_power = gpio_vbus_set_power;
        gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend;
        gpio_vbus->phy.state = OTG_STATE_UNDEFINED;
@@ -343,7 +344,6 @@ static int __init gpio_vbus_probe(struct platform_device *pdev)
                gpio_free(pdata->gpio_pullup);
        gpio_free(pdata->gpio_vbus);
 err_gpio:
-       platform_set_drvdata(pdev, NULL);
        kfree(gpio_vbus->phy.otg);
        kfree(gpio_vbus);
        return err;
@@ -365,7 +365,6 @@ static int __exit gpio_vbus_remove(struct platform_device *pdev)
        if (gpio_is_valid(pdata->gpio_pullup))
                gpio_free(pdata->gpio_pullup);
        gpio_free(gpio);
-       platform_set_drvdata(pdev, NULL);
        kfree(gpio_vbus->phy.otg);
        kfree(gpio_vbus);
 
index 225ae6c97eeb4758945cc08b558bdd27ddc7822d..8a55b37d1a024687f6be8078ef4f54f60cdc5111 100644 (file)
@@ -102,6 +102,7 @@ static int isp1301_probe(struct i2c_client *client,
        mutex_init(&isp->mutex);
 
        phy = &isp->phy;
+       phy->dev = &client->dev;
        phy->label = DRV_NAME;
        phy->init = isp1301_phy_init;
        phy->set_vbus = isp1301_phy_set_vbus;
index f7838a43347c64d855fe396c7da3ec2c81246a7b..1568ea63e3380766be04a0491f891e5471bf9ba9 100644 (file)
@@ -278,11 +278,6 @@ static int mv_u3d_phy_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "missing mem resource\n");
-               return -ENODEV;
-       }
-
        phy_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(phy_base))
                return PTR_ERR(phy_base);
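The removed !res checks in this and the following phy probes are redundant: devm_ioremap_resource() validates the resource itself, printing its own error and returning an ERR_PTR (typically -EINVAL) when it is missing, so callers only need the IS_ERR test that remains. A standalone sketch of that validate-inside-the-helper shape, with toy types loosely modelled on, but not taken from, the devm API:

#include <stdio.h>

struct resource { unsigned long start, size; };

/* Helper that validates its own input so callers only need one error check;
 * loosely modelled on devm_ioremap_resource(), not its real signature. */
static void *map_resource(const struct resource *res, int *err)
{
        if (!res || !res->size) {
                fprintf(stderr, "invalid resource\n");
                *err = -22;             /* -EINVAL */
                return NULL;
        }
        *err = 0;
        return (void *)res->start;      /* pretend mapping */
}

int main(void)
{
        int err;
        void *base = map_resource(NULL, &err);  /* no separate !res check needed */

        return base ? 0 : -err;
}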
index c987bbe278519c04bfc2e390dd59dd0ba6a8be60..4a6b03c738765814adc2eefe6bbbdef6eba8bf0b 100644 (file)
@@ -667,7 +667,6 @@ int mv_otg_remove(struct platform_device *pdev)
        mv_otg_disable(mvotg);
 
        usb_remove_phy(&mvotg->phy);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
@@ -850,8 +849,6 @@ static int mv_otg_probe(struct platform_device *pdev)
        flush_workqueue(mvotg->qwork);
        destroy_workqueue(mvotg->qwork);
 
-       platform_set_drvdata(pdev, NULL);
-
        return retval;
 }
 
index 9d4381e64d5126d086af1cbca806519e4813f602..bd601c537c8d434a3e33a7aa59d88d583472244f 100644 (file)
@@ -130,11 +130,6 @@ static int mxs_phy_probe(struct platform_device *pdev)
        int ret;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "can't get device resources\n");
-               return -ENOENT;
-       }
-
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
@@ -160,6 +155,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
        mxs_phy->phy.set_suspend        = mxs_phy_suspend;
        mxs_phy->phy.notify_connect     = mxs_phy_on_connect;
        mxs_phy->phy.notify_disconnect  = mxs_phy_on_disconnect;
+       mxs_phy->phy.type               = USB_PHY_TYPE_USB2;
 
        ATOMIC_INIT_NOTIFIER_HEAD(&mxs_phy->phy.notifier);
 
@@ -180,8 +176,6 @@ static int mxs_phy_remove(struct platform_device *pdev)
 
        usb_remove_phy(&mxs_phy->phy);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 2b10cc969bbb1f4457a463c2072508953a42fdc2..638cc5dade35227732e77e6c5ccb0d769eb47d8b 100644 (file)
@@ -254,8 +254,6 @@ static int nop_usb_xceiv_remove(struct platform_device *pdev)
 
        usb_remove_phy(&nop->phy);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 45ffe036dacc5acc455b8d3b22de6df0dfe7bef7..9d5e273abcc7652f1b3dbe17992720a170124cdf 100644 (file)
@@ -363,11 +363,6 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
        int ret;
 
        phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!phy_mem) {
-               dev_err(dev, "%s: missing mem resource\n", __func__);
-               return -ENODEV;
-       }
-
        phy_base = devm_ioremap_resource(dev, phy_mem);
        if (IS_ERR(phy_base))
                return PTR_ERR(phy_base);
index 133f3d0c554f9e7ed8979c1198f854db08def9ef..5a9efcbcb532cc7ecf7d118b0767db0b970b7960 100644 (file)
@@ -239,11 +239,6 @@ static int samsung_usb3phy_probe(struct platform_device *pdev)
        int ret;
 
        phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!phy_mem) {
-               dev_err(dev, "%s: missing mem resource\n", __func__);
-               return -ENODEV;
-       }
-
        phy_base = devm_ioremap_resource(dev, phy_mem);
        if (IS_ERR(phy_base))
                return PTR_ERR(phy_base);
index 3b16118cbf62d7d8be032f6a243a39fd5965a5be..40e7fd94646f4bfbad247c486bc8659dad6d8f0a 100644 (file)
@@ -43,7 +43,7 @@
 #define DRIVER_NAME "ark3116"
 
 /* usb timeout of 1 second */
-#define ARK_TIMEOUT (1*HZ)
+#define ARK_TIMEOUT 1000
 
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x6547, 0x0232) },
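ARK_TIMEOUT moves from 1*HZ to 1000 because the timeout arguments to usb_control_msg() and usb_bulk_msg() are in milliseconds, not jiffies, so an HZ-based value is only correct on kernels built with HZ=1000; iuu_phoenix and MOS_WDR_TIMEOUT below get the same conversion. A trivial standalone illustration of the unit mismatch, with an assumed tick rate:

#include <stdio.h>

#define HZ 250  /* assumed tick rate, purely for the illustration */

int main(void)
{
        unsigned int wrong_ms = 1 * HZ; /* meant as "1 second", passed as ms */
        unsigned int right_ms = 1000;   /* milliseconds, independent of HZ */

        printf("1*HZ as a ms timeout: %u ms (%.2f s)\n",
               wrong_ms, wrong_ms / 1000.0);
        printf("correct value:        %u ms\n", right_ms);
        return 0;
}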
index d341555d37d8bbe85f719d782049806522d2a066..082120198f870b2104d5230a5f1274803bc1074f 100644 (file)
@@ -65,6 +65,7 @@ static const struct usb_device_id id_table_earthmate[] = {
 static const struct usb_device_id id_table_cyphidcomrs232[] = {
        { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
        { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
+       { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
        { }                                             /* Terminating entry */
 };
 
@@ -78,6 +79,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
        { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
        { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
+       { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
        { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
        { }                                             /* Terminating entry */
 };
@@ -229,6 +231,12 @@ static struct usb_serial_driver * const serial_drivers[] = {
  * Cypress serial helper functions
  *****************************************************************************/
 
+/* FRWD Dongle hidcom needs to skip reset and speed checks */
+static inline bool is_frwd(struct usb_device *dev)
+{
+       return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) &&
+               (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD));
+}
 
 static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
 {
@@ -238,6 +246,10 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
        if (unstable_bauds)
                return new_rate;
 
+       /* FRWD Dongle uses 115200 bps */
+       if (is_frwd(port->serial->dev))
+               return new_rate;
+
        /*
         * The general purpose firmware for the Cypress M8 allows for
         * a maximum speed of 57600bps (I have no idea whether DeLorme
@@ -448,7 +460,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
                return -ENOMEM;
        }
 
-       usb_reset_configuration(serial->dev);
+       /* Skip reset for FRWD device. It is a workaround:

+          device hangs if it receives SET_CONFIGURE in Configured
+          state. */
+       if (!is_frwd(serial->dev))
+               usb_reset_configuration(serial->dev);
 
        priv->cmd_ctrl = 0;
        priv->line_control = 0;
index 67cf60826884fbb3c9eaebaa7ba621e0e22ad947..b461311a2ae71d7ddfd7695e6bb8801daa6354aa 100644 (file)
 #define VENDOR_ID_CYPRESS              0x04b4
 #define PRODUCT_ID_CYPHIDCOM           0x5500
 
+/* FRWD Dongle - a GPS sports watch */
+#define VENDOR_ID_FRWD                 0x6737
+#define PRODUCT_ID_CYPHIDCOM_FRWD      0x0001
+
 /* Powercom UPS, chip CY7C63723 */
 #define VENDOR_ID_POWERCOM             0x0d9f
 #define PRODUCT_ID_UPS                 0x0002
index 242b5776648ab31c1e820bcc82dad0a48c629775..7260ec66034715e7a065d0f5fe6441f49a37f6b4 100644 (file)
@@ -189,6 +189,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
        { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
+       { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) },
+       { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) },
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
@@ -924,8 +926,8 @@ static int  ftdi_tiocmset(struct tty_struct *tty,
 static int  ftdi_ioctl(struct tty_struct *tty,
                        unsigned int cmd, unsigned long arg);
 static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
-static int ftdi_chars_in_buffer(struct tty_struct *tty);
-static int ftdi_get_modem_status(struct tty_struct *tty,
+static bool ftdi_tx_empty(struct usb_serial_port *port);
+static int ftdi_get_modem_status(struct usb_serial_port *port,
                                                unsigned char status[2]);
 
 static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
@@ -961,7 +963,7 @@ static struct usb_serial_driver ftdi_sio_device = {
        .ioctl =                ftdi_ioctl,
        .set_termios =          ftdi_set_termios,
        .break_ctl =            ftdi_break_ctl,
-       .chars_in_buffer =      ftdi_chars_in_buffer,
+       .tx_empty =             ftdi_tx_empty,
 };
 
 static struct usb_serial_driver * const serial_drivers[] = {
@@ -2056,27 +2058,18 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
 
 }
 
-static int ftdi_chars_in_buffer(struct tty_struct *tty)
+static bool ftdi_tx_empty(struct usb_serial_port *port)
 {
-       struct usb_serial_port *port = tty->driver_data;
-       int chars;
        unsigned char buf[2];
        int ret;
 
-       chars = usb_serial_generic_chars_in_buffer(tty);
-       if (chars)
-               goto out;
-
-       /* Check if hardware buffer is empty. */
-       ret = ftdi_get_modem_status(tty, buf);
+       ret = ftdi_get_modem_status(port, buf);
        if (ret == 2) {
                if (!(buf[1] & FTDI_RS_TEMT))
-                       chars = 1;
+                       return false;
        }
-out:
-       dev_dbg(&port->dev, "%s - %d\n", __func__, chars);
 
-       return chars;
+       return true;
 }
 
 /* old_termios contains the original termios settings and tty->termios contains
@@ -2268,10 +2261,9 @@ static void ftdi_set_termios(struct tty_struct *tty,
  * Returns the number of status bytes retrieved (device dependant), or
  * negative error code.
  */
-static int ftdi_get_modem_status(struct tty_struct *tty,
+static int ftdi_get_modem_status(struct usb_serial_port *port,
                                                unsigned char status[2])
 {
-       struct usb_serial_port *port = tty->driver_data;
        struct ftdi_private *priv = usb_get_serial_port_data(port);
        unsigned char *buf;
        int len;
@@ -2336,7 +2328,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
        unsigned char buf[2];
        int ret;
 
-       ret = ftdi_get_modem_status(tty, buf);
+       ret = ftdi_get_modem_status(port, buf);
        if (ret < 0)
                return ret;
 
index 98528270c43c21f5f8d66ccf594a269b32f0600f..6dd79253205dd7ec54278ddb078f08346cdbb4f8 100644 (file)
  */
 #define NEWPORT_VID                    0x104D
 #define NEWPORT_AGILIS_PID             0x3000
+#define NEWPORT_CONEX_CC_PID           0x3002
+#define NEWPORT_CONEX_AGP_PID          0x3006
 
 /* Interbiometrics USB I/O Board */
 /* Developed for Interbiometrics by Rudolf Gugler */
index 297665fdd16d31993a5e95cddd313626453be6c7..ba45170c78e5f601ec6b2c57f5f05600247803f4 100644 (file)
@@ -253,6 +253,37 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
 }
 EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer);
 
+void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
+{
+       struct usb_serial_port *port = tty->driver_data;
+       unsigned int bps;
+       unsigned long period;
+       unsigned long expire;
+
+       bps = tty_get_baud_rate(tty);
+       if (!bps)
+               bps = 9600;     /* B0 */
+       /*
+        * Use a poll-period of roughly the time it takes to send one
+        * character or at least one jiffy.
+        */
+       period = max_t(unsigned long, (10 * HZ / bps), 1);
+       period = min_t(unsigned long, period, timeout);
+
+       dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
+                                       __func__, jiffies_to_msecs(timeout),
+                                       jiffies_to_msecs(period));
+       expire = jiffies + timeout;
+       while (!port->serial->type->tx_empty(port)) {
+               schedule_timeout_interruptible(period);
+               if (signal_pending(current))
+                       break;
+               if (time_after(jiffies, expire))
+                       break;
+       }
+}
+EXPORT_SYMBOL_GPL(usb_serial_generic_wait_until_sent);
+
 static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
                                                int index, gfp_t mem_flags)
 {
index 158bf4bc29cc2b6f60c7211ad0e9f8b75186bf04..1be6ba7bee27452ac55c08a06d98ff2394a2f7fb 100644 (file)
@@ -2019,8 +2019,6 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
        struct edgeport_port *edge_port = usb_get_serial_port_data(port);
        int chars = 0;
        unsigned long flags;
-       int ret;
-
        if (edge_port == NULL)
                return 0;
 
@@ -2028,16 +2026,22 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
        chars = kfifo_len(&edge_port->write_fifo);
        spin_unlock_irqrestore(&edge_port->ep_lock, flags);
 
-       if (!chars) {
-               ret = tx_active(edge_port);
-               if (ret > 0)
-                       chars = ret;
-       }
-
        dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
        return chars;
 }
 
+static bool edge_tx_empty(struct usb_serial_port *port)
+{
+       struct edgeport_port *edge_port = usb_get_serial_port_data(port);
+       int ret;
+
+       ret = tx_active(edge_port);
+       if (ret > 0)
+               return false;
+
+       return true;
+}
+
 static void edge_throttle(struct tty_struct *tty)
 {
        struct usb_serial_port *port = tty->driver_data;
@@ -2557,6 +2561,7 @@ static struct usb_serial_driver edgeport_1port_device = {
        .write                  = edge_write,
        .write_room             = edge_write_room,
        .chars_in_buffer        = edge_chars_in_buffer,
+       .tx_empty               = edge_tx_empty,
        .break_ctl              = edge_break,
        .read_int_callback      = edge_interrupt_callback,
        .read_bulk_callback     = edge_bulk_in_callback,
@@ -2589,6 +2594,7 @@ static struct usb_serial_driver edgeport_2port_device = {
        .write                  = edge_write,
        .write_room             = edge_write_room,
        .chars_in_buffer        = edge_chars_in_buffer,
+       .tx_empty               = edge_tx_empty,
        .break_ctl              = edge_break,
        .read_int_callback      = edge_interrupt_callback,
        .read_bulk_callback     = edge_bulk_in_callback,
index 9d74c278b7b58d5f9ce6878d35d5ff66d869a5cf..790673e5faa744de7da4dfcb38fa2ba11b418ea6 100644 (file)
@@ -287,7 +287,7 @@ static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
            usb_bulk_msg(serial->dev,
                         usb_sndbulkpipe(serial->dev,
                                         port->bulk_out_endpointAddress), buf,
-                        count, &actual, HZ * 1);
+                        count, &actual, 1000);
 
        if (status != IUU_OPERATION_OK)
                dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
@@ -307,7 +307,7 @@ static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
            usb_bulk_msg(serial->dev,
                         usb_rcvbulkpipe(serial->dev,
                                         port->bulk_in_endpointAddress), buf,
-                        count, &actual, HZ * 1);
+                        count, &actual, 1000);
 
        if (status != IUU_OPERATION_OK)
                dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
index eb30d7b01f3681bd00c7fe73827533a613f162b6..3549d073df229617690f90d29b6c2e1f77648b64 100644 (file)
@@ -1548,7 +1548,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
        struct keyspan_serial_private           *s_priv;
        struct keyspan_port_private             *p_priv;
        const struct keyspan_device_details     *d_details;
-       int                                     outcont_urb;
        struct urb                              *this_urb;
        int                                     device_port, err;
 
@@ -1559,7 +1558,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
        d_details = s_priv->device_details;
        device_port = port->number - port->serial->minor;
 
-       outcont_urb = d_details->outcont_endpoints[port->number];
        this_urb = p_priv->outcont_urb;
 
        dev_dbg(&port->dev, "%s - endpoint %d\n", __func__, usb_pipeendpoint(this_urb->pipe));
@@ -1685,14 +1683,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
        err = usb_submit_urb(this_urb, GFP_ATOMIC);
        if (err != 0)
                dev_dbg(&port->dev, "%s - usb_submit_urb(setup) failed (%d)\n", __func__, err);
-#if 0
-       else {
-               dev_dbg(&port->dev, "%s - usb_submit_urb(%d) OK %d bytes (end %d)\n", __func__
-                       outcont_urb, this_urb->transfer_buffer_length,
-                       usb_pipeendpoint(this_urb->pipe));
-       }
-#endif
-
        return 0;
 }
 
index cc0e54345df98c03621949fab2bdc91b7a90af3e..f27c621a9297f3c896b84c9ea700d6285ef6b94f 100644 (file)
@@ -40,7 +40,7 @@
 #define DRIVER_DESC "Moschip USB Serial Driver"
 
 /* default urb timeout */
-#define MOS_WDR_TIMEOUT        (HZ * 5)
+#define MOS_WDR_TIMEOUT        5000
 
 #define MOS_MAX_PORT   0x02
 #define MOS_WRITE      0x0E
@@ -227,11 +227,22 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
        __u8 requesttype = (__u8)0xc0;
        __u16 index = get_reg_index(reg);
        __u16 value = get_reg_value(reg, serial_portnum);
-       int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
-                                    index, data, 1, MOS_WDR_TIMEOUT);
-       if (status < 0)
+       u8 *buf;
+       int status;
+
+       buf = kmalloc(1, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+                                    index, buf, 1, MOS_WDR_TIMEOUT);
+       if (status == 1)
+               *data = *buf;
+       else if (status < 0)
                dev_err(&usbdev->dev,
                        "mos7720: usb_control_msg() failed: %d", status);
+       kfree(buf);
+
        return status;
 }
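read_mos_reg() used to hand usb_control_msg() the address of a caller-provided variable; buffers for USB transfers need to be DMA-able, kmalloc'd memory rather than stack or embedded storage, so the fix bounces the byte through a one-byte heap buffer and copies it out only when the full length was transferred (mos7840 below gets the same treatment). A standalone sketch of that bounce-buffer shape, with a stub in place of usb_control_msg():

#include <stdint.h>
#include <stdlib.h>

/* Stub standing in for usb_control_msg(): returns bytes transferred or <0. */
static int xfer_one_byte(uint8_t *buf)
{
        buf[0] = 0xA5;
        return 1;
}

/* Read one register byte through a heap bounce buffer, the way the fixed
 * read_mos_reg() does, instead of handing out a pointer to caller memory. */
static int read_reg(uint8_t *data)
{
        uint8_t *buf;
        int status;

        buf = malloc(1);                /* kmalloc(1, GFP_KERNEL) in the driver */
        if (!buf)
                return -12;             /* -ENOMEM */

        status = xfer_one_byte(buf);
        if (status == 1)
                *data = *buf;           /* copy out only on a complete transfer */

        free(buf);
        return status;
}

int main(void)
{
        uint8_t v = 0;

        return read_reg(&v) == 1 ? 0 : 1;
}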
 
@@ -1618,7 +1629,7 @@ static void change_port_settings(struct tty_struct *tty,
                mos7720_port->shadowMCR |= (UART_MCR_XONANY);
                /* To set hardware flow control to the specified *
                 * serial port, in SP1/2_CONTROL_REG             */
-               if (port->number)
+               if (port_number)
                        write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
                else
                        write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
@@ -1927,7 +1938,7 @@ static int mos7720_startup(struct usb_serial *serial)
 
        /* setting configuration feature to one */
        usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
-                       (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ);
+                       (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
 
        /* start the interrupt urb */
        ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
@@ -1970,7 +1981,7 @@ static void mos7720_release(struct usb_serial *serial)
                /* wait for synchronous usb calls to return */
                if (mos_parport->msg_pending)
                        wait_for_completion_timeout(&mos_parport->syncmsg_compl,
-                                                   MOS_WDR_TIMEOUT);
+                                           msecs_to_jiffies(MOS_WDR_TIMEOUT));
 
                parport_remove_port(mos_parport->pp);
                usb_set_serial_data(serial, NULL);
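
The two timeout tweaks in this file are unit fixes rather than behaviour changes: usb_control_msg() takes its timeout in milliseconds, so the old HZ-based MOS_WDR_TIMEOUT only happened to be correct when HZ=1000, while wait_for_completion_timeout() expects jiffies and therefore now gets an explicit msecs_to_jiffies() conversion. A small illustrative helper (the wrapper and its name are assumptions, not driver code) showing where the conversion belongs:

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    #define MOS_WDR_TIMEOUT 5000    /* milliseconds, as redefined above */

    static unsigned long wait_for_sync_msg(struct completion *done)
    {
            /* convert to jiffies only at the completion-API boundary */
            return wait_for_completion_timeout(done,
                                               msecs_to_jiffies(MOS_WDR_TIMEOUT));
    }
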
index a0d5ea5459823fbd528d83f5676120a8c456eca0..7e998081e1cd9b42651143e7f01e63286410393d 100644 (file)
@@ -2142,13 +2142,21 @@ static int mos7840_ioctl(struct tty_struct *tty,
 static int mos7810_check(struct usb_serial *serial)
 {
        int i, pass_count = 0;
+       u8 *buf;
        __u16 data = 0, mcr_data = 0;
        __u16 test_pattern = 0x55AA;
+       int res;
+
+       buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+       if (!buf)
+               return 0;       /* failed to identify 7810 */
 
        /* Store MCR setting */
-       usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+       res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
                MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER,
-               &mcr_data, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+               buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+       if (res == VENDOR_READ_LENGTH)
+               mcr_data = *buf;
 
        for (i = 0; i < 16; i++) {
                /* Send the 1-bit test pattern out to MCS7810 test pin */
@@ -2158,9 +2166,12 @@ static int mos7810_check(struct usb_serial *serial)
                        MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT);
 
                /* Read the test pattern back */
-               usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
-                       MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data,
-                       VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+               res = usb_control_msg(serial->dev,
+                               usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ,
+                               MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
+                               VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+               if (res == VENDOR_READ_LENGTH)
+                       data = *buf;
 
                /* If this is a MCS7810 device, both test patterns must match */
                if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001)
@@ -2174,6 +2185,8 @@ static int mos7810_check(struct usb_serial *serial)
                MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL,
                0, MOS_WDR_TIMEOUT);
 
+       kfree(buf);
+
        if (pass_count == 16)
                return 1;
 
@@ -2183,11 +2196,17 @@ static int mos7810_check(struct usb_serial *serial)
 static int mos7840_calc_num_ports(struct usb_serial *serial)
 {
        __u16 data = 0x00;
+       u8 *buf;
        int mos7840_num_ports;
 
-       usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
-               MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &data,
-               VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+       buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+       if (buf) {
+               usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+                       MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
+                       VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
+               data = *buf;
+               kfree(buf);
+       }
 
        if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 ||
                serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) {
index 734372846abbb31b1381841f4f144a09a4f0b2a0..bd4323ddae1aa19002b92998d52d00b9bd13a7cf 100644 (file)
@@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb);
 
 #define DELL_PRODUCT_5800_MINICARD_VZW         0x8195  /* Novatel E362 */
 #define DELL_PRODUCT_5800_V2_MINICARD_VZW      0x8196  /* Novatel E362 */
+#define DELL_PRODUCT_5804_MINICARD_ATT         0x819b  /* Novatel E371 */
 
 #define KYOCERA_VENDOR_ID                      0x0c88
 #define KYOCERA_PRODUCT_KPC650                 0x17da
@@ -249,13 +250,7 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_MF622                      0x0001
 #define ZTE_PRODUCT_MF628                      0x0015
 #define ZTE_PRODUCT_MF626                      0x0031
-#define ZTE_PRODUCT_CDMA_TECH                  0xfffe
-#define ZTE_PRODUCT_AC8710                     0xfff1
-#define ZTE_PRODUCT_AC2726                     0xfff5
-#define ZTE_PRODUCT_AC8710T                    0xffff
 #define ZTE_PRODUCT_MC2718                     0xffe8
-#define ZTE_PRODUCT_AD3812                     0xffeb
-#define ZTE_PRODUCT_MC2716                     0xffed
 
 #define BENQ_VENDOR_ID                         0x04a5
 #define BENQ_PRODUCT_H10                       0x4068
@@ -341,8 +336,8 @@ static void option_instat_callback(struct urb *urb);
 #define CINTERION_PRODUCT_EU3_E                        0x0051
 #define CINTERION_PRODUCT_EU3_P                        0x0052
 #define CINTERION_PRODUCT_PH8                  0x0053
-#define CINTERION_PRODUCT_AH6                  0x0055
-#define CINTERION_PRODUCT_PLS8                 0x0060
+#define CINTERION_PRODUCT_AHXX                 0x0055
+#define CINTERION_PRODUCT_PLXX                 0x0060
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID                     0x0b3c
@@ -494,18 +489,10 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
        .reserved = BIT(4),
 };
 
-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
-       .sendsetup = BIT(0) | BIT(1) | BIT(2),
-};
-
 static const struct option_blacklist_info zte_mc2718_z_blacklist = {
        .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
 };
 
-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
-       .sendsetup = BIT(1) | BIT(2) | BIT(3),
-};
-
 static const struct option_blacklist_info huawei_cdc12_blacklist = {
        .reserved = BIT(1) | BIT(2),
 };
@@ -592,6 +579,8 @@ static const struct usb_device_id option_ids[] = {
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff),    /* Huawei E1820 */
+               .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
@@ -771,6 +760,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) },         /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
        { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },   /* ADU-E100, ADU-310 */
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
        { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -795,7 +785,6 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
-       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
@@ -966,6 +955,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
@@ -1195,16 +1186,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
 
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+       /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
         .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
-        .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
-        .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1264,8 +1248,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
-       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) },
-       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) },
+       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
index 59b32b7821264d6d02b34fe4df881496ebf271b2..bd794b43898cce03ef4ac6fb2c71ecc6563bc4c0 100644 (file)
@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE(0x1199, 0x901b)},   /* Sierra Wireless MC7770 */
        {USB_DEVICE(0x12D1, 0x14F0)},   /* Sony Gobi 3000 QDL */
        {USB_DEVICE(0x12D1, 0x14F1)},   /* Sony Gobi 3000 Composite */
+       {USB_DEVICE(0x0AF0, 0x8120)},   /* Option GTM681W */
 
        /* non Gobi Qualcomm serial devices */
        {USB_DEVICE_INTERFACE_NUMBER(0x0f3d, 0x68a2, 0)},       /* Sierra Wireless MC7700 Device Management */
index cac47aef2918972ead4aeb01a43bd9166daec0ca..c92c5ed4e580ec6761d0acabc379cfe667faaeb1 100644 (file)
@@ -101,6 +101,7 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
                const unsigned char *data, int count);
 static int ti_write_room(struct tty_struct *tty);
 static int ti_chars_in_buffer(struct tty_struct *tty);
+static bool ti_tx_empty(struct usb_serial_port *port);
 static void ti_throttle(struct tty_struct *tty);
 static void ti_unthrottle(struct tty_struct *tty);
 static int ti_ioctl(struct tty_struct *tty,
@@ -222,6 +223,7 @@ static struct usb_serial_driver ti_1port_device = {
        .write                  = ti_write,
        .write_room             = ti_write_room,
        .chars_in_buffer        = ti_chars_in_buffer,
+       .tx_empty               = ti_tx_empty,
        .throttle               = ti_throttle,
        .unthrottle             = ti_unthrottle,
        .ioctl                  = ti_ioctl,
@@ -253,6 +255,7 @@ static struct usb_serial_driver ti_2port_device = {
        .write                  = ti_write,
        .write_room             = ti_write_room,
        .chars_in_buffer        = ti_chars_in_buffer,
+       .tx_empty               = ti_tx_empty,
        .throttle               = ti_throttle,
        .unthrottle             = ti_unthrottle,
        .ioctl                  = ti_ioctl,
@@ -684,8 +687,6 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
        struct ti_port *tport = usb_get_serial_port_data(port);
        int chars = 0;
        unsigned long flags;
-       int ret;
-       u8 lsr;
 
        if (tport == NULL)
                return 0;
@@ -694,16 +695,22 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
        chars = kfifo_len(&tport->write_fifo);
        spin_unlock_irqrestore(&tport->tp_lock, flags);
 
-       if (!chars) {
-               ret = ti_get_lsr(tport, &lsr);
-               if (!ret && !(lsr & TI_LSR_TX_EMPTY))
-                       chars = 1;
-       }
-
        dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
        return chars;
 }
 
+static bool ti_tx_empty(struct usb_serial_port *port)
+{
+       struct ti_port *tport = usb_get_serial_port_data(port);
+       int ret;
+       u8 lsr;
+
+       ret = ti_get_lsr(tport, &lsr);
+       if (!ret && !(lsr & TI_LSR_TX_EMPTY))
+               return false;
+
+       return true;
+}
 
 static void ti_throttle(struct tty_struct *tty)
 {
index cf75beb1251bc85b3c709b989e1ddd5220094c7c..5f6b1ff9d29e6c4166213d04263b83aca0a51a52 100644 (file)
@@ -359,20 +359,29 @@ static int serial_chars_in_buffer(struct tty_struct *tty)
 {
        struct usb_serial_port *port = tty->driver_data;
        struct usb_serial *serial = port->serial;
-       int count = 0;
 
        dev_dbg(tty->dev, "%s\n", __func__);
 
-       mutex_lock(&serial->disc_mutex);
-       /* if the device was unplugged then any remaining characters
-          fell out of the connector ;) */
        if (serial->disconnected)
-               count = 0;
-       else
-               count = serial->type->chars_in_buffer(tty);
-       mutex_unlock(&serial->disc_mutex);
+               return 0;
 
-       return count;
+       return serial->type->chars_in_buffer(tty);
+}
+
+static void serial_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+       struct usb_serial_port *port = tty->driver_data;
+       struct usb_serial *serial = port->serial;
+
+       dev_dbg(tty->dev, "%s\n", __func__);
+
+       if (!port->serial->type->wait_until_sent)
+               return;
+
+       mutex_lock(&serial->disc_mutex);
+       if (!serial->disconnected)
+               port->serial->type->wait_until_sent(tty, timeout);
+       mutex_unlock(&serial->disc_mutex);
 }
 
 static void serial_throttle(struct tty_struct *tty)
@@ -399,7 +408,7 @@ static int serial_ioctl(struct tty_struct *tty,
                                        unsigned int cmd, unsigned long arg)
 {
        struct usb_serial_port *port = tty->driver_data;
-       int retval = -ENODEV;
+       int retval = -ENOIOCTLCMD;
 
        dev_dbg(tty->dev, "%s - cmd 0x%.4x\n", __func__, cmd);
 
@@ -411,8 +420,6 @@ static int serial_ioctl(struct tty_struct *tty,
        default:
                if (port->serial->type->ioctl)
                        retval = port->serial->type->ioctl(tty, cmd, arg);
-               else
-                       retval = -ENOIOCTLCMD;
        }
 
        return retval;
@@ -1191,6 +1198,7 @@ static const struct tty_operations serial_ops = {
        .unthrottle =           serial_unthrottle,
        .break_ctl =            serial_break,
        .chars_in_buffer =      serial_chars_in_buffer,
+       .wait_until_sent =      serial_wait_until_sent,
        .tiocmget =             serial_tiocmget,
        .tiocmset =             serial_tiocmset,
        .get_icount =           serial_get_icount,
@@ -1316,6 +1324,8 @@ static void usb_serial_operations_init(struct usb_serial_driver *device)
        set_to_generic_if_null(device, close);
        set_to_generic_if_null(device, write_room);
        set_to_generic_if_null(device, chars_in_buffer);
+       if (device->tx_empty)
+               set_to_generic_if_null(device, wait_until_sent);
        set_to_generic_if_null(device, read_bulk_callback);
        set_to_generic_if_null(device, write_bulk_callback);
        set_to_generic_if_null(device, process_read_urb);
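
The guarded set_to_generic_if_null() call above means the core only offers a wait_until_sent operation when the sub-driver can report hardware-transmitter state through tx_empty; chars_in_buffer (see the TI hunks earlier) is correspondingly reduced to software-FIFO accounting. How the generic helper uses tx_empty is not shown in this diff; the following is only a rough sketch of the kind of polling loop such a helper can use (names, poll interval and includes are illustrative):

    #include <linux/delay.h>
    #include <linux/jiffies.h>

    static void example_wait_until_sent(struct usb_serial_port *port,
                                        struct usb_serial_driver *drv,
                                        unsigned long timeout)
    {
            unsigned long expire = jiffies + timeout;

            /* poll until the hardware transmitter drains or we time out */
            while (!drv->tx_empty(port)) {
                    if (timeout && time_after(jiffies, expire))
                            break;
                    msleep(10);
            }
    }
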
index 7573ec8a084f3f55c1f2597c2f2b699e58790351..9910aa2edf4b1916c518e3f83d11e39ff94226a0 100644 (file)
@@ -560,10 +560,19 @@ static int treo_attach(struct usb_serial *serial)
        */
 #define COPY_PORT(dest, src)                                           \
        do { \
+               int i;                                                  \
+                                                                       \
+               for (i = 0; i < ARRAY_SIZE(src->read_urbs); ++i) {      \
+                       dest->read_urbs[i] = src->read_urbs[i];         \
+                       dest->read_urbs[i]->context = dest;             \
+                       dest->bulk_in_buffers[i] = src->bulk_in_buffers[i]; \
+               }                                                       \
                dest->read_urb = src->read_urb;                         \
                dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\
                dest->bulk_in_buffer = src->bulk_in_buffer;             \
+               dest->bulk_in_size = src->bulk_in_size;                 \
                dest->interrupt_in_urb = src->interrupt_in_urb;         \
+               dest->interrupt_in_urb->context = dest;                 \
                dest->interrupt_in_endpointAddress = \
                                        src->interrupt_in_endpointAddress;\
                dest->interrupt_in_buffer = src->interrupt_in_buffer;   \
index b9fca3586d741003a50ffb435be7de74020c48e8..347caad47a121d3f7a26ec7f3246a6960114ffc2 100644 (file)
@@ -649,7 +649,7 @@ static void firm_setup_port(struct tty_struct *tty)
        struct whiteheat_port_settings port_settings;
        unsigned int cflag = tty->termios.c_cflag;
 
-       port_settings.port = port->number + 1;
+       port_settings.port = port->number - port->serial->minor + 1;
 
        /* get the byte size */
        switch (cflag & CSIZE) {
index 39ee7373b4ee8f08cea1871867b498e1b72c8d2f..fca4c752a4ed233199d82a787c0ebfbfff32e71c 100644 (file)
@@ -41,9 +41,6 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        int len;
        unsigned char *buf;
 
-       if (port->number != 0)
-               return -ENODEV;
-
        buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
@@ -53,7 +50,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0001, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        /* send 2nd cmd and receive data */
@@ -65,7 +62,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                 0x21, 0xa1,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 3 cmd */
@@ -84,7 +81,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x20, 0x21,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 4 cmd */
@@ -95,7 +92,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0003, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        /* send 5 cmd */
@@ -107,7 +104,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                 0x21, 0xa1,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 6 cmd */
@@ -126,7 +123,7 @@ static int zte_ev_usb_serial_open(struct tty_struct *tty,
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x20, 0x21,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
        kfree(buf);
 
@@ -166,9 +163,6 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        int len;
        unsigned char *buf;
 
-       if (port->number != 0)
-               return;
-
        buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL);
        if (!buf)
                return;
@@ -178,7 +172,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0002, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        /* send 2nd ctl cmd(CTL    21 22 03 00  00 00 00 00 ) */
@@ -186,7 +180,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0003, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        /* send 3rd cmd and receive data */
@@ -198,7 +192,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                 0x21, 0xa1,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 4 cmd */
@@ -217,7 +211,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x20, 0x21,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 5 cmd */
@@ -228,7 +222,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0003, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        /* send 6 cmd */
@@ -240,7 +234,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                 0x21, 0xa1,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 7 cmd */
@@ -259,7 +253,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x20, 0x21,
                                 0x0000, 0x0000, buf, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        debug_data(dev, __func__, len, buf, result);
 
        /* send 8 cmd */
@@ -270,7 +264,7 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 0x22, 0x21,
                                 0x0003, 0x0000, NULL, len,
-                                HZ * USB_CTRL_GET_TIMEOUT);
+                                USB_CTRL_GET_TIMEOUT);
        dev_dbg(dev, "result = %d\n", result);
 
        kfree(buf);
@@ -279,11 +273,29 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port)
 }
 
 static const struct usb_device_id id_table[] = {
-       { USB_DEVICE(0x19d2, 0xffff) }, /* AC8700 */
-       { USB_DEVICE(0x19d2, 0xfffe) },
-       { USB_DEVICE(0x19d2, 0xfffd) }, /* MG880 */
+       /* AC8710, AC8710T */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) },
+        /* AC8700 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) },
+       /* MG880 */
+       { USB_DEVICE(0x19d2, 0xfffd) },
+       { USB_DEVICE(0x19d2, 0xfffc) },
+       { USB_DEVICE(0x19d2, 0xfffb) },
+       /* AC2726, AC8710_V3 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(0x19d2, 0xfff6) },
+       { USB_DEVICE(0x19d2, 0xfff7) },
+       { USB_DEVICE(0x19d2, 0xfff8) },
+       { USB_DEVICE(0x19d2, 0xfff9) },
+       { USB_DEVICE(0x19d2, 0xffee) },
+       /* AC2716, MC2716 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) },
+       /* AD3812 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(0x19d2, 0xffec) },
        { USB_DEVICE(0x05C6, 0x3197) },
        { USB_DEVICE(0x05C6, 0x6000) },
+       { USB_DEVICE(0x05C6, 0x9008) },
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 8623577bbbe70d608eb0866f18be56c20513a5bf..281be56d56485535eaef90e890bb2bfebb30ad00 100644 (file)
@@ -105,8 +105,9 @@ struct rts51x_chip {
        int status_len;
 
        u32 flag;
-#ifdef CONFIG_REALTEK_AUTOPM
        struct us_data *us;
+
+#ifdef CONFIG_REALTEK_AUTOPM
        struct timer_list rts51x_suspend_timer;
        unsigned long timer_expires;
        int pwr_state;
@@ -988,6 +989,7 @@ static int init_realtek_cr(struct us_data *us)
        us->extra = chip;
        us->extra_destructor = realtek_cr_destructor;
        us->max_lun = chip->max_lun = rts51x_get_max_lun(us);
+       chip->us = us;
 
        usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun);
 
@@ -1010,10 +1012,8 @@ static int init_realtek_cr(struct us_data *us)
                        SET_AUTO_DELINK(chip);
        }
 #ifdef CONFIG_REALTEK_AUTOPM
-       if (ss_en) {
-               chip->us = us;
+       if (ss_en)
                realtek_cr_autosuspend_setup(us);
-       }
 #endif
 
        usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag);
index 2b51e2336aa20006e47cb0f1ac7ea146120413a3..f80d3dd41d8c6d420c8fb2d640a81739f2049674 100644 (file)
@@ -155,14 +155,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 
 static void vhost_net_clear_ubuf_info(struct vhost_net *n)
 {
-
-       bool zcopy;
        int i;
 
-       for (i = 0; i < n->dev.nvqs; ++i) {
-               zcopy = vhost_net_zcopy_mask & (0x1 << i);
-               if (zcopy)
-                       kfree(n->vqs[i].ubuf_info);
+       for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+               kfree(n->vqs[i].ubuf_info);
+               n->vqs[i].ubuf_info = NULL;
        }
 }
 
@@ -171,7 +168,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
        bool zcopy;
        int i;
 
-       for (i = 0; i < n->dev.nvqs; ++i) {
+       for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                zcopy = vhost_net_zcopy_mask & (0x1 << i);
                if (!zcopy)
                        continue;
@@ -183,12 +180,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
        return 0;
 
 err:
-       while (i--) {
-               zcopy = vhost_net_zcopy_mask & (0x1 << i);
-               if (!zcopy)
-                       continue;
-               kfree(n->vqs[i].ubuf_info);
-       }
+       vhost_net_clear_ubuf_info(n);
        return -ENOMEM;
 }
 
@@ -196,12 +188,12 @@ void vhost_net_vq_reset(struct vhost_net *n)
 {
        int i;
 
+       vhost_net_clear_ubuf_info(n);
+
        for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
                n->vqs[i].done_idx = 0;
                n->vqs[i].upend_idx = 0;
                n->vqs[i].ubufs = NULL;
-               kfree(n->vqs[i].ubuf_info);
-               n->vqs[i].ubuf_info = NULL;
                n->vqs[i].vhost_hlen = 0;
                n->vqs[i].sock_hlen = 0;
        }
@@ -436,7 +428,8 @@ static void handle_tx(struct vhost_net *net)
                                kref_get(&ubufs->kref);
                        }
                        nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
-               }
+               } else
+                       msg.msg_control = NULL;
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(NULL, sock, &msg, len);
                if (unlikely(err < 0)) {
@@ -1053,6 +1046,10 @@ static long vhost_net_set_owner(struct vhost_net *n)
        int r;
 
        mutex_lock(&n->dev.mutex);
+       if (vhost_dev_has_owner(&n->dev)) {
+               r = -EBUSY;
+               goto out;
+       }
        r = vhost_net_set_ubuf_info(n);
        if (r)
                goto out;
index beee7f5787e6c814b2420f745be2555ca6a71af5..60aa5ad09a2fdb74a457ecadf0130f056f114c8e 100644 (file)
@@ -343,6 +343,12 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
        return attach.ret;
 }
 
+/* Caller should have device mutex */
+bool vhost_dev_has_owner(struct vhost_dev *dev)
+{
+       return dev->mm;
+}
+
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -350,7 +356,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
        int err;
 
        /* Is there an owner already? */
-       if (dev->mm) {
+       if (vhost_dev_has_owner(dev)) {
                err = -EBUSY;
                goto err_mm;
        }
index a7ad63592987f33ed4fd773691b7f5f604220d51..64adcf99ff33893f1eb71b4c181dba4df739a8f3 100644 (file)
@@ -133,6 +133,7 @@ struct vhost_dev {
 
 long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
 long vhost_dev_set_owner(struct vhost_dev *dev);
+bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
 struct vhost_memory *vhost_dev_reset_owner_prepare(void);
 void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
index bff0775e258c9ce30b1615e38492fa123eb49f8a..5174ebac288d65e70f31c13867e44aa98cc18ff4 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Since these may be in userspace, we use (inline) accessors.
  */
+#include <linux/module.h>
 #include <linux/vringh.h>
 #include <linux/virtio_ring.h>
 #include <linux/kernel.h>
@@ -1005,3 +1006,5 @@ int vringh_need_notify_kern(struct vringh *vrh)
        return __vringh_need_notify(vrh, getu16_kern);
 }
 EXPORT_SYMBOL(vringh_need_notify_kern);
+
+MODULE_LICENSE("GPL");
index d71d60f94fc19a7fcc34ace0940dfb841e9904fc..2e937bdace6f123a1b6f1c0c055b4422dde78875 100644 (file)
@@ -2199,7 +2199,7 @@ config FB_XILINX
 
 config FB_GOLDFISH
        tristate "Goldfish Framebuffer"
-       depends on FB
+       depends on FB && HAS_DMA
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
@@ -2453,6 +2453,23 @@ config FB_HYPERV
        help
          This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
 
+config FB_SIMPLE
+       bool "Simple framebuffer support"
+       depends on (FB = y) && OF
+       select FB_CFB_FILLRECT
+       select FB_CFB_COPYAREA
+       select FB_CFB_IMAGEBLIT
+       help
+         Say Y if you want support for a simple frame-buffer.
+
+         This driver assumes that the display hardware has been initialized
+         before the kernel boots, and the kernel will simply render to the
+         pre-allocated frame buffer surface.
+
+         Configuration re: surface address, size, and format must be provided
+         through device tree, or potentially plain old platform data in the
+         future.
+
 source "drivers/video/omap/Kconfig"
 source "drivers/video/omap2/Kconfig"
 source "drivers/video/exynos/Kconfig"
index 7234e4a959e8c5b61e9d49b32d1057f83a7f290c..e8bae8dd4804d4bf797f444f239d5005a689390e 100644 (file)
@@ -166,6 +166,7 @@ obj-$(CONFIG_FB_MX3)                  += mx3fb.o
 obj-$(CONFIG_FB_DA8XX)           += da8xx-fb.o
 obj-$(CONFIG_FB_MXS)             += mxsfb.o
 obj-$(CONFIG_FB_SSD1307)         += ssd1307fb.o
+obj-$(CONFIG_FB_SIMPLE)           += simplefb.o
 
 # the test framebuffer is last
 obj-$(CONFIG_FB_VIRTUAL)          += vfb.o
index 540909de6247b19daabb6fc6affa650fe6a70f9a..effdb373b8db0ffd55d4f56523c8c379f50e4cbc 100644 (file)
@@ -223,8 +223,14 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
 
 static void exit_backlight(struct atmel_lcdfb_info *sinfo)
 {
-       if (sinfo->backlight)
-               backlight_device_unregister(sinfo->backlight);
+       if (!sinfo->backlight)
+               return;
+
+       if (sinfo->backlight->ops) {
+               sinfo->backlight->props.power = FB_BLANK_POWERDOWN;
+               sinfo->backlight->ops->update_status(sinfo->backlight);
+       }
+       backlight_device_unregister(sinfo->backlight);
 }
 
 #else
@@ -461,8 +467,11 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
        if (info->fix.smem_len) {
                unsigned int smem_len = (var->xres_virtual * var->yres_virtual
                                         * ((var->bits_per_pixel + 7) / 8));
-               if (smem_len > info->fix.smem_len)
+               if (smem_len > info->fix.smem_len) {
+                       dev_err(dev, "Frame buffer is too small (%u) for screen size (need at least %u)\n",
+                               info->fix.smem_len, smem_len);
                        return -EINVAL;
+               }
        }
 
        /* Saturate vertical and horizontal timings at maximum values */
index ddabaa867b0dc38e24ab090d6e62108c88a02a2c..700cac067b4611891af50d90ee083bdbb0f94eb9 100644 (file)
@@ -111,30 +111,16 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
        switch (blank_mode) {
 
        case VESA_NO_BLANKING:
-                       /* Turn on panel */
-                       fbdev->regs->lcd_control |= LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
-                       if (fbdev->panel_idx == 1) {
-                               au_writew(au_readw(PB1100_G_CONTROL)
-                                         | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
-                       PB1100_G_CONTROL);
-                       }
-#endif
+               /* Turn on panel */
+               fbdev->regs->lcd_control |= LCD_CONTROL_GO;
                au_sync();
                break;
 
        case VESA_VSYNC_SUSPEND:
        case VESA_HSYNC_SUSPEND:
        case VESA_POWERDOWN:
-                       /* Turn off panel */
-                       fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
-                       if (fbdev->panel_idx == 1) {
-                               au_writew(au_readw(PB1100_G_CONTROL)
-                                         & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
-                       PB1100_G_CONTROL);
-                       }
-#endif
+               /* Turn off panel */
+               fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
                au_sync();
                break;
        default:
index a862e9173ebed67d67cac954a98eb70a9f94c346..48da25c96cd3c2a160077e880f58fed3a52da0f8 100644 (file)
@@ -18,6 +18,8 @@ font-objs-$(CONFIG_FONT_MINI_4x6)  += font_mini_4x6.o
 
 font-objs += $(font-objs-y)
 
+obj-$(CONFIG_FONTS) += font.o
+
 # Each configuration option enables a list of files.
 
 obj-$(CONFIG_DUMMY_CONSOLE)       += dummycon.o
index 60cc6fee654815444c21d46909351f0e15af6b10..c9c2252e371945939dcf0b5e1f667359cd7ba42d 100644 (file)
@@ -53,6 +53,8 @@ static char *def_disp_name;
 module_param_named(def_disp, def_disp_name, charp, 0);
 MODULE_PARM_DESC(def_disp, "default display name");
 
+static bool dss_initialized;
+
 const char *omapdss_get_default_display_name(void)
 {
        return core.default_display_name;
@@ -66,6 +68,12 @@ enum omapdss_version omapdss_get_version(void)
 }
 EXPORT_SYMBOL(omapdss_get_version);
 
+bool omapdss_is_initialized(void)
+{
+       return dss_initialized;
+}
+EXPORT_SYMBOL(omapdss_is_initialized);
+
 struct platform_device *dss_get_core_pdev(void)
 {
        return core.pdev;
@@ -603,6 +611,8 @@ static int __init omap_dss_init(void)
                return r;
        }
 
+       dss_initialized = true;
+
        return 0;
 }
 
@@ -633,7 +643,15 @@ static int __init omap_dss_init(void)
 
 static int __init omap_dss_init2(void)
 {
-       return omap_dss_register_drivers();
+       int r;
+
+       r = omap_dss_register_drivers();
+       if (r)
+               return r;
+
+       dss_initialized = true;
+
+       return 0;
 }
 
 core_initcall(omap_dss_init);
index 17f4d55c621ca446afb7daed1cc08b6b1e6e2035..a109934c0478ea72808eb0205018d0a1253341f0 100644 (file)
@@ -1065,10 +1065,6 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
        mutex_init(&hdmi.ip_data.lock);
 
        res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               DSSERR("can't get IORESOURCE_MEM HDMI\n");
-               return -EINVAL;
-       }
 
        /* Base address taken from platform */
        hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res);
index c84bb8a4d0c4c1b7b1433fb97f6e55796d067434..856917b3361665137d676fda89f1c0f426ae41c2 100644 (file)
@@ -2416,6 +2416,9 @@ static int omapfb_probe(struct platform_device *pdev)
 
        DBG("omapfb_probe\n");
 
+       if (omapdss_is_initialized() == false)
+               return -EPROBE_DEFER;
+
        if (pdev->num_resources != 0) {
                dev_err(&pdev->dev, "probed for an unknown device\n");
                r = -ENODEV;
index 5261229c79afbef8afa4e80f24936f6dbb6e6f4e..f346b02eee1d84c2d8da77b15e3d64c72aa4fa17 100644 (file)
@@ -353,11 +353,6 @@ static int __init vrfb_probe(struct platform_device *pdev)
        /* first resource is the register res, the rest are vrfb contexts */
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem) {
-               dev_err(&pdev->dev, "can't get vrfb base address\n");
-               return -EINVAL;
-       }
-
        vrfb_base = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(vrfb_base))
                return PTR_ERR(vrfb_base);
index d9f08c653d6275ebce317cd8679d92cafdbac5bc..dbfe2c18a4342dbcc37ffb9806a2e000ddfc379c 100644 (file)
@@ -710,7 +710,7 @@ static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
        r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
 
        dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n",
-               info->fix.smem_start + vma->vm_pgoff << PAGE_SHIFT,
+               info->fix.smem_start + (vma->vm_pgoff << PAGE_SHIFT),
                vma->vm_start);
 
        return r;
diff --git a/drivers/video/simplefb.c b/drivers/video/simplefb.c
new file mode 100644 (file)
index 0000000..e2e9e3e
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * Simplest possible simple frame-buffer driver, as a platform device
+ *
+ * Copyright (c) 2013, Stephen Warren
+ *
+ * Based on q40fb.c, which was:
+ * Copyright (C) 2001 Richard Zidlicky <rz@linux-m68k.org>
+ *
+ * Also based on offb.c, which was:
+ * Copyright (C) 1997 Geert Uytterhoeven
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static struct fb_fix_screeninfo simplefb_fix = {
+       .id             = "simple",
+       .type           = FB_TYPE_PACKED_PIXELS,
+       .visual         = FB_VISUAL_TRUECOLOR,
+       .accel          = FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo simplefb_var = {
+       .height         = -1,
+       .width          = -1,
+       .activate       = FB_ACTIVATE_NOW,
+       .vmode          = FB_VMODE_NONINTERLACED,
+};
+
+static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+                             u_int transp, struct fb_info *info)
+{
+       u32 *pal = info->pseudo_palette;
+       u32 cr = red >> (16 - info->var.red.length);
+       u32 cg = green >> (16 - info->var.green.length);
+       u32 cb = blue >> (16 - info->var.blue.length);
+       u32 value;
+
+       if (regno >= 16)
+               return -EINVAL;
+
+       value = (cr << info->var.red.offset) |
+               (cg << info->var.green.offset) |
+               (cb << info->var.blue.offset);
+       if (info->var.transp.length > 0) {
+               u32 mask = (1 << info->var.transp.length) - 1;
+               mask <<= info->var.transp.offset;
+               value |= mask;
+       }
+       pal[regno] = value;
+
+       return 0;
+}
+
+static struct fb_ops simplefb_ops = {
+       .owner          = THIS_MODULE,
+       .fb_setcolreg   = simplefb_setcolreg,
+       .fb_fillrect    = cfb_fillrect,
+       .fb_copyarea    = cfb_copyarea,
+       .fb_imageblit   = cfb_imageblit,
+};
+
+struct simplefb_format {
+       const char *name;
+       u32 bits_per_pixel;
+       struct fb_bitfield red;
+       struct fb_bitfield green;
+       struct fb_bitfield blue;
+       struct fb_bitfield transp;
+};
+
+static struct simplefb_format simplefb_formats[] = {
+       { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0} },
+};
+
+struct simplefb_params {
+       u32 width;
+       u32 height;
+       u32 stride;
+       struct simplefb_format *format;
+};
+
+static int simplefb_parse_dt(struct platform_device *pdev,
+                          struct simplefb_params *params)
+{
+       struct device_node *np = pdev->dev.of_node;
+       int ret;
+       const char *format;
+       int i;
+
+       ret = of_property_read_u32(np, "width", &params->width);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't parse width property\n");
+               return ret;
+       }
+
+       ret = of_property_read_u32(np, "height", &params->height);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't parse height property\n");
+               return ret;
+       }
+
+       ret = of_property_read_u32(np, "stride", &params->stride);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't parse stride property\n");
+               return ret;
+       }
+
+       ret = of_property_read_string(np, "format", &format);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't parse format property\n");
+               return ret;
+       }
+       params->format = NULL;
+       for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) {
+               if (strcmp(format, simplefb_formats[i].name))
+                       continue;
+               params->format = &simplefb_formats[i];
+               break;
+       }
+       if (!params->format) {
+               dev_err(&pdev->dev, "Invalid format value\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int simplefb_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct simplefb_params params;
+       struct fb_info *info;
+       struct resource *mem;
+
+       if (fb_get_options("simplefb", NULL))
+               return -ENODEV;
+
+       ret = simplefb_parse_dt(pdev, &params);
+       if (ret)
+               return ret;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem) {
+               dev_err(&pdev->dev, "No memory resource\n");
+               return -EINVAL;
+       }
+
+       info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev);
+       if (!info)
+               return -ENOMEM;
+       platform_set_drvdata(pdev, info);
+
+       info->fix = simplefb_fix;
+       info->fix.smem_start = mem->start;
+       info->fix.smem_len = resource_size(mem);
+       info->fix.line_length = params.stride;
+
+       info->var = simplefb_var;
+       info->var.xres = params.width;
+       info->var.yres = params.height;
+       info->var.xres_virtual = params.width;
+       info->var.yres_virtual = params.height;
+       info->var.bits_per_pixel = params.format->bits_per_pixel;
+       info->var.red = params.format->red;
+       info->var.green = params.format->green;
+       info->var.blue = params.format->blue;
+       info->var.transp = params.format->transp;
+
+       info->fbops = &simplefb_ops;
+       info->flags = FBINFO_DEFAULT;
+       info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start,
+                                        info->fix.smem_len);
+       if (!info->screen_base) {
+               framebuffer_release(info);
+               return -ENODEV;
+       }
+       info->pseudo_palette = (void *)(info + 1);
+
+       ret = register_framebuffer(info);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
+               framebuffer_release(info);
+               return ret;
+       }
+
+       dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
+
+       return 0;
+}
+
+static int simplefb_remove(struct platform_device *pdev)
+{
+       struct fb_info *info = platform_get_drvdata(pdev);
+
+       unregister_framebuffer(info);
+       framebuffer_release(info);
+
+       return 0;
+}
+
+static const struct of_device_id simplefb_of_match[] = {
+       { .compatible = "simple-framebuffer", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, simplefb_of_match);
+
+static struct platform_driver simplefb_driver = {
+       .driver = {
+               .name = "simple-framebuffer",
+               .owner = THIS_MODULE,
+               .of_match_table = simplefb_of_match,
+       },
+       .probe = simplefb_probe,
+       .remove = simplefb_remove,
+};
+module_platform_driver(simplefb_driver);
+
+MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>");
+MODULE_DESCRIPTION("Simple framebuffer driver");
+MODULE_LICENSE("GPL v2");
index db2390aed38781705deeb179790d8a540d9660eb..6e94d8dd3d00a31d696fc7e140329d9b4dc72c6b 100644 (file)
@@ -555,11 +555,6 @@ static int omap_hdq_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, hdq_data);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_dbg(&pdev->dev, "unable to get resource\n");
-               return -ENXIO;
-       }
-
        hdq_data->hdq_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(hdq_data->hdq_base))
                return PTR_ERR(hdq_data->hdq_base);
index d184c48a0482b6a72fab3db591b5a8c425b531ab..37cb09b27b6328e85581955400cb94bf18b66f0e 100644 (file)
@@ -248,11 +248,6 @@ static int ath79_wdt_probe(struct platform_device *pdev)
                return -EBUSY;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no memory resource found\n");
-               return -EINVAL;
-       }
-
        wdt_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(wdt_base))
                return PTR_ERR(wdt_base);
index 100d4fbfde2adf594203cd5a874dd72f20e06b03..bead7740c86a44da31d3b8dc876b20b51a62cd78 100644 (file)
@@ -217,11 +217,6 @@ static int davinci_wdt_probe(struct platform_device *pdev)
        dev_info(dev, "heartbeat %d sec\n", heartbeat);
 
        wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (wdt_mem == NULL) {
-               dev_err(dev, "failed to get memory region resource\n");
-               return -ENOENT;
-       }
-
        wdt_base = devm_ioremap_resource(dev, wdt_mem);
        if (IS_ERR(wdt_base))
                return PTR_ERR(wdt_base);
index ff908823688cf969e5fd420141827f0cc9a91a39..62946c2cb4f8b1d579f42cd18c5e72a6e4531434 100644 (file)
@@ -257,11 +257,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
        struct resource *res;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "can't get device resources\n");
-               return -ENODEV;
-       }
-
        imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(imx2_wdt.base))
                return PTR_ERR(imx2_wdt.base);
index dd4d9cb862432b4ce6715aba3164a53ffaaa87a5..9e02d60a364b00d271efb0b4d4eda046a25fbf08 100644 (file)
@@ -19,11 +19,10 @@ config XEN_SELFBALLOONING
          by the current usage of anonymous memory ("committed AS") and
          controlled by various sysfs-settable parameters.  Configuring
          FRONTSWAP is highly recommended; if it is not configured, self-
-         ballooning is disabled by default but can be enabled with the
-         'selfballooning' kernel boot parameter.  If FRONTSWAP is configured,
+         ballooning is disabled by default. If FRONTSWAP is configured,
          frontswap-selfshrinking is enabled by default but can be disabled
-         with the 'noselfshrink' kernel boot parameter; and self-ballooning
-         is enabled by default but can be disabled with the 'noselfballooning'
+         with the 'tmem.selfshrink=0' kernel boot parameter; and self-ballooning
+         is enabled by default but can be disabled with the 'tmem.selfballooning=0'
          kernel boot parameter.  Note that systems without a sufficiently
          large swap device should not enable self-ballooning.
 
@@ -141,7 +140,7 @@ config XEN_GRANT_DEV_ALLOC
 
 config SWIOTLB_XEN
        def_bool y
-       depends on PCI
+       depends on PCI && X86
        select SWIOTLB
 
 config XEN_TMEM
index a56776dbe0958ece67b1c12a4a13b8be5bdb023c..930fb68179012355952c20d759111a6755b3acbe 100644 (file)
@@ -407,7 +407,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                nr_pages = ARRAY_SIZE(frame_list);
 
        for (i = 0; i < nr_pages; i++) {
-               if ((page = alloc_page(gfp)) == NULL) {
+               page = alloc_page(gfp);
+               if (page == NULL) {
                        nr_pages = i;
                        state = BP_EAGAIN;
                        break;
index d8cc8127f19c18f9b6a4ea9f937015363b912bd8..6a6bbe4ede92c67afe4c88efd105839f14b3a240 100644 (file)
@@ -167,6 +167,8 @@ static void xen_irq_info_common_init(struct irq_info *info,
        info->cpu = cpu;
 
        evtchn_to_irq[evtchn] = irq;
+
+       irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 }
 
 static void xen_irq_info_evtchn_init(unsigned irq,
@@ -874,7 +876,6 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                struct irq_info *info = info_for_irq(irq);
                WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
        }
-       irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 
 out:
        mutex_unlock(&irq_mapping_update_lock);
index ca2b00e9d558c8fea579f4667106eaccee0adb84..2cfc24d76fc54d97d9e8106c5fdf108379b7ba9f 100644 (file)
@@ -504,7 +504,7 @@ static void privcmd_close(struct vm_area_struct *vma)
        struct page **pages = vma->vm_private_data;
        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
-       if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages))
+       if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;
 
        xen_unmap_domain_mfn_range(vma, numpgs, pages);
index e3600be4e7fabe54660d5adb1d2f7446b482ff00..cc072c66c766523e4191c5cf4cd40762b77645a9 100644 (file)
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/cleancache.h>
-
-/* temporary ifdef until include/linux/frontswap.h is upstream */
-#ifdef CONFIG_FRONTSWAP
 #include <linux/frontswap.h>
-#endif
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <asm/xen/hypervisor.h>
 #include <xen/tmem.h>
 
+#ifndef CONFIG_XEN_TMEM_MODULE
+bool __read_mostly tmem_enabled = false;
+
+static int __init enable_tmem(char *s)
+{
+       tmem_enabled = true;
+       return 1;
+}
+__setup("tmem", enable_tmem);
+#endif
+
+#ifdef CONFIG_CLEANCACHE
+static bool cleancache __read_mostly = true;
+module_param(cleancache, bool, S_IRUGO);
+static bool selfballooning __read_mostly = true;
+module_param(selfballooning, bool, S_IRUGO);
+#endif /* CONFIG_CLEANCACHE */
+
+#ifdef CONFIG_FRONTSWAP
+static bool frontswap __read_mostly = true;
+module_param(frontswap, bool, S_IRUGO);
+#else /* CONFIG_FRONTSWAP */
+#define frontswap (0)
+#endif /* CONFIG_FRONTSWAP */
+
+#ifdef CONFIG_XEN_SELFBALLOONING
+static bool selfshrinking __read_mostly = true;
+module_param(selfshrinking, bool, S_IRUGO);
+#endif /* CONFIG_XEN_SELFBALLOONING */
+
 #define TMEM_CONTROL               0
 #define TMEM_NEW_POOL              1
 #define TMEM_DESTROY_POOL          2
@@ -129,16 +155,6 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
        return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
 }
 
-#ifndef CONFIG_XEN_TMEM_MODULE
-bool __read_mostly tmem_enabled = false;
-
-static int __init enable_tmem(char *s)
-{
-       tmem_enabled = true;
-       return 1;
-}
-__setup("tmem", enable_tmem);
-#endif
 
 #ifdef CONFIG_CLEANCACHE
 static int xen_tmem_destroy_pool(u32 pool_id)
@@ -230,20 +246,6 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
-static bool disable_cleancache __read_mostly;
-static bool disable_selfballooning __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_cleancache, bool, S_IRUGO);
-module_param(disable_selfballooning, bool, S_IRUGO);
-#else
-static int __init no_cleancache(char *s)
-{
-       disable_cleancache = true;
-       return 1;
-}
-__setup("nocleancache", no_cleancache);
-#endif
-
 static struct cleancache_ops tmem_cleancache_ops = {
        .put_page = tmem_cleancache_put_page,
        .get_page = tmem_cleancache_get_page,
@@ -361,20 +363,6 @@ static void tmem_frontswap_init(unsigned ignored)
                    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
 }
 
-static bool disable_frontswap __read_mostly;
-static bool disable_frontswap_selfshrinking __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_frontswap, bool, S_IRUGO);
-module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
-#else
-static int __init no_frontswap(char *s)
-{
-       disable_frontswap = true;
-       return 1;
-}
-__setup("nofrontswap", no_frontswap);
-#endif
-
 static struct frontswap_ops tmem_frontswap_ops = {
        .store = tmem_frontswap_store,
        .load = tmem_frontswap_load,
@@ -382,8 +370,6 @@ static struct frontswap_ops tmem_frontswap_ops = {
        .invalidate_area = tmem_frontswap_flush_area,
        .init = tmem_frontswap_init
 };
-#else  /* CONFIG_FRONTSWAP */
-#define disable_frontswap_selfshrinking 1
 #endif
 
 static int xen_tmem_init(void)
@@ -391,7 +377,7 @@ static int xen_tmem_init(void)
        if (!xen_domain())
                return 0;
 #ifdef CONFIG_FRONTSWAP
-       if (tmem_enabled && !disable_frontswap) {
+       if (tmem_enabled && frontswap) {
                char *s = "";
                struct frontswap_ops *old_ops =
                        frontswap_register_ops(&tmem_frontswap_ops);
@@ -408,7 +394,7 @@ static int xen_tmem_init(void)
 #endif
 #ifdef CONFIG_CLEANCACHE
        BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
-       if (tmem_enabled && !disable_cleancache) {
+       if (tmem_enabled && cleancache) {
                char *s = "";
                struct cleancache_ops *old_ops =
                        cleancache_register_ops(&tmem_cleancache_ops);
@@ -419,8 +405,15 @@ static int xen_tmem_init(void)
        }
 #endif
 #ifdef CONFIG_XEN_SELFBALLOONING
-       xen_selfballoon_init(!disable_selfballooning,
-                               !disable_frontswap_selfshrinking);
+       /*
+        * There is no point in driving pages to the swap system if they
+        * aren't going anywhere in the tmem universe.
+        */
+       if (!frontswap) {
+               selfshrinking = false;
+               selfballooning = false;
+       }
+       xen_selfballoon_init(selfballooning, selfshrinking);
 #endif
        return 0;
 }
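
With this rework the tmem knobs become positive-sense booleans declared with module_param(), so on a kernel with tmem built in they are set on the command line in the usual modname.param form, while the bare "tmem" option still enables the driver through the __setup() hook above. The exact spelling below is partly an assumption: "tmem.selfballooning=0" is documented by the xen-selfballoon.c comment later in this merge, and the other names follow from the module_param() declarations in this hunk.

    tmem tmem.frontswap=0 tmem.selfballooning=0
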
index a2278ba7fb273a523476e0504388e49e48511093..4e8ba38aa0c9cb0499ecdf27172baf44d55a65b5 100644 (file)
@@ -106,7 +106,7 @@ static void pcistub_device_release(struct kref *kref)
        else
                pci_restore_state(dev);
 
-       if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+       if (dev->msix_cap) {
                struct physdev_pci_device ppdev = {
                        .seg = pci_domain_nr(dev->bus),
                        .bus = dev->bus->number,
@@ -371,7 +371,7 @@ static int pcistub_init_device(struct pci_dev *dev)
        if (err)
                goto config_release;
 
-       if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+       if (dev->msix_cap) {
                struct physdev_pci_device ppdev = {
                        .seg = pci_domain_nr(dev->bus),
                        .bus = dev->bus->number,
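
Both hunks in this file rely on the PCI core caching the MSI-X capability offset in dev->msix_cap at enumeration time (0 when the capability is absent), so testing the cached field is equivalent to re-walking the capability list with pci_find_capability(). A minimal sketch of that equivalence; the helper name is hypothetical:

    /* Hypothetical helper: dev->msix_cap caches the MSI-X capability
     * offset (0 if absent), so this matches the old explicit lookup. */
    #include <linux/pci.h>

    static inline bool demo_has_msix(struct pci_dev *dev)
    {
            /* old form: pci_find_capability(dev, PCI_CAP_ID_MSIX) != 0 */
            return dev->msix_cap != 0;
    }
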
index f2ef569c7cc147ad5865a3630eb4913a7164d262..f70984a892aa5c16a0e24deaee28acb2497dc9f6 100644 (file)
  * System configuration note: Selfballooning should not be enabled on
  * systems without a sufficiently large swap device configured; for best
  * results, it is recommended that total swap be increased by the size
- * of the guest memory.  Also, while technically not required to be
- * configured, it is highly recommended that frontswap also be configured
- * and enabled when selfballooning is running.  So, selfballooning
- * is disabled by default if frontswap is not configured and can only
- * be enabled with the "selfballooning" kernel boot option; similarly
- * selfballooning is enabled by default if frontswap is configured and
- * can be disabled with the "noselfballooning" kernel boot option.  Finally,
- * when frontswap is configured, frontswap-selfshrinking can be disabled
- * with the "noselfshrink" kernel boot option.
+ * of the guest memory. Note that selfballooning should be disabled by default
+ * if frontswap is not configured.  Similarly, selfballooning should be enabled
+ * by default if frontswap is configured and can be disabled with the
+ * "tmem.selfballooning=0" kernel boot option.  Finally, when frontswap is
+ * configured, frontswap-selfshrinking can be disabled with the
+ * "tmem.selfshrink=0" kernel boot option.
  *
  * Selfballooning is disallowed in domain0 and force-disabled.
  *
@@ -120,9 +117,6 @@ static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
 /* Enable/disable with sysfs. */
 static bool frontswap_selfshrinking __read_mostly;
 
-/* Enable/disable with kernel boot option. */
-static bool use_frontswap_selfshrink = true;
-
 /*
  * The default values for the following parameters were deemed reasonable
  * by experimentation, may be workload-dependent, and can all be
@@ -176,35 +170,6 @@ static void frontswap_selfshrink(void)
        frontswap_shrink(tgt_frontswap_pages);
 }
 
-static int __init xen_nofrontswap_selfshrink_setup(char *s)
-{
-       use_frontswap_selfshrink = false;
-       return 1;
-}
-
-__setup("noselfshrink", xen_nofrontswap_selfshrink_setup);
-
-/* Disable with kernel boot option. */
-static bool use_selfballooning = true;
-
-static int __init xen_noselfballooning_setup(char *s)
-{
-       use_selfballooning = false;
-       return 1;
-}
-
-__setup("noselfballooning", xen_noselfballooning_setup);
-#else /* !CONFIG_FRONTSWAP */
-/* Enable with kernel boot option. */
-static bool use_selfballooning;
-
-static int __init xen_selfballooning_setup(char *s)
-{
-       use_selfballooning = true;
-       return 1;
-}
-
-__setup("selfballooning", xen_selfballooning_setup);
 #endif /* CONFIG_FRONTSWAP */
 
 #define MB2PAGES(mb)   ((mb) << (20 - PAGE_SHIFT))
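
The MB2PAGES() macro left as trailing context converts megabytes to page counts with a shift. On a system with 4 KiB pages (PAGE_SHIFT == 12, an architecture-dependent assumption), 20 - 12 = 8, so MB2PAGES(1) is 1 << 8 = 256 pages. A tiny userspace sketch of the same arithmetic:

    /* Userspace illustration of MB2PAGES(), assuming PAGE_SHIFT == 12
     * (4 KiB pages); the real value is architecture-dependent. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define MB2PAGES(mb)   ((mb) << (20 - PAGE_SHIFT))

    int main(void)
    {
            printf("1 MiB  = %d pages\n", MB2PAGES(1));   /* 256   */
            printf("64 MiB = %d pages\n", MB2PAGES(64));  /* 16384 */
            return 0;
    }
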
index 61786be9138b96877ccadce65642a4590eddead1..ec097d6f964dab5e695ff9abdf818581b3473e74 100644 (file)
@@ -534,7 +534,7 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
 
        err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
        if (err)
-               goto out_err;
+               goto out_err_free_ballooned_pages;
 
        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
@@ -543,8 +543,9 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
        *vaddr = addr;
        return 0;
 
- out_err:
+ out_err_free_ballooned_pages:
        free_xenballooned_pages(1, &node->page);
+ out_err:
        kfree(node);
        return err;
 }
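
The hunk above splits the single error label so that, presumably, only failures occurring after the page has been ballooned free it, while earlier failures fall through to the kfree() alone. A self-contained userspace sketch of this layered-unwind idiom; all names and the failure injection are hypothetical:

    /* Each error label undoes only what was set up before the failing
     * step, and the labels fall through downwards. */
    #include <stdio.h>
    #include <stdlib.h>

    static int setup_thing(int fail_first, int fail_second)
    {
            void *node, *page;

            node = fail_first ? NULL : malloc(32);   /* stands in for the node allocation */
            if (!node)
                    goto out_err;                     /* nothing else to undo yet */

            page = fail_second ? NULL : malloc(32);   /* stands in for the ballooned page */
            if (!page)
                    goto out_err_free_node;           /* undo only the first step */

            free(page);
            free(node);
            return 0;

     out_err_free_node:
            free(node);
     out_err:
            return -1;
    }

    int main(void)
    {
            printf("%d %d %d\n", setup_thing(0, 0), setup_thing(1, 0), setup_thing(0, 1));
            return 0;
    }
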
index c8abd3b8a6c48087967ddcfb2ac012bf642a0e10..e74f9c1fbd80a9fe4c6cfbba0176785b005c06fd 100644 (file)
@@ -45,6 +45,7 @@ int xb_wait_for_data_to_read(void);
 int xs_input_avail(void);
 extern struct xenstore_domain_interface *xen_store_interface;
 extern int xen_store_evtchn;
+extern enum xenstore_init xen_store_domain_type;
 
 extern const struct file_operations xen_xenbus_fops;
 
index d730008007624b4227780457a96c77f8662c248e..a6f42fc01407e435c477862d380e8a1da55d6656 100644 (file)
@@ -70,22 +70,21 @@ static long xenbus_alloc(domid_t domid)
        return err;
 }
 
-static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
+static long xenbus_backend_ioctl(struct file *file, unsigned int cmd,
+                                unsigned long data)
 {
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
        switch (cmd) {
-               case IOCTL_XENBUS_BACKEND_EVTCHN:
-                       if (xen_store_evtchn > 0)
-                               return xen_store_evtchn;
-                       return -ENODEV;
-
-               case IOCTL_XENBUS_BACKEND_SETUP:
-                       return xenbus_alloc(data);
-
-               default:
-                       return -ENOTTY;
+       case IOCTL_XENBUS_BACKEND_EVTCHN:
+               if (xen_store_evtchn > 0)
+                       return xen_store_evtchn;
+               return -ENODEV;
+       case IOCTL_XENBUS_BACKEND_SETUP:
+               return xenbus_alloc(data);
+       default:
+               return -ENOTTY;
        }
 }
 
index 3325884c693f8f1db3d94e43be43a622b857af2d..56cfaaa9d006ab1b86c47d6c1a9f1ab90268f399 100644 (file)
@@ -69,6 +69,9 @@ EXPORT_SYMBOL_GPL(xen_store_evtchn);
 struct xenstore_domain_interface *xen_store_interface;
 EXPORT_SYMBOL_GPL(xen_store_interface);
 
+enum xenstore_init xen_store_domain_type;
+EXPORT_SYMBOL_GPL(xen_store_domain_type);
+
 static unsigned long xen_store_mfn;
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
@@ -719,17 +722,11 @@ static int __init xenstored_local_init(void)
        return err;
 }
 
-enum xenstore_init {
-       UNKNOWN,
-       PV,
-       HVM,
-       LOCAL,
-};
 static int __init xenbus_init(void)
 {
        int err = 0;
-       enum xenstore_init usage = UNKNOWN;
        uint64_t v = 0;
+       xen_store_domain_type = XS_UNKNOWN;
 
        if (!xen_domain())
                return -ENODEV;
@@ -737,29 +734,29 @@ static int __init xenbus_init(void)
        xenbus_ring_ops_init();
 
        if (xen_pv_domain())
-               usage = PV;
+               xen_store_domain_type = XS_PV;
        if (xen_hvm_domain())
-               usage = HVM;
+               xen_store_domain_type = XS_HVM;
        if (xen_hvm_domain() && xen_initial_domain())
-               usage = LOCAL;
+               xen_store_domain_type = XS_LOCAL;
        if (xen_pv_domain() && !xen_start_info->store_evtchn)
-               usage = LOCAL;
+               xen_store_domain_type = XS_LOCAL;
        if (xen_pv_domain() && xen_start_info->store_evtchn)
                xenstored_ready = 1;
 
-       switch (usage) {
-       case LOCAL:
+       switch (xen_store_domain_type) {
+       case XS_LOCAL:
                err = xenstored_local_init();
                if (err)
                        goto out_error;
                xen_store_interface = mfn_to_virt(xen_store_mfn);
                break;
-       case PV:
+       case XS_PV:
                xen_store_evtchn = xen_start_info->store_evtchn;
                xen_store_mfn = xen_start_info->store_mfn;
                xen_store_interface = mfn_to_virt(xen_store_mfn);
                break;
-       case HVM:
+       case XS_HVM:
                err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
                if (err)
                        goto out_error;
index bb4f92ed87305452a3f2a5ad8a25293c75da352b..146f857a36f83b5a2f4646424c1251a23abf1b07 100644 (file)
@@ -47,6 +47,13 @@ struct xen_bus_type {
        struct bus_type bus;
 };
 
+enum xenstore_init {
+       XS_UNKNOWN,
+       XS_PV,
+       XS_HVM,
+       XS_LOCAL,
+};
+
 extern struct device_attribute xenbus_dev_attrs[];
 
 extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
index 3159a37d966d57755b6bd6d0d2a19379309c7e6e..a7e25073de19bc3b70e95b2a8697792f62312b63 100644 (file)
@@ -29,6 +29,8 @@
 #include "xenbus_probe.h"
 
 
+static struct workqueue_struct *xenbus_frontend_wq;
+
 /* device/<type>/<id> => <type>-<id> */
 static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
 {
@@ -89,9 +91,40 @@ static void backend_changed(struct xenbus_watch *watch,
        xenbus_otherend_changed(watch, vec, len, 1);
 }
 
+static void xenbus_frontend_delayed_resume(struct work_struct *w)
+{
+       struct xenbus_device *xdev = container_of(w, struct xenbus_device, work);
+
+       xenbus_dev_resume(&xdev->dev);
+}
+
+static int xenbus_frontend_dev_resume(struct device *dev)
+{
+       /*
+        * If xenstored is running in this domain, we cannot access the backend
+        * state at the moment, so we need to defer xenbus_dev_resume
+        */
+       if (xen_store_domain_type == XS_LOCAL) {
+               struct xenbus_device *xdev = to_xenbus_device(dev);
+
+               if (!xenbus_frontend_wq) {
+                       pr_err("%s: no workqueue to process delayed resume\n",
+                              xdev->nodename);
+                       return -EFAULT;
+               }
+
+               INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume);
+               queue_work(xenbus_frontend_wq, &xdev->work);
+
+               return 0;
+       }
+
+       return xenbus_dev_resume(dev);
+}
+
 static const struct dev_pm_ops xenbus_pm_ops = {
        .suspend        = xenbus_dev_suspend,
-       .resume         = xenbus_dev_resume,
+       .resume         = xenbus_frontend_dev_resume,
        .freeze         = xenbus_dev_suspend,
        .thaw           = xenbus_dev_cancel,
        .restore        = xenbus_dev_resume,
@@ -440,6 +473,8 @@ static int __init xenbus_probe_frontend_init(void)
 
        register_xenstore_notifier(&xenstore_notifier);
 
+       xenbus_frontend_wq = create_workqueue("xenbus_frontend");
+
        return 0;
 }
 subsys_initcall(xenbus_probe_frontend_init);
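
The comment in the hunk above explains why resume must be deferred when xenstored runs in the local domain; the mechanism used is a one-shot deferral onto a dedicated workqueue. A minimal, hypothetical kernel-module sketch of that mechanism (module name, workqueue name, and messages are made up):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;
    static struct work_struct demo_work;

    static void demo_delayed(struct work_struct *w)
    {
            pr_info("running the deferred part of the operation\n");
    }

    static int __init demo_init(void)
    {
            demo_wq = create_workqueue("demo_wq");
            if (!demo_wq)
                    return -ENOMEM;

            /* defer the real work instead of doing it in this context */
            INIT_WORK(&demo_work, demo_delayed);
            queue_work(demo_wq, &demo_work);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            flush_workqueue(demo_wq);
            destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
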
index c5b1a8c10411ab108960eb74ed20f1b1a4bed601..7fe5bdee1630ec7fc89d3bc5d598d427f85e75df 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -307,7 +307,9 @@ static void free_ioctx(struct kioctx *ctx)
        kunmap_atomic(ring);
 
        while (atomic_read(&ctx->reqs_active) > 0) {
-               wait_event(ctx->wait, head != ctx->tail);
+               wait_event(ctx->wait,
+                               head != ctx->tail ||
+                               atomic_read(&ctx->reqs_active) <= 0);
 
                avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
 
@@ -1299,8 +1301,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
  *     < min_nr if the timeout specified by timeout has elapsed
  *     before sufficient events are available, where timeout == NULL
  *     specifies an infinite timeout. Note that the timeout pointed to by
- *     timeout is relative and will be updated if not NULL and the
- *     operation blocks. Will fail with -ENOSYS if not implemented.
+ *     timeout is relative.  Will fail with -ENOSYS if not implemented.
  */
 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
                long, min_nr,
index 8615ee89ab55d8c6d96a26556327b31c3967c0b6..f95dddced968f6f4509f1536b80462baa1c4393b 100644 (file)
@@ -265,8 +265,8 @@ befs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                result = filldir(dirent, keybuf, keysize, filp->f_pos,
                                 (ino_t) value, d_type);
        }
-
-       filp->f_pos++;
+       if (!result)
+               filp->f_pos++;
 
        befs_debug(sb, "<--- befs_readdir() filp->f_pos %Ld", filp->f_pos);
 
index b4fb41558111b75d461096d8acdfca34690b8728..290e347b6db3f925f414fd9be4e6ea394da6f887 100644 (file)
@@ -918,7 +918,8 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
                                                           ref->parent, bsz, 0);
                                if (!eb || !extent_buffer_uptodate(eb)) {
                                        free_extent_buffer(eb);
-                                       return -EIO;
+                                       ret = -EIO;
+                                       goto out;
                                }
                                ret = find_extent_in_eb(eb, bytenr,
                                                        *extent_item_pos, &eie);
index 18af6f48781a1f31e1d41c23bb08a2e1b6ea12a7..1431a696501704d3f9e0901c64de537b2b20183a 100644 (file)
@@ -1700,7 +1700,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
                unsigned int j;
                DECLARE_COMPLETION_ONSTACK(complete);
 
-               bio = bio_alloc(GFP_NOFS, num_pages - i);
+               bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
                if (!bio) {
                        printk(KERN_INFO
                               "btrfsic: bio_alloc() for %u pages failed!\n",
index de6de8e60b46019f528b93505d3848adb8699191..02fae7f7e42cb417a14fe0243805bcad4270b592 100644 (file)
@@ -951,10 +951,12 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                        BUG_ON(ret); /* -ENOMEM */
                }
                if (new_flags != 0) {
+                       int level = btrfs_header_level(buf);
+
                        ret = btrfs_set_disk_extent_flags(trans, root,
                                                          buf->start,
                                                          buf->len,
-                                                         new_flags, 0);
+                                                         new_flags, level, 0);
                        if (ret)
                                return ret;
                }
index 63c328a9ce956c716c8ed6e1a0131b5260bb022b..d6dd49b51ba8dfe27069a139ab42cfd88e58c117 100644 (file)
@@ -88,12 +88,12 @@ struct btrfs_ordered_sum;
 /* holds checksums of all the data extents */
 #define BTRFS_CSUM_TREE_OBJECTID 7ULL
 
-/* for storing balance parameters in the root tree */
-#define BTRFS_BALANCE_OBJECTID -4ULL
-
 /* holds quota configuration and tracking */
 #define BTRFS_QUOTA_TREE_OBJECTID 8ULL
 
+/* for storing balance parameters in the root tree */
+#define BTRFS_BALANCE_OBJECTID -4ULL
+
 /* orphan objectid for tracking unlinked/truncated files */
 #define BTRFS_ORPHAN_OBJECTID -5ULL
 
@@ -3075,7 +3075,7 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 flags,
-                               int is_data);
+                               int level, int is_data);
 int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root,
                      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
index f75fcaf79aebd11d54ebcc6df94d6f5b67c55dfe..70b962cc177d973688b0d2deb56965bffbc423ae 100644 (file)
@@ -60,6 +60,7 @@ struct btrfs_delayed_ref_node {
 struct btrfs_delayed_extent_op {
        struct btrfs_disk_key key;
        u64 flags_to_set;
+       int level;
        unsigned int update_key:1;
        unsigned int update_flags:1;
        unsigned int is_data:1;
index 7ba7b3900cb8eb749b82241cc4533b4fc978ef8a..65241f32d3f8aec282a80d443c7acc677f68726a 100644 (file)
@@ -313,6 +313,11 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
        struct btrfs_device *tgt_device = NULL;
        struct btrfs_device *src_device = NULL;
 
+       if (btrfs_fs_incompat(fs_info, RAID56)) {
+               pr_warn("btrfs: dev_replace cannot yet handle RAID5/RAID6\n");
+               return -EINVAL;
+       }
+
        switch (args->start.cont_reading_from_srcdev_mode) {
        case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
        case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
index 4e9ebe1f1827895712afdea8e0e8016bd20088ca..e7b3cb5286a5a699c4716a8a5dc2ae6a0e54f622 100644 (file)
@@ -152,7 +152,7 @@ static struct btrfs_lockdep_keyset {
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
-       { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
+       { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
@@ -1513,7 +1513,6 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
        }
 
        root->commit_root = btrfs_root_node(root);
-       BUG_ON(!root->node); /* -ENOMEM */
 out:
        if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
                root->ref_cows = 1;
@@ -1988,30 +1987,33 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
 {
        free_extent_buffer(info->tree_root->node);
        free_extent_buffer(info->tree_root->commit_root);
-       free_extent_buffer(info->dev_root->node);
-       free_extent_buffer(info->dev_root->commit_root);
-       free_extent_buffer(info->extent_root->node);
-       free_extent_buffer(info->extent_root->commit_root);
-       free_extent_buffer(info->csum_root->node);
-       free_extent_buffer(info->csum_root->commit_root);
-       if (info->quota_root) {
-               free_extent_buffer(info->quota_root->node);
-               free_extent_buffer(info->quota_root->commit_root);
-       }
-
        info->tree_root->node = NULL;
        info->tree_root->commit_root = NULL;
-       info->dev_root->node = NULL;
-       info->dev_root->commit_root = NULL;
-       info->extent_root->node = NULL;
-       info->extent_root->commit_root = NULL;
-       info->csum_root->node = NULL;
-       info->csum_root->commit_root = NULL;
+
+       if (info->dev_root) {
+               free_extent_buffer(info->dev_root->node);
+               free_extent_buffer(info->dev_root->commit_root);
+               info->dev_root->node = NULL;
+               info->dev_root->commit_root = NULL;
+       }
+       if (info->extent_root) {
+               free_extent_buffer(info->extent_root->node);
+               free_extent_buffer(info->extent_root->commit_root);
+               info->extent_root->node = NULL;
+               info->extent_root->commit_root = NULL;
+       }
+       if (info->csum_root) {
+               free_extent_buffer(info->csum_root->node);
+               free_extent_buffer(info->csum_root->commit_root);
+               info->csum_root->node = NULL;
+               info->csum_root->commit_root = NULL;
+       }
        if (info->quota_root) {
+               free_extent_buffer(info->quota_root->node);
+               free_extent_buffer(info->quota_root->commit_root);
                info->quota_root->node = NULL;
                info->quota_root->commit_root = NULL;
        }
-
        if (chunk_root) {
                free_extent_buffer(info->chunk_root->node);
                free_extent_buffer(info->chunk_root->commit_root);
@@ -3128,7 +3130,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
         * caller
         */
        device->flush_bio = NULL;
-       bio = bio_alloc(GFP_NOFS, 0);
+       bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
        if (!bio)
                return -ENOMEM;
 
@@ -3659,8 +3661,11 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
                                         ordered_operations);
 
                list_del_init(&btrfs_inode->ordered_operations);
+               spin_unlock(&root->fs_info->ordered_extent_lock);
 
                btrfs_invalidate_inodes(btrfs_inode->root);
+
+               spin_lock(&root->fs_info->ordered_extent_lock);
        }
 
        spin_unlock(&root->fs_info->ordered_extent_lock);
@@ -3782,8 +3787,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
                list_del_init(&btrfs_inode->delalloc_inodes);
                clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
                          &btrfs_inode->runtime_flags);
+               spin_unlock(&root->fs_info->delalloc_lock);
 
                btrfs_invalidate_inodes(btrfs_inode->root);
+
+               spin_lock(&root->fs_info->delalloc_lock);
        }
 
        spin_unlock(&root->fs_info->delalloc_lock);
@@ -3808,7 +3816,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                while (start <= end) {
                        eb = btrfs_find_tree_block(root, start,
                                                   root->leafsize);
-                       start += eb->len;
+                       start += root->leafsize;
                        if (!eb)
                                continue;
                        wait_on_extent_buffer_writeback(eb);
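
The two hunks above unlink each inode from its list with list_del_init(), drop the fs_info spinlock, call btrfs_invalidate_inodes(), and then retake the lock, presumably because that call cannot run safely under the spinlock. A generic sketch of the unlock-around-the-call pattern; the entry type and helper below are hypothetical:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_entry {
            struct list_head link;
    };

    static void demo_process(struct demo_entry *e)
    {
            /* stands in for a call such as btrfs_invalidate_inodes(),
             * which the hunks above deliberately make with the lock dropped */
    }

    static void demo_drain(struct list_head *head, spinlock_t *lock)
    {
            struct demo_entry *e;

            spin_lock(lock);
            while (!list_empty(head)) {
                    e = list_first_entry(head, struct demo_entry, link);
                    /* unlink before dropping the lock so the walk can
                     * safely restart from the head after relocking */
                    list_del_init(&e->link);
                    spin_unlock(lock);

                    demo_process(e);        /* no spinlock held here */

                    spin_lock(lock);
            }
            spin_unlock(lock);
    }
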
index 2305b5c5cf0012d77de5dde14721ddc1cc0ccc5c..df472ab1b5acca7b411b05bcc414913bcebcc244 100644 (file)
@@ -2070,8 +2070,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
        u32 item_size;
        int ret;
        int err = 0;
-       int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
-                       node->type == BTRFS_SHARED_BLOCK_REF_KEY);
+       int metadata = !extent_op->is_data;
 
        if (trans->aborted)
                return 0;
@@ -2086,11 +2085,8 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
        key.objectid = node->bytenr;
 
        if (metadata) {
-               struct btrfs_delayed_tree_ref *tree_ref;
-
-               tree_ref = btrfs_delayed_node_to_tree_ref(node);
                key.type = BTRFS_METADATA_ITEM_KEY;
-               key.offset = tree_ref->level;
+               key.offset = extent_op->level;
        } else {
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = node->num_bytes;
@@ -2719,7 +2715,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 flags,
-                               int is_data)
+                               int level, int is_data)
 {
        struct btrfs_delayed_extent_op *extent_op;
        int ret;
@@ -2732,6 +2728,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
        extent_op->update_flags = 1;
        extent_op->update_key = 0;
        extent_op->is_data = is_data ? 1 : 0;
+       extent_op->level = level;
 
        ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
                                          num_bytes, extent_op);
@@ -3109,6 +3106,11 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
        WARN_ON(ret);
 
        if (i_size_read(inode) > 0) {
+               ret = btrfs_check_trunc_cache_free_space(root,
+                                       &root->fs_info->global_block_rsv);
+               if (ret)
+                       goto out_put;
+
                ret = btrfs_truncate_free_space_cache(root, trans, path,
                                                      inode);
                if (ret)
@@ -4562,6 +4564,8 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
        fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
+       if (fs_info->quota_root)
+               fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
 
        update_global_block_rsv(fs_info);
@@ -6651,51 +6655,51 @@ use_block_rsv(struct btrfs_trans_handle *trans,
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
        int ret;
+       bool global_updated = false;
 
        block_rsv = get_block_rsv(trans, root);
 
-       if (block_rsv->size == 0) {
-               ret = reserve_metadata_bytes(root, block_rsv, blocksize,
-                                            BTRFS_RESERVE_NO_FLUSH);
-               /*
-                * If we couldn't reserve metadata bytes try and use some from
-                * the global reserve.
-                */
-               if (ret && block_rsv != global_rsv) {
-                       ret = block_rsv_use_bytes(global_rsv, blocksize);
-                       if (!ret)
-                               return global_rsv;
-                       return ERR_PTR(ret);
-               } else if (ret) {
-                       return ERR_PTR(ret);
-               }
+       if (unlikely(block_rsv->size == 0))
+               goto try_reserve;
+again:
+       ret = block_rsv_use_bytes(block_rsv, blocksize);
+       if (!ret)
                return block_rsv;
+
+       if (block_rsv->failfast)
+               return ERR_PTR(ret);
+
+       if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
+               global_updated = true;
+               update_global_block_rsv(root->fs_info);
+               goto again;
        }
 
-       ret = block_rsv_use_bytes(block_rsv, blocksize);
+       if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+               static DEFINE_RATELIMIT_STATE(_rs,
+                               DEFAULT_RATELIMIT_INTERVAL * 10,
+                               /*DEFAULT_RATELIMIT_BURST*/ 1);
+               if (__ratelimit(&_rs))
+                       WARN(1, KERN_DEBUG
+                               "btrfs: block rsv returned %d\n", ret);
+       }
+try_reserve:
+       ret = reserve_metadata_bytes(root, block_rsv, blocksize,
+                                    BTRFS_RESERVE_NO_FLUSH);
        if (!ret)
                return block_rsv;
-       if (ret && !block_rsv->failfast) {
-               if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
-                       static DEFINE_RATELIMIT_STATE(_rs,
-                                       DEFAULT_RATELIMIT_INTERVAL * 10,
-                                       /*DEFAULT_RATELIMIT_BURST*/ 1);
-                       if (__ratelimit(&_rs))
-                               WARN(1, KERN_DEBUG
-                                       "btrfs: block rsv returned %d\n", ret);
-               }
-               ret = reserve_metadata_bytes(root, block_rsv, blocksize,
-                                            BTRFS_RESERVE_NO_FLUSH);
-               if (!ret) {
-                       return block_rsv;
-               } else if (ret && block_rsv != global_rsv) {
-                       ret = block_rsv_use_bytes(global_rsv, blocksize);
-                       if (!ret)
-                               return global_rsv;
-               }
+       /*
+        * If we couldn't reserve metadata bytes try and use some from
+        * the global reserve if its space type is the same as the global
+        * reservation.
+        */
+       if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
+           block_rsv->space_info == global_rsv->space_info) {
+               ret = block_rsv_use_bytes(global_rsv, blocksize);
+               if (!ret)
+                       return global_rsv;
        }
-
-       return ERR_PTR(-ENOSPC);
+       return ERR_PTR(ret);
 }
 
 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
@@ -6763,6 +6767,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                        extent_op->update_key = 1;
                extent_op->update_flags = 1;
                extent_op->is_data = 0;
+               extent_op->level = level;
 
                ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
                                        ins.objectid,
@@ -6934,7 +6939,8 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
                ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
                BUG_ON(ret); /* -ENOMEM */
                ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
-                                                 eb->len, flag, 0);
+                                                 eb->len, flag,
+                                                 btrfs_header_level(eb), 0);
                BUG_ON(ret); /* -ENOMEM */
                wc->flags[level] |= flag;
        }
index 32d67a822e93c0cf53f5c0409bd73ff863bf90bc..e7e7afb4a87268211e8b0ef881a6eeac0068eefd 100644 (file)
@@ -23,6 +23,7 @@
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
+static struct bio_set *btrfs_bioset;
 
 #ifdef CONFIG_BTRFS_DEBUG
 static LIST_HEAD(buffers);
@@ -125,10 +126,20 @@ int __init extent_io_init(void)
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!extent_buffer_cache)
                goto free_state_cache;
+
+       btrfs_bioset = bioset_create(BIO_POOL_SIZE,
+                                    offsetof(struct btrfs_io_bio, bio));
+       if (!btrfs_bioset)
+               goto free_buffer_cache;
        return 0;
 
+free_buffer_cache:
+       kmem_cache_destroy(extent_buffer_cache);
+       extent_buffer_cache = NULL;
+
 free_state_cache:
        kmem_cache_destroy(extent_state_cache);
+       extent_state_cache = NULL;
        return -ENOMEM;
 }
 
@@ -145,6 +156,8 @@ void extent_io_exit(void)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
                kmem_cache_destroy(extent_buffer_cache);
+       if (btrfs_bioset)
+               bioset_free(btrfs_bioset);
 }
 
 void extent_io_tree_init(struct extent_io_tree *tree,
@@ -1947,28 +1960,6 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
                SetPageUptodate(page);
 }
 
-/*
- * helper function to unlock a page if all the extents in the tree
- * for that page are unlocked
- */
-static void check_page_locked(struct extent_io_tree *tree, struct page *page)
-{
-       u64 start = page_offset(page);
-       u64 end = start + PAGE_CACHE_SIZE - 1;
-       if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
-               unlock_page(page);
-}
-
-/*
- * helper function to end page writeback if all the extents
- * in the tree for that page are done with writeback
- */
-static void check_page_writeback(struct extent_io_tree *tree,
-                                struct page *page)
-{
-       end_page_writeback(page);
-}
-
 /*
  * When IO fails, either with EIO or csum verification fails, we
  * try other mirrors that might have a good copy of the data.  This
@@ -2046,7 +2037,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
        if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
                return 0;
 
-       bio = bio_alloc(GFP_NOFS, 1);
+       bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio)
                return -EIO;
        bio->bi_private = &compl;
@@ -2336,7 +2327,7 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
                return -EIO;
        }
 
-       bio = bio_alloc(GFP_NOFS, 1);
+       bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio) {
                free_io_failure(inode, failrec, 0);
                return -EIO;
@@ -2398,19 +2389,24 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
-       int whole_page;
 
        do {
                struct page *page = bvec->bv_page;
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-               start = page_offset(page) + bvec->bv_offset;
-               end = start + bvec->bv_len - 1;
+               /* We always issue full-page reads, but if some block
+                * in a page fails to read, blk_update_request() will
+                * advance bv_offset and adjust bv_len to compensate.
+                * Print a warning for nonzero offsets, and an error
+                * if they don't add up to a full page.  */
+               if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+                       printk("%s page write in btrfs with offset %u and length %u\n",
+                              bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+                              ? KERN_ERR "partial" : KERN_INFO "incomplete",
+                              bvec->bv_offset, bvec->bv_len);
 
-               if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
-                       whole_page = 1;
-               else
-                       whole_page = 0;
+               start = page_offset(page);
+               end = start + bvec->bv_offset + bvec->bv_len - 1;
 
                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
@@ -2418,10 +2414,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
                if (end_extent_writepage(page, err, start, end))
                        continue;
 
-               if (whole_page)
-                       end_page_writeback(page);
-               else
-                       check_page_writeback(tree, page);
+               end_page_writeback(page);
        } while (bvec >= bio->bi_io_vec);
 
        bio_put(bio);
@@ -2446,7 +2439,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
        struct extent_io_tree *tree;
        u64 start;
        u64 end;
-       int whole_page;
        int mirror;
        int ret;
 
@@ -2457,19 +2449,26 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                struct page *page = bvec->bv_page;
                struct extent_state *cached = NULL;
                struct extent_state *state;
+               struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 
                pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
-                        "mirror=%ld\n", (u64)bio->bi_sector, err,
-                        (long int)bio->bi_bdev);
+                        "mirror=%lu\n", (u64)bio->bi_sector, err,
+                        io_bio->mirror_num);
                tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-               start = page_offset(page) + bvec->bv_offset;
-               end = start + bvec->bv_len - 1;
+               /* We always issue full-page reads, but if some block
+                * in a page fails to read, blk_update_request() will
+                * advance bv_offset and adjust bv_len to compensate.
+                * Print a warning for nonzero offsets, and an error
+                * if they don't add up to a full page.  */
+               if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
+                       printk("%s page read in btrfs with offset %u and length %u\n",
+                              bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
+                              ? KERN_ERR "partial" : KERN_INFO "incomplete",
+                              bvec->bv_offset, bvec->bv_len);
 
-               if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
-                       whole_page = 1;
-               else
-                       whole_page = 0;
+               start = page_offset(page);
+               end = start + bvec->bv_offset + bvec->bv_len - 1;
 
                if (++bvec <= bvec_end)
                        prefetchw(&bvec->bv_page->flags);
@@ -2485,7 +2484,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                }
                spin_unlock(&tree->lock);
 
-               mirror = (int)(unsigned long)bio->bi_bdev;
+               mirror = io_bio->mirror_num;
                if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
                        ret = tree->ops->readpage_end_io_hook(page, start, end,
                                                              state, mirror);
@@ -2528,39 +2527,35 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
                }
                unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
-               if (whole_page) {
-                       if (uptodate) {
-                               SetPageUptodate(page);
-                       } else {
-                               ClearPageUptodate(page);
-                               SetPageError(page);
-                       }
-                       unlock_page(page);
+               if (uptodate) {
+                       SetPageUptodate(page);
                } else {
-                       if (uptodate) {
-                               check_page_uptodate(tree, page);
-                       } else {
-                               ClearPageUptodate(page);
-                               SetPageError(page);
-                       }
-                       check_page_locked(tree, page);
+                       ClearPageUptodate(page);
+                       SetPageError(page);
                }
+               unlock_page(page);
        } while (bvec <= bvec_end);
 
        bio_put(bio);
 }
 
+/*
+ * this allocates from the btrfs_bioset.  We're returning a bio right now
+ * but you can call btrfs_io_bio for the appropriate container_of magic
+ */
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
                gfp_t gfp_flags)
 {
        struct bio *bio;
 
-       bio = bio_alloc(gfp_flags, nr_vecs);
+       bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
 
        if (bio == NULL && (current->flags & PF_MEMALLOC)) {
-               while (!bio && (nr_vecs /= 2))
-                       bio = bio_alloc(gfp_flags, nr_vecs);
+               while (!bio && (nr_vecs /= 2)) {
+                       bio = bio_alloc_bioset(gfp_flags,
+                                              nr_vecs, btrfs_bioset);
+               }
        }
 
        if (bio) {
@@ -2571,6 +2566,19 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
        return bio;
 }
 
+struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
+{
+       return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
+}
+
+
+/* this also allocates from the btrfs_bioset */
+struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+       return bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
+}
+
+
 static int __must_check submit_one_bio(int rw, struct bio *bio,
                                       int mirror_num, unsigned long bio_flags)
 {
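
The comment above mentions that callers can recover the wrapper with "container_of magic", and end_bio_extent_readpage() does so via btrfs_io_bio(bio). The real struct btrfs_io_bio lives in a btrfs header outside this diff, so apart from mirror_num and the embedded bio the layout below is an assumption; this is only a sketch of what such an accessor typically looks like:

    #include <linux/bio.h>
    #include <linux/kernel.h>

    struct btrfs_io_bio {
            unsigned int mirror_num;  /* read back in end_bio_extent_readpage() */
            struct bio bio;           /* embedded bio; the offsetof() passed to
                                       * bioset_create() in extent_io_init() is
                                       * the front padding covering the fields
                                       * placed before it */
    };

    static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
    {
            return container_of(bio, struct btrfs_io_bio, bio);
    }
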
@@ -3988,7 +3996,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                last_for_get_extent = isize;
        }
 
-       lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
+       lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
                         &cached_state);
 
        em = get_extent_skip_holes(inode, start, last_for_get_extent,
@@ -4075,7 +4083,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 out_free:
        free_extent_map(em);
 out:
-       unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
+       unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
                             &cached_state, GFP_NOFS);
        return ret;
 }
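
The fiemap fix above reflects that these extent ranges are treated as inclusive [start, end], so a span of len bytes ends at start + len - 1 rather than start + len. A trivial userspace illustration of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t start = 0, len = 4096;

            /* last byte of the first 4 KiB block is 4095, not 4096 */
            printf("lock [%llu, %llu]\n",
                   (unsigned long long)start,
                   (unsigned long long)(start + len - 1));
            return 0;
    }
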
index a2c03a17500971f131b8a43051b1269e86bfc0af..41fb81e7ec53c3fda80caf28a039af7b4c1a0682 100644 (file)
@@ -336,6 +336,8 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
                gfp_t gfp_flags);
+struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
+struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
 
 struct btrfs_fs_info;
 
index ecca6c7375a610dbee385a71781cece9082935e0..e53009657f0e5b91b638f3d31d5e4d2cbf919f62 100644 (file)
@@ -197,30 +197,32 @@ int create_free_space_inode(struct btrfs_root *root,
                                         block_group->key.objectid);
 }
 
-int btrfs_truncate_free_space_cache(struct btrfs_root *root,
-                                   struct btrfs_trans_handle *trans,
-                                   struct btrfs_path *path,
-                                   struct inode *inode)
+int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+                                      struct btrfs_block_rsv *rsv)
 {
-       struct btrfs_block_rsv *rsv;
        u64 needed_bytes;
-       loff_t oldsize;
-       int ret = 0;
-
-       rsv = trans->block_rsv;
-       trans->block_rsv = &root->fs_info->global_block_rsv;
+       int ret;
 
        /* 1 for slack space, 1 for updating the inode */
        needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
                btrfs_calc_trans_metadata_size(root, 1);
 
-       spin_lock(&trans->block_rsv->lock);
-       if (trans->block_rsv->reserved < needed_bytes) {
-               spin_unlock(&trans->block_rsv->lock);
-               trans->block_rsv = rsv;
-               return -ENOSPC;
-       }
-       spin_unlock(&trans->block_rsv->lock);
+       spin_lock(&rsv->lock);
+       if (rsv->reserved < needed_bytes)
+               ret = -ENOSPC;
+       else
+               ret = 0;
+       spin_unlock(&rsv->lock);
+       return ret;
+}
+
+int btrfs_truncate_free_space_cache(struct btrfs_root *root,
+                                   struct btrfs_trans_handle *trans,
+                                   struct btrfs_path *path,
+                                   struct inode *inode)
+{
+       loff_t oldsize;
+       int ret = 0;
 
        oldsize = i_size_read(inode);
        btrfs_i_size_write(inode, 0);
@@ -232,9 +234,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
         */
        ret = btrfs_truncate_inode_items(trans, root, inode,
                                         0, BTRFS_EXTENT_DATA_KEY);
-
        if (ret) {
-               trans->block_rsv = rsv;
                btrfs_abort_transaction(trans, root, ret);
                return ret;
        }
@@ -242,7 +242,6 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
-       trans->block_rsv = rsv;
 
        return ret;
 }
@@ -920,10 +919,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
        /* Make sure we can fit our crcs into the first page */
        if (io_ctl.check_crcs &&
-           (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
-               WARN_ON(1);
+           (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
                goto out_nospc;
-       }
 
        io_ctl_set_generation(&io_ctl, trans->transid);
 
index 4dc17d8809c76d25cddb635e29bd8309b340f464..8b7f19f4496153886975547923cdcdc815940e96 100644 (file)
@@ -54,6 +54,8 @@ int create_free_space_inode(struct btrfs_root *root,
                            struct btrfs_block_group_cache *block_group,
                            struct btrfs_path *path);
 
+int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+                                      struct btrfs_block_rsv *rsv);
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                                    struct btrfs_trans_handle *trans,
                                    struct btrfs_path *path,
index d26f67a59e36f9caf3e91832fa1d4c943c8c9040..2c66ddbbe670e0ab80753dc41eb4054aacd00388 100644 (file)
@@ -429,11 +429,12 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
        num_bytes = trans->bytes_reserved;
        /*
         * 1 item for inode item insertion if need
-        * 3 items for inode item update (in the worst case)
+        * 4 items for inode item update (in the worst case)
+        * 1 item for slack space if we need to do truncation
         * 1 item for free space object
         * 3 items for pre-allocation
         */
-       trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
+       trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10);
        ret = btrfs_block_rsv_add(root, trans->block_rsv,
                                  trans->bytes_reserved,
                                  BTRFS_RESERVE_NO_FLUSH);
@@ -468,7 +469,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
        if (i_size_read(inode) > 0) {
                ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
                if (ret) {
-                       btrfs_abort_transaction(trans, root, ret);
+                       if (ret != -ENOSPC)
+                               btrfs_abort_transaction(trans, root, ret);
                        goto out_put;
                }
        }
index 9b31b3b091fceb6536612b30e64f2fd191d47e89..af978f7682b3441282b0f54e62a60b7534d51203 100644 (file)
@@ -715,8 +715,10 @@ static noinline int submit_compressed_extents(struct inode *inode,
                                        async_extent->ram_size - 1, 0);
 
                em = alloc_extent_map();
-               if (!em)
+               if (!em) {
+                       ret = -ENOMEM;
                        goto out_free_reserve;
+               }
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;
@@ -923,8 +925,10 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
                }
 
                em = alloc_extent_map();
-               if (!em)
+               if (!em) {
+                       ret = -ENOMEM;
                        goto out_reserve;
+               }
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
@@ -4724,6 +4728,7 @@ void btrfs_evict_inode(struct inode *inode)
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root);
 no_delete:
+       btrfs_remove_delayed_node(inode);
        clear_inode(inode);
        return;
 }
@@ -4839,14 +4844,13 @@ static void inode_tree_add(struct inode *inode)
        struct rb_node **p;
        struct rb_node *parent;
        u64 ino = btrfs_ino(inode);
-again:
-       p = &root->inode_tree.rb_node;
-       parent = NULL;
 
        if (inode_unhashed(inode))
                return;
-
+again:
+       parent = NULL;
        spin_lock(&root->inode_lock);
+       p = &root->inode_tree.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_inode, rb_node);
@@ -6928,7 +6932,11 @@ struct btrfs_dio_private {
        /* IO errors */
        int errors;
 
+       /* orig_bio is our btrfs_io_bio */
        struct bio *orig_bio;
+
+       /* dio_bio came from fs/direct-io.c */
+       struct bio *dio_bio;
 };
 
 static void btrfs_endio_direct_read(struct bio *bio, int err)
@@ -6938,6 +6946,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
        struct bio_vec *bvec = bio->bi_io_vec;
        struct inode *inode = dip->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct bio *dio_bio;
        u64 start;
 
        start = dip->logical_offset;
@@ -6977,14 +6986,15 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 
        unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
                      dip->logical_offset + dip->bytes - 1);
-       bio->bi_private = dip->private;
+       dio_bio = dip->dio_bio;
 
        kfree(dip);
 
        /* If we had a csum failure make sure to clear the uptodate flag */
        if (err)
-               clear_bit(BIO_UPTODATE, &bio->bi_flags);
-       dio_end_io(bio, err);
+               clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
+       dio_end_io(dio_bio, err);
+       bio_put(bio);
 }
 
 static void btrfs_endio_direct_write(struct bio *bio, int err)
@@ -6995,6 +7005,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
        struct btrfs_ordered_extent *ordered = NULL;
        u64 ordered_offset = dip->logical_offset;
        u64 ordered_bytes = dip->bytes;
+       struct bio *dio_bio;
        int ret;
 
        if (err)
@@ -7022,14 +7033,15 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
                goto again;
        }
 out_done:
-       bio->bi_private = dip->private;
+       dio_bio = dip->dio_bio;
 
        kfree(dip);
 
        /* If we had an error make sure to clear the uptodate flag */
        if (err)
-               clear_bit(BIO_UPTODATE, &bio->bi_flags);
-       dio_end_io(bio, err);
+               clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
+       dio_end_io(dio_bio, err);
+       bio_put(bio);
 }
 
 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
@@ -7065,10 +7077,10 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
        if (!atomic_dec_and_test(&dip->pending_bios))
                goto out;
 
-       if (dip->errors)
+       if (dip->errors) {
                bio_io_error(dip->orig_bio);
-       else {
-               set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
+       } else {
+               set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags);
                bio_endio(dip->orig_bio, 0);
        }
 out:
@@ -7243,25 +7255,34 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        return 0;
 }
 
-static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
-                               loff_t file_offset)
+static void btrfs_submit_direct(int rw, struct bio *dio_bio,
+                               struct inode *inode, loff_t file_offset)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_dio_private *dip;
-       struct bio_vec *bvec = bio->bi_io_vec;
+       struct bio_vec *bvec = dio_bio->bi_io_vec;
+       struct bio *io_bio;
        int skip_sum;
        int write = rw & REQ_WRITE;
        int ret = 0;
 
        skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
+       io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
+
+       if (!io_bio) {
+               ret = -ENOMEM;
+               goto free_ordered;
+       }
+
        dip = kmalloc(sizeof(*dip), GFP_NOFS);
        if (!dip) {
                ret = -ENOMEM;
-               goto free_ordered;
+               goto free_io_bio;
        }
 
-       dip->private = bio->bi_private;
+       dip->private = dio_bio->bi_private;
+       io_bio->bi_private = dio_bio->bi_private;
        dip->inode = inode;
        dip->logical_offset = file_offset;
 
@@ -7269,22 +7290,27 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
        do {
                dip->bytes += bvec->bv_len;
                bvec++;
-       } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
+       } while (bvec <= (dio_bio->bi_io_vec + dio_bio->bi_vcnt - 1));
 
-       dip->disk_bytenr = (u64)bio->bi_sector << 9;
-       bio->bi_private = dip;
+       dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
+       io_bio->bi_private = dip;
        dip->errors = 0;
-       dip->orig_bio = bio;
+       dip->orig_bio = io_bio;
+       dip->dio_bio = dio_bio;
        atomic_set(&dip->pending_bios, 0);
 
        if (write)
-               bio->bi_end_io = btrfs_endio_direct_write;
+               io_bio->bi_end_io = btrfs_endio_direct_write;
        else
-               bio->bi_end_io = btrfs_endio_direct_read;
+               io_bio->bi_end_io = btrfs_endio_direct_read;
 
        ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
        if (!ret)
                return;
+
+free_io_bio:
+       bio_put(io_bio);
+
 free_ordered:
        /*
         * If this is a write, we need to clean up the reserved space and kill
@@ -7300,7 +7326,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
                btrfs_put_ordered_extent(ordered);
                btrfs_put_ordered_extent(ordered);
        }
-       bio_endio(bio, ret);
+       bio_endio(dio_bio, ret);
 }
 
 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
@@ -7979,7 +8005,6 @@ void btrfs_destroy_inode(struct inode *inode)
        inode_tree_del(inode);
        btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
 free:
-       btrfs_remove_delayed_node(inode);
        call_rcu(&inode->i_rcu, btrfs_i_callback);
 }
 
index 0de4a2fcfb24f1cdd2e8ae9574405bd4ce247526..0f81d67cdc8da651890df1a89c01df9188667d97 100644 (file)
@@ -1801,7 +1801,11 @@ static noinline int copy_to_sk(struct btrfs_root *root,
                item_off = btrfs_item_ptr_offset(leaf, i);
                item_len = btrfs_item_size_nr(leaf, i);
 
-               if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+               btrfs_item_key_to_cpu(leaf, key, i);
+               if (!key_in_sk(key, sk))
+                       continue;
+
+               if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
                        item_len = 0;
 
                if (sizeof(sh) + item_len + *sk_offset >
@@ -1810,10 +1814,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
                        goto overflow;
                }
 
-               btrfs_item_key_to_cpu(leaf, key, i);
-               if (!key_in_sk(key, sk))
-                       continue;
-
                sh.objectid = key->objectid;
                sh.offset = key->offset;
                sh.type = key->type;
index 0740621daf6ca370498e78688922347c54e172df..0525e1389f5b16658ccea028da6408da812c974b 100644 (file)
@@ -1050,7 +1050,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
        }
 
        /* put a new bio on the list */
-       bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
+       bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
        if (!bio)
                return -ENOMEM;
 
index 704a1b8d2a2bae870fcf678d27aa4601f6745011..395b82031a4286f2a8a1481bc87a684172468b96 100644 (file)
@@ -1773,7 +1773,7 @@ int replace_path(struct btrfs_trans_handle *trans,
                        if (!eb || !extent_buffer_uptodate(eb)) {
                                ret = (!eb) ? -ENOMEM : -EIO;
                                free_extent_buffer(eb);
-                               return ret;
+                               break;
                        }
                        btrfs_tree_lock(eb);
                        if (cow) {
@@ -3350,6 +3350,11 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
        }
 
 truncate:
+       ret = btrfs_check_trunc_cache_free_space(root,
+                                                &fs_info->global_block_rsv);
+       if (ret)
+               goto out;
+
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
index f489e24659a434fa2b67984a5a8a7cf9df2b06b6..79bd479317cb53cbea30a7021d81a891ed9ae20c 100644 (file)
@@ -1296,7 +1296,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                }
 
                WARN_ON(!page->page);
-               bio = bio_alloc(GFP_NOFS, 1);
+               bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
                if (!bio) {
                        page->io_error = 1;
                        sblock->no_io_error_seen = 0;
@@ -1431,7 +1431,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                        return -EIO;
                }
 
-               bio = bio_alloc(GFP_NOFS, 1);
+               bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
                if (!bio)
                        return -EIO;
                bio->bi_bdev = page_bad->dev->bdev;
@@ -1522,7 +1522,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
                sbio->dev = wr_ctx->tgtdev;
                bio = sbio->bio;
                if (!bio) {
-                       bio = bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
+                       bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
                        if (!bio) {
                                mutex_unlock(&wr_ctx->wr_lock);
                                return -ENOMEM;
@@ -1930,7 +1930,7 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
                sbio->dev = spage->dev;
                bio = sbio->bio;
                if (!bio) {
-                       bio = bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
+                       bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
                        if (!bio)
                                return -ENOMEM;
                        sbio->bio = bio;
@@ -3307,7 +3307,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
                        "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
                return -EIO;
        }
-       bio = bio_alloc(GFP_NOFS, 1);
+       bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
        if (!bio) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
index a4807ced23cc5791d500d28ee9f91fdc915f5df0..f0857e092a3cb1af485604850052440579a1bbf1 100644 (file)
@@ -1263,6 +1263,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 
                btrfs_dev_replace_suspend_for_unmount(fs_info);
                btrfs_scrub_cancel(fs_info);
+               btrfs_pause_balance(fs_info);
 
                ret = btrfs_commit_super(root);
                if (ret)
index 0e925ced971ba87bc0e356c6a2c7e3dbf1b5e91e..8bffb9174afba04d8375b96f754256b68ff9b4ef 100644 (file)
@@ -3120,14 +3120,13 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
        allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
        if (num_devices == 1)
                allowed |= BTRFS_BLOCK_GROUP_DUP;
-       else if (num_devices < 4)
+       else if (num_devices > 1)
                allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
-       else
-               allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
-                               BTRFS_BLOCK_GROUP_RAID10 |
-                               BTRFS_BLOCK_GROUP_RAID5 |
-                               BTRFS_BLOCK_GROUP_RAID6);
-
+       if (num_devices > 2)
+               allowed |= BTRFS_BLOCK_GROUP_RAID5;
+       if (num_devices > 3)
+               allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
+                           BTRFS_BLOCK_GROUP_RAID6);
        if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
            (!alloc_profile_is_valid(bctl->data.target, 1) ||
             (bctl->data.target & ~allowed))) {
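
btrfs_balance() now builds the set of allowed target profiles cumulatively by device count: DUP for a single device, RAID0/RAID1 from two devices, RAID5 from three, RAID10/RAID6 from four. A small sketch of that selection, using placeholder flag values rather than the real BTRFS_BLOCK_GROUP_* constants, could be:

    #include <stdio.h>

    /* Placeholder bit values; the kernel's BTRFS_BLOCK_GROUP_* constants differ. */
    #define GRP_DUP    (1u << 0)
    #define GRP_RAID0  (1u << 1)
    #define GRP_RAID1  (1u << 2)
    #define GRP_RAID5  (1u << 3)
    #define GRP_RAID6  (1u << 4)
    #define GRP_RAID10 (1u << 5)

    /* Mirror the cumulative profile selection above: each threshold only adds
     * profiles, so three devices allow RAID0/1 plus RAID5, four add RAID10/6. */
    static unsigned int allowed_profiles(int num_devices)
    {
        unsigned int allowed = 0;

        if (num_devices == 1)
            allowed |= GRP_DUP;
        else if (num_devices > 1)
            allowed |= GRP_RAID0 | GRP_RAID1;
        if (num_devices > 2)
            allowed |= GRP_RAID5;
        if (num_devices > 3)
            allowed |= GRP_RAID10 | GRP_RAID6;
        return allowed;
    }

    int main(void)
    {
        for (int n = 1; n <= 4; n++)
            printf("%d devices -> 0x%02x\n", n, allowed_profiles(n));
        return 0;
    }
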
@@ -5019,42 +5018,16 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        return 0;
 }
 
-static void *merge_stripe_index_into_bio_private(void *bi_private,
-                                                unsigned int stripe_index)
-{
-       /*
-        * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
-        * at most 1.
-        * The alternative solution (instead of stealing bits from the
-        * pointer) would be to allocate an intermediate structure
-        * that contains the old private pointer plus the stripe_index.
-        */
-       BUG_ON((((uintptr_t)bi_private) & 3) != 0);
-       BUG_ON(stripe_index > 3);
-       return (void *)(((uintptr_t)bi_private) | stripe_index);
-}
-
-static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
-{
-       return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
-}
-
-static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
-{
-       return (unsigned int)((uintptr_t)bi_private) & 3;
-}
-
 static void btrfs_end_bio(struct bio *bio, int err)
 {
-       struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
+       struct btrfs_bio *bbio = bio->bi_private;
        int is_orig_bio = 0;
 
        if (err) {
                atomic_inc(&bbio->error);
                if (err == -EIO || err == -EREMOTEIO) {
                        unsigned int stripe_index =
-                               extract_stripe_index_from_bio_private(
-                                       bio->bi_private);
+                               btrfs_io_bio(bio)->stripe_index;
                        struct btrfs_device *dev;
 
                        BUG_ON(stripe_index >= bbio->num_stripes);
@@ -5084,8 +5057,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
                }
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
-               bio->bi_bdev = (struct block_device *)
-                                       (unsigned long)bbio->mirror_num;
+               btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
                /* only send an error to the higher layers if it is
                 * beyond the tolerance of the btrfs bio
                 */
@@ -5211,8 +5183,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
        struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
 
        bio->bi_private = bbio;
-       bio->bi_private = merge_stripe_index_into_bio_private(
-                       bio->bi_private, (unsigned int)dev_nr);
+       btrfs_io_bio(bio)->stripe_index = dev_nr;
        bio->bi_end_io = btrfs_end_bio;
        bio->bi_sector = physical >> 9;
 #ifdef DEBUG
@@ -5273,8 +5244,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
        if (atomic_dec_and_test(&bbio->stripes_pending)) {
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
-               bio->bi_bdev = (struct block_device *)
-                       (unsigned long)bbio->mirror_num;
+               btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
                bio->bi_sector = logical >> 9;
                kfree(bbio);
                bio_endio(bio, -EIO);
@@ -5352,7 +5322,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                }
 
                if (dev_nr < total_devs - 1) {
-                       bio = bio_clone(first_bio, GFP_NOFS);
+                       bio = btrfs_bio_clone(first_bio, GFP_NOFS);
                        BUG_ON(!bio); /* -ENOMEM */
                } else {
                        bio = first_bio;
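
The removed merge/extract helpers above encoded the stripe index in the low two bits of bi_private and the mirror number in bi_bdev; the replacement stores both in the btrfs_io_bio wrapper (see the volumes.h hunk below). For reference, a standalone userspace demo of the old low-bit pointer-stuffing trick, and of why it tops out at an index of 3, might look like:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* The removed helpers relied on the low two bits of an aligned pointer
     * being zero, so a stripe index of at most 3 could be smuggled alongside
     * the private pointer. */
    static void *merge_index(void *ptr, unsigned int idx)
    {
        assert(((uintptr_t)ptr & 3) == 0);   /* pointer must be 4-byte aligned */
        assert(idx <= 3);                    /* only two spare bits */
        return (void *)((uintptr_t)ptr | idx);
    }

    static void *extract_ptr(void *packed)
    {
        return (void *)((uintptr_t)packed & ~(uintptr_t)3);
    }

    static unsigned int extract_index(void *packed)
    {
        return (unsigned int)((uintptr_t)packed & 3);
    }

    int main(void)
    {
        int *data = malloc(sizeof(*data));   /* malloc gives sufficient alignment */
        void *packed = merge_index(data, 2);

        printf("ptr ok: %d, idx: %u\n", extract_ptr(packed) == data,
               extract_index(packed));
        free(data);
        return 0;
    }
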
index 845ccbb0d2e35e9ad5b92101102b7e853d800ab0..f6247e2a47f7b643d8c88f2ee4c5b53f5c79f043 100644 (file)
@@ -152,6 +152,26 @@ struct btrfs_fs_devices {
        int rotating;
 };
 
+/*
+ * we need the mirror number and stripe index to be passed around
+ * the call chain while we are processing end_io (especially errors).
+ * Really, what we need is a btrfs_bio structure that has this info
+ * and is properly sized with its stripe array, but we're not there
+ * quite yet.  We have our own btrfs bioset, and all of the bios
+ * we allocate are actually btrfs_io_bios.  We'll cram as much of
+ * struct btrfs_bio as we can into this over time.
+ */
+struct btrfs_io_bio {
+       unsigned long mirror_num;
+       unsigned long stripe_index;
+       struct bio bio;
+};
+
+static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
+{
+       return container_of(bio, struct btrfs_io_bio, bio);
+}
+
 struct btrfs_bio_stripe {
        struct btrfs_device *dev;
        u64 physical;
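
The btrfs_io_bio() helper above recovers the wrapper from the embedded struct bio with container_of(), so per-bio metadata such as mirror_num and stripe_index travels with the bio without touching bi_private. A userspace sketch of the same idiom, with made-up types (bio_like, io_wrapper), could be:

    #include <stddef.h>
    #include <stdio.h>

    struct bio_like {
        int sector;
    };

    struct io_wrapper {
        unsigned long mirror_num;
        unsigned long stripe_index;
        struct bio_like bio;         /* the member handed to generic code */
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Callbacks only ever see a struct bio_like *, yet can reach the wrapper. */
    static struct io_wrapper *to_wrapper(struct bio_like *b)
    {
        return container_of(b, struct io_wrapper, bio);
    }

    int main(void)
    {
        struct io_wrapper w = { .mirror_num = 1, .stripe_index = 2 };
        struct bio_like *b = &w.bio;

        printf("stripe_index = %lu\n", to_wrapper(b)->stripe_index);
        return 0;
    }
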
index 8e33ec65847b1989749a4b1b72e239fa0c64d94a..58df174deb10e5b354a9003ea3ccbfc5240e5cd0 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/vfs.h>
 #include <linux/fs.h>
+#include <linux/inet.h>
 #include "cifsglob.h"
 #include "cifsproto.h"
 #include "cifsfs.h"
@@ -48,58 +49,74 @@ void cifs_dfs_release_automount_timer(void)
 }
 
 /**
- * cifs_get_share_name -       extracts share name from UNC
- * @node_name: pointer to UNC string
+ * cifs_build_devname - build a devicename from a UNC and optional prepath
+ * @nodename:  pointer to UNC string
+ * @prepath:   pointer to prefixpath (or NULL if there isn't one)
  *
- * Extracts sharename form full UNC.
- * i.e. strips from UNC trailing path that is not part of share
- * name and fixup missing '\' in the beginning of DFS node refferal
- * if necessary.
- * Returns pointer to share name on success or ERR_PTR on error.
- * Caller is responsible for freeing returned string.
+ * Build a new cifs devicename after chasing a DFS referral. Allocate a buffer
+ * big enough to hold the final thing. Copy the UNC from the nodename, and
+ * concatenate the prepath onto the end of it if there is one.
+ *
+ * Returns pointer to the built string, or an ERR_PTR. Caller is responsible
+ * for freeing the returned string.
  */
-static char *cifs_get_share_name(const char *node_name)
+static char *
+cifs_build_devname(char *nodename, const char *prepath)
 {
-       int len;
-       char *UNC;
-       char *pSep;
-
-       len = strlen(node_name);
-       UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */,
-                        GFP_KERNEL);
-       if (!UNC)
-               return ERR_PTR(-ENOMEM);
+       size_t pplen;
+       size_t unclen;
+       char *dev;
+       char *pos;
+
+       /* skip over any preceding delimiters */
+       nodename += strspn(nodename, "\\");
+       if (!*nodename)
+               return ERR_PTR(-EINVAL);
 
-       /* get share name and server name */
-       if (node_name[1] != '\\') {
-               UNC[0] = '\\';
-               strncpy(UNC+1, node_name, len);
-               len++;
-               UNC[len] = 0;
-       } else {
-               strncpy(UNC, node_name, len);
-               UNC[len] = 0;
-       }
+       /* get length of UNC and set pos to last char */
+       unclen = strlen(nodename);
+       pos = nodename + unclen - 1;
 
-       /* find server name end */
-       pSep = memchr(UNC+2, '\\', len-2);
-       if (!pSep) {
-               cifs_dbg(VFS, "%s: no server name end in node name: %s\n",
-                        __func__, node_name);
-               kfree(UNC);
-               return ERR_PTR(-EINVAL);
+       /* trim off any trailing delimiters */
+       while (*pos == '\\') {
+               --pos;
+               --unclen;
        }
 
-       /* find sharename end */
-       pSep++;
-       pSep = memchr(UNC+(pSep-UNC), '\\', len-(pSep-UNC));
-       if (pSep) {
-               /* trim path up to sharename end
-                * now we have share name in UNC */
-               *pSep = 0;
+       /* allocate a buffer:
+        * +2 for preceding "//"
+        * +1 for delimiter between UNC and prepath
+        * +1 for trailing NULL
+        */
+       pplen = prepath ? strlen(prepath) : 0;
+       dev = kmalloc(2 + unclen + 1 + pplen + 1, GFP_KERNEL);
+       if (!dev)
+               return ERR_PTR(-ENOMEM);
+
+       pos = dev;
+       /* add the initial "//" */
+       *pos = '/';
+       ++pos;
+       *pos = '/';
+       ++pos;
+
+       /* copy in the UNC portion from referral */
+       memcpy(pos, nodename, unclen);
+       pos += unclen;
+
+       /* copy the prefixpath remainder (if there is one) */
+       if (pplen) {
+               *pos = '/';
+               ++pos;
+               memcpy(pos, prepath, pplen);
+               pos += pplen;
        }
 
-       return UNC;
+       /* NULL terminator */
+       *pos = '\0';
+
+       convert_delimiter(dev, '/');
+       return dev;
 }
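
cifs_build_devname() replaces cifs_get_share_name(): rather than carving out just the share and passing the prefix path separately through unc=/prefixpath= options, it emits a single forward-slash device string of the form //server/share/prepath. A rough userspace sketch of the same trimming, sizing and concatenation, with a simplified stand-in for convert_delimiter(), might be:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-in for the kernel's convert_delimiter(). */
    static void convert_delimiter(char *s, char to)
    {
        for (; *s; s++)
            if (*s == '\\' || *s == '/')
                *s = to;
    }

    /* Build "//node/prepath" from a UNC that may carry leading or trailing
     * backslashes, sized as in the hunk above: 2 + unclen + 1 + pplen + 1. */
    static char *build_devname(const char *nodename, const char *prepath)
    {
        size_t unclen, pplen = prepath ? strlen(prepath) : 0;
        char *dev, *pos;

        nodename += strspn(nodename, "\\");              /* skip leading delimiters */
        if (!*nodename)
            return NULL;
        unclen = strlen(nodename);
        while (unclen && nodename[unclen - 1] == '\\')    /* trim trailing ones */
            unclen--;

        dev = malloc(2 + unclen + 1 + pplen + 1);
        if (!dev)
            return NULL;
        pos = dev;
        *pos++ = '/';
        *pos++ = '/';
        memcpy(pos, nodename, unclen);
        pos += unclen;
        if (pplen) {
            *pos++ = '/';
            memcpy(pos, prepath, pplen);
            pos += pplen;
        }
        *pos = '\0';
        convert_delimiter(dev, '/');
        return dev;
    }

    int main(void)
    {
        char *dev = build_devname("\\\\server\\share\\", "dir\\sub");

        puts(dev ? dev : "error");   /* prints //server/share/dir/sub */
        free(dev);
        return 0;
    }
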
 
 
@@ -123,6 +140,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
 {
        int rc;
        char *mountdata = NULL;
+       const char *prepath = NULL;
        int md_len;
        char *tkn_e;
        char *srvIP = NULL;
@@ -132,7 +150,10 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
        if (sb_mountdata == NULL)
                return ERR_PTR(-EINVAL);
 
-       *devname = cifs_get_share_name(ref->node_name);
+       if (strlen(fullpath) - ref->path_consumed)
+               prepath = fullpath + ref->path_consumed;
+
+       *devname = cifs_build_devname(ref->node_name, prepath);
        if (IS_ERR(*devname)) {
                rc = PTR_ERR(*devname);
                *devname = NULL;
@@ -146,12 +167,14 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
                goto compose_mount_options_err;
        }
 
-       /* md_len = strlen(...) + 12 for 'sep+prefixpath='
-        * assuming that we have 'unc=' and 'ip=' in
-        * the original sb_mountdata
+       /*
+        * In most cases, we'll be building a shorter string than the original,
+        * but we do have to assume that the address in the ip= option may be
+        * much longer than the original. Add the max length of an address
+        * string to the length of the original string to allow for worst case.
         */
-       md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12;
-       mountdata = kzalloc(md_len+1, GFP_KERNEL);
+       md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
+       mountdata = kzalloc(md_len + 1, GFP_KERNEL);
        if (mountdata == NULL) {
                rc = -ENOMEM;
                goto compose_mount_options_err;
@@ -195,26 +218,6 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
                strncat(mountdata, &sep, 1);
        strcat(mountdata, "ip=");
        strcat(mountdata, srvIP);
-       strncat(mountdata, &sep, 1);
-       strcat(mountdata, "unc=");
-       strcat(mountdata, *devname);
-
-       /* find & copy prefixpath */
-       tkn_e = strchr(ref->node_name + 2, '\\');
-       if (tkn_e == NULL) {
-               /* invalid unc, missing share name*/
-               rc = -EINVAL;
-               goto compose_mount_options_err;
-       }
-
-       tkn_e = strchr(tkn_e + 1, '\\');
-       if (tkn_e || (strlen(fullpath) - ref->path_consumed)) {
-               strncat(mountdata, &sep, 1);
-               strcat(mountdata, "prefixpath=");
-               if (tkn_e)
-                       strcat(mountdata, tkn_e + 1);
-               strcat(mountdata, fullpath + ref->path_consumed);
-       }
 
        /*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
        /*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
index 72e4efee138986e65f376496eca41016b55570b2..3752b9f6d9e46e90876b18f4d14527f9262baff6 100644 (file)
@@ -372,9 +372,6 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
        cifs_show_security(s, tcon->ses->server);
        cifs_show_cache_flavor(s, cifs_sb);
 
-       seq_printf(s, ",unc=");
-       seq_escape(s, tcon->treeName, " \t\n\\");
-
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
                seq_printf(s, ",multiuser");
        else if (tcon->ses->user_name)
index 99eeaa17ee006956d37350cad00bfd854c8b5306..5b97e56ddbca8fd8b4a4552c95acd97d5ca6c318 100644 (file)
@@ -1061,6 +1061,7 @@ static int cifs_parse_security_flavors(char *value,
 #endif
        case Opt_sec_none:
                vol->nullauth = 1;
+               vol->secFlg |= CIFSSEC_MAY_NTLM;
                break;
        default:
                cifs_dbg(VFS, "bad security option: %s\n", value);
@@ -1257,14 +1258,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
        vol->backupuid_specified = false; /* no backup intent for a user */
        vol->backupgid_specified = false; /* no backup intent for a group */
 
-       /*
-        * For now, we ignore -EINVAL errors under the assumption that the
-        * unc= and prefixpath= options will be usable.
-        */
-       if (cifs_parse_devname(devname, vol) == -ENOMEM) {
-               printk(KERN_ERR "CIFS: Unable to allocate memory to parse "
-                               "device string.\n");
-               goto out_nomem;
+       switch (cifs_parse_devname(devname, vol)) {
+       case 0:
+               break;
+       case -ENOMEM:
+               cifs_dbg(VFS, "Unable to allocate memory for devname.\n");
+               goto cifs_parse_mount_err;
+       case -EINVAL:
+               cifs_dbg(VFS, "Malformed UNC in devname.\n");
+               goto cifs_parse_mount_err;
+       default:
+               cifs_dbg(VFS, "Unknown error parsing devname.\n");
+               goto cifs_parse_mount_err;
        }
 
        while ((data = strsep(&options, separator)) != NULL) {
@@ -1826,7 +1831,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
        }
 #endif
        if (!vol->UNC) {
-               cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string or in unc= option!\n");
+               cifs_dbg(VFS, "CIFS mount error: No usable UNC path provided in device string!\n");
                goto cifs_parse_mount_err;
        }
 
index e7512e497611fdcbed49d2be533ae4e97f7250fe..7ede7306599f47449bdd02533db47bfede2c81df 100644 (file)
@@ -34,7 +34,7 @@
 
 /**
  * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
- * @unc: UNC path specifying the server
+ * @unc: UNC path specifying the server (with '/' as delimiter)
  * @ip_addr: Where to return the IP address.
  *
  * The IP address will be returned in string form, and the caller is
@@ -64,7 +64,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
        hostname = unc + 2;
 
        /* Search for server name delimiter */
-       sep = memchr(hostname, '\\', len);
+       sep = memchr(hostname, '/', len);
        if (sep)
                len = sep - hostname;
        else
index fc3025199cb336f6c07c8172d19e501601a3f1f7..20efd81266c643338bcd81b434edfe9fbcacc0ac 100644 (file)
@@ -171,7 +171,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
 
        if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
                inode->i_flags |= S_AUTOMOUNT;
-       cifs_set_ops(inode);
+       if (inode->i_state & I_NEW)
+               cifs_set_ops(inode);
 }
 
 void
index d5c25db4398f0244f0f1d0c1f1e20a55f8f51aa9..f71ec125290db7da87355f444f7308826ee1c034 100644 (file)
@@ -243,7 +243,7 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
        struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
 
        if (crypt_stat->tfm)
-               crypto_free_blkcipher(crypt_stat->tfm);
+               crypto_free_ablkcipher(crypt_stat->tfm);
        if (crypt_stat->hash_tfm)
                crypto_free_hash(crypt_stat->hash_tfm);
        list_for_each_entry_safe(key_sig, key_sig_tmp,
@@ -319,6 +319,22 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
        return i;
 }
 
+struct extent_crypt_result {
+       struct completion completion;
+       int rc;
+};
+
+static void extent_crypt_complete(struct crypto_async_request *req, int rc)
+{
+       struct extent_crypt_result *ecr = req->data;
+
+       if (rc == -EINPROGRESS)
+               return;
+
+       ecr->rc = rc;
+       complete(&ecr->completion);
+}
+
 /**
  * encrypt_scatterlist
  * @crypt_stat: Pointer to the crypt_stat struct to initialize.
@@ -334,11 +350,8 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                               struct scatterlist *src_sg, int size,
                               unsigned char *iv)
 {
-       struct blkcipher_desc desc = {
-               .tfm = crypt_stat->tfm,
-               .info = iv,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct ablkcipher_request *req = NULL;
+       struct extent_crypt_result ecr;
        int rc = 0;
 
        BUG_ON(!crypt_stat || !crypt_stat->tfm
@@ -349,24 +362,47 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                ecryptfs_dump_hex(crypt_stat->key,
                                  crypt_stat->key_size);
        }
-       /* Consider doing this once, when the file is opened */
+
+       init_completion(&ecr.completion);
+
        mutex_lock(&crypt_stat->cs_tfm_mutex);
-       if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
-               rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
-                                            crypt_stat->key_size);
-               crypt_stat->flags |= ECRYPTFS_KEY_SET;
-       }
-       if (rc) {
-               ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
-                               rc);
+       req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
+       if (!req) {
                mutex_unlock(&crypt_stat->cs_tfm_mutex);
-               rc = -EINVAL;
+               rc = -ENOMEM;
                goto out;
        }
-       ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
-       crypto_blkcipher_encrypt_iv(&desc, dest_sg, src_sg, size);
+
+       ablkcipher_request_set_callback(req,
+                       CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+                       extent_crypt_complete, &ecr);
+       /* Consider doing this once, when the file is opened */
+       if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
+               rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+                                             crypt_stat->key_size);
+               if (rc) {
+                       ecryptfs_printk(KERN_ERR,
+                                       "Error setting key; rc = [%d]\n",
+                                       rc);
+                       mutex_unlock(&crypt_stat->cs_tfm_mutex);
+                       rc = -EINVAL;
+                       goto out;
+               }
+               crypt_stat->flags |= ECRYPTFS_KEY_SET;
+       }
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
+       ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
+       ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv);
+       rc = crypto_ablkcipher_encrypt(req);
+       if (rc == -EINPROGRESS || rc == -EBUSY) {
+               struct extent_crypt_result *ecr = req->base.data;
+
+               wait_for_completion(&ecr->completion);
+               rc = ecr->rc;
+               INIT_COMPLETION(ecr->completion);
+       }
 out:
+       ablkcipher_request_free(req);
        return rc;
 }
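
This hunk (and the matching decrypt_scatterlist one below) moves eCryptfs from the synchronous blkcipher API to ablkcipher: a request is allocated against the tfm, a completion callback is attached, and when crypto_ablkcipher_encrypt() returns -EINPROGRESS or -EBUSY the caller sleeps on the completion until extent_crypt_complete() runs. A condensed kernel-style sketch of just that wait pattern (not a drop-in replacement; key setup and cs_tfm_mutex locking are omitted, and it assumes the extent_crypt_result helpers defined in this hunk) could look like:

    /* Condensed sketch of the async-cipher wait pattern used above; assumes
     * the extent_crypt_result/extent_crypt_complete helpers from this hunk. */
    static int crypt_one_extent(struct crypto_ablkcipher *tfm,
                                struct scatterlist *src, struct scatterlist *dst,
                                int size, unsigned char *iv)
    {
        struct ablkcipher_request *req;
        struct extent_crypt_result ecr;
        int rc;

        init_completion(&ecr.completion);
        req = ablkcipher_request_alloc(tfm, GFP_NOFS);
        if (!req)
            return -ENOMEM;
        ablkcipher_request_set_callback(req,
                CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                extent_crypt_complete, &ecr);
        ablkcipher_request_set_crypt(req, src, dst, size, iv);

        rc = crypto_ablkcipher_encrypt(req);
        if (rc == -EINPROGRESS || rc == -EBUSY) {
            /* the driver took ownership; wait for the callback to fire */
            wait_for_completion(&ecr.completion);
            rc = ecr.rc;
        }
        ablkcipher_request_free(req);
        return rc;
    }
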
 
@@ -624,35 +660,61 @@ static int decrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                               struct scatterlist *src_sg, int size,
                               unsigned char *iv)
 {
-       struct blkcipher_desc desc = {
-               .tfm = crypt_stat->tfm,
-               .info = iv,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct ablkcipher_request *req = NULL;
+       struct extent_crypt_result ecr;
        int rc = 0;
 
-       /* Consider doing this once, when the file is opened */
+       BUG_ON(!crypt_stat || !crypt_stat->tfm
+              || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
+       if (unlikely(ecryptfs_verbosity > 0)) {
+               ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
+                               crypt_stat->key_size);
+               ecryptfs_dump_hex(crypt_stat->key,
+                                 crypt_stat->key_size);
+       }
+
+       init_completion(&ecr.completion);
+
        mutex_lock(&crypt_stat->cs_tfm_mutex);
-       rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
-                                    crypt_stat->key_size);
-       if (rc) {
-               ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
-                               rc);
+       req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
+       if (!req) {
                mutex_unlock(&crypt_stat->cs_tfm_mutex);
-               rc = -EINVAL;
+               rc = -ENOMEM;
                goto out;
        }
-       ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
-       rc = crypto_blkcipher_decrypt_iv(&desc, dest_sg, src_sg, size);
+
+       ablkcipher_request_set_callback(req,
+                       CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+                       extent_crypt_complete, &ecr);
+       /* Consider doing this once, when the file is opened */
+       if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
+               rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+                                             crypt_stat->key_size);
+               if (rc) {
+                       ecryptfs_printk(KERN_ERR,
+                                       "Error setting key; rc = [%d]\n",
+                                       rc);
+                       mutex_unlock(&crypt_stat->cs_tfm_mutex);
+                       rc = -EINVAL;
+                       goto out;
+               }
+               crypt_stat->flags |= ECRYPTFS_KEY_SET;
+       }
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
-       if (rc) {
-               ecryptfs_printk(KERN_ERR, "Error decrypting; rc = [%d]\n",
-                               rc);
-               goto out;
+       ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
+       ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv);
+       rc = crypto_ablkcipher_decrypt(req);
+       if (rc == -EINPROGRESS || rc == -EBUSY) {
+               struct extent_crypt_result *ecr = req->base.data;
+
+               wait_for_completion(&ecr->completion);
+               rc = ecr->rc;
+               INIT_COMPLETION(ecr->completion);
        }
-       rc = size;
 out:
+       ablkcipher_request_free(req);
        return rc;
+
 }
 
 /**
@@ -746,8 +808,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
                                                    crypt_stat->cipher, "cbc");
        if (rc)
                goto out_unlock;
-       crypt_stat->tfm = crypto_alloc_blkcipher(full_alg_name, 0,
-                                                CRYPTO_ALG_ASYNC);
+       crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0);
        kfree(full_alg_name);
        if (IS_ERR(crypt_stat->tfm)) {
                rc = PTR_ERR(crypt_stat->tfm);
@@ -757,7 +818,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
                                crypt_stat->cipher);
                goto out_unlock;
        }
-       crypto_blkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
        rc = 0;
 out_unlock:
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
index dd299b389d4e4fc36b7694dbf6acac0dcf9742f7..f622a733f7adc3ff1778e4f74790db507c61c824 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/nsproxy.h>
 #include <linux/backing-dev.h>
 #include <linux/ecryptfs.h>
+#include <linux/crypto.h>
 
 #define ECRYPTFS_DEFAULT_IV_BYTES 16
 #define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096
@@ -233,7 +234,7 @@ struct ecryptfs_crypt_stat {
        size_t extent_shift;
        unsigned int extent_mask;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
-       struct crypto_blkcipher *tfm;
+       struct crypto_ablkcipher *tfm;
        struct crypto_hash *hash_tfm; /* Crypto context for generating
                                       * the initialization vectors */
        unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
index bfb531564319608be19b63885142eb6b5c7648d9..8dd524f322847b346e119c6d5c5b3693de6fb8bc 100644 (file)
@@ -44,8 +44,11 @@ static ssize_t efivarfs_file_write(struct file *file,
 
        bytes = efivar_entry_set_get_size(var, attributes, &datasize,
                                          data, &set);
-       if (!set && bytes)
+       if (!set && bytes) {
+               if (bytes == -ENOENT)
+                       bytes = -EIO;
                goto out;
+       }
 
        if (bytes == -ENOENT) {
                drop_nlink(inode);
@@ -76,7 +79,14 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
        int err;
 
        err = efivar_entry_size(var, &datasize);
-       if (err)
+
+       /*
+        * efivarfs represents uncommitted variables with
+        * zero-length files. Reading them should return EOF.
+        */
+       if (err == -ENOENT)
+               return 0;
+       else if (err)
                return err;
 
        data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL);
index 0aabb344b02e6779ed410adff60a9c0e3ab4003e..5aae3d12d4004109cff811b77011486abbaed1b5 100644 (file)
@@ -209,7 +209,6 @@ typedef struct ext4_io_end {
        ssize_t                 size;           /* size of the extent */
        struct kiocb            *iocb;          /* iocb struct for AIO */
        int                     result;         /* error value for AIO */
-       atomic_t                count;          /* reference counter */
 } ext4_io_end_t;
 
 struct ext4_io_submit {
@@ -2651,14 +2650,11 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 
 /* page-io.c */
 extern int __init ext4_init_pageio(void);
+extern void ext4_add_complete_io(ext4_io_end_t *io_end);
 extern void ext4_exit_pageio(void);
 extern void ext4_ioend_shutdown(struct inode *);
+extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
-extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end);
-extern int ext4_put_io_end(ext4_io_end_t *io_end);
-extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
-extern void ext4_io_submit_init(struct ext4_io_submit *io,
-                               struct writeback_control *wbc);
 extern void ext4_end_io_work(struct work_struct *work);
 extern void ext4_io_submit(struct ext4_io_submit *io);
 extern int ext4_bio_write_page(struct ext4_io_submit *io,
index 107936db244eddd5e7192b657ccef02841d3b617..bc0f1910b9cfa7dd5aa93b9c5d889c6fbacd037d 100644 (file)
@@ -3642,7 +3642,7 @@ int ext4_find_delalloc_range(struct inode *inode,
 {
        struct extent_status es;
 
-       ext4_es_find_delayed_extent(inode, lblk_start, &es);
+       ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
        if (es.es_len == 0)
                return 0; /* there is no delay extent in this tree */
        else if (es.es_lblk <= lblk_start &&
@@ -4608,9 +4608,10 @@ static int ext4_find_delayed_extent(struct inode *inode,
        struct extent_status es;
        ext4_lblk_t block, next_del;
 
-       ext4_es_find_delayed_extent(inode, newes->es_lblk, &es);
-
        if (newes->es_pblk == 0) {
+               ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
+                               newes->es_lblk + newes->es_len - 1, &es);
+
                /*
                 * No extent in extent-tree contains block @newes->es_pblk,
                 * then the block may stay in 1)a hole or 2)delayed-extent.
@@ -4630,7 +4631,7 @@ static int ext4_find_delayed_extent(struct inode *inode,
        }
 
        block = newes->es_lblk + newes->es_len;
-       ext4_es_find_delayed_extent(inode, block, &es);
+       ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
        if (es.es_len == 0)
                next_del = EXT_MAX_BLOCKS;
        else
index fe3337a85edeaecd6135333759173d8c6fbc0e78..e6941e622d310eb1ab47793b7c2e984609cebf0b 100644 (file)
@@ -232,14 +232,16 @@ static struct extent_status *__es_tree_search(struct rb_root *root,
 }
 
 /*
- * ext4_es_find_delayed_extent: find the 1st delayed extent covering @es->lblk
- * if it exists, otherwise, the next extent after @es->lblk.
+ * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
+ * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
  *
  * @inode: the inode which owns delayed extents
  * @lblk: the offset where we start to search
+ * @end: the offset where we stop to search
  * @es: delayed extent that we found
  */
-void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+void ext4_es_find_delayed_extent_range(struct inode *inode,
+                                ext4_lblk_t lblk, ext4_lblk_t end,
                                 struct extent_status *es)
 {
        struct ext4_es_tree *tree = NULL;
@@ -247,7 +249,8 @@ void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
        struct rb_node *node;
 
        BUG_ON(es == NULL);
-       trace_ext4_es_find_delayed_extent_enter(inode, lblk);
+       BUG_ON(end < lblk);
+       trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);
 
        read_lock(&EXT4_I(inode)->i_es_lock);
        tree = &EXT4_I(inode)->i_es_tree;
@@ -270,6 +273,10 @@ void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
        if (es1 && !ext4_es_is_delayed(es1)) {
                while ((node = rb_next(&es1->rb_node)) != NULL) {
                        es1 = rb_entry(node, struct extent_status, rb_node);
+                       if (es1->es_lblk > end) {
+                               es1 = NULL;
+                               break;
+                       }
                        if (ext4_es_is_delayed(es1))
                                break;
                }
@@ -285,7 +292,7 @@ void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
        read_unlock(&EXT4_I(inode)->i_es_lock);
 
        ext4_es_lru_add(inode);
-       trace_ext4_es_find_delayed_extent_exit(inode, es);
+       trace_ext4_es_find_delayed_extent_range_exit(inode, es);
 }
 
 static struct extent_status *
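
ext4_es_find_delayed_extent_range() adds an end bound so the status-tree walk stops once an entry starts past the caller's range instead of scanning to the end of the tree; the SEEK_DATA/SEEK_HOLE callers above now pass a one-block range. A tiny sketch of a bounded "first delayed entry in [lblk, end]" search, using a sorted array as a stand-in for the rb-tree, might be:

    #include <stdbool.h>
    #include <stdio.h>

    struct extent { unsigned lblk, len; bool delayed; };

    /* First delayed extent overlapping or following lblk, over extents sorted
     * by lblk; returns NULL as soon as the walk passes `end`. */
    static const struct extent *find_delayed_range(const struct extent *es, int n,
                                                   unsigned lblk, unsigned end)
    {
        for (int i = 0; i < n; i++) {
            if (es[i].lblk + es[i].len <= lblk)
                continue;             /* entirely before the range */
            if (es[i].lblk > end)
                return NULL;          /* bounded: stop past `end` */
            if (es[i].delayed)
                return &es[i];
        }
        return NULL;
    }

    int main(void)
    {
        const struct extent es[] = {
            { 0, 4, false }, { 8, 2, false }, { 20, 4, true },
        };
        const struct extent *hit = find_delayed_range(es, 3, 5, 10);

        printf("%s\n", hit ? "found" : "none in range");
        return 0;
    }
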
index d8e2d4dc311e62c99843fa16c7fc8d4a0798bddb..f740eb03b7079b755d3033225a08546ab450c117 100644 (file)
@@ -62,7 +62,8 @@ extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
                                 unsigned long long status);
 extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                                 ext4_lblk_t len);
-extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
+extern void ext4_es_find_delayed_extent_range(struct inode *inode,
+                                       ext4_lblk_t lblk, ext4_lblk_t end,
                                        struct extent_status *es);
 extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
                                 struct extent_status *es);
index 4959e29573b68d0ed2fec94c28015c52cb22afb9..b1b4d51b5d86b4e54c179ddce5f5b574238b3629 100644 (file)
@@ -465,7 +465,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
                 * If there is a delay extent at this offset,
                 * it will be as a data.
                 */
-               ext4_es_find_delayed_extent(inode, last, &es);
+               ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
                                dataoff = last << blkbits;
@@ -548,7 +548,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
                 * If there is a delay extent at this offset,
                 * we will skip this extent.
                 */
-               ext4_es_find_delayed_extent(inode, last, &es);
+               ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        last = es.es_lblk + es.es_len;
                        holeoff = last << blkbits;
index 0723774bdfb5cd3ef27600eaeac7062c330414ac..d6382b89ecbde3077720ebc6a9bb56254883e2d7 100644 (file)
@@ -1488,10 +1488,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
        struct ext4_io_submit io_submit;
 
        BUG_ON(mpd->next_page <= mpd->first_page);
-       ext4_io_submit_init(&io_submit, mpd->wbc);
-       io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
-       if (!io_submit.io_end)
-               return -ENOMEM;
+       memset(&io_submit, 0, sizeof(io_submit));
        /*
         * We need to start from the first_page to the next_page - 1
         * to make sure we also write the mapped dirty buffer_heads.
@@ -1579,8 +1576,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                pagevec_release(&pvec);
        }
        ext4_io_submit(&io_submit);
-       /* Drop io_end reference we got from init */
-       ext4_put_io_end_defer(io_submit.io_end);
        return ret;
 }
 
@@ -2239,16 +2234,9 @@ static int ext4_writepage(struct page *page,
                 */
                return __ext4_journalled_writepage(page, len);
 
-       ext4_io_submit_init(&io_submit, wbc);
-       io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
-       if (!io_submit.io_end) {
-               redirty_page_for_writepage(wbc, page);
-               return -ENOMEM;
-       }
+       memset(&io_submit, 0, sizeof(io_submit));
        ret = ext4_bio_write_page(&io_submit, page, len, wbc);
        ext4_io_submit(&io_submit);
-       /* Drop io_end reference we got from init */
-       ext4_put_io_end_defer(io_submit.io_end);
        return ret;
 }
 
@@ -3079,13 +3067,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
        struct inode *inode = file_inode(iocb->ki_filp);
         ext4_io_end_t *io_end = iocb->private;
 
-       /* if not async direct IO just return */
-       if (!io_end) {
-               inode_dio_done(inode);
-               if (is_async)
-                       aio_complete(iocb, ret, 0);
-               return;
-       }
+       /* if not async direct IO or dio with 0 bytes write, just return */
+       if (!io_end || !size)
+               goto out;
 
        ext_debug("ext4_end_io_dio(): io_end 0x%p "
                  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3093,13 +3077,25 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                  size);
 
        iocb->private = NULL;
+
+       /* if not aio dio with unwritten extents, just free io and return */
+       if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+               ext4_free_io_end(io_end);
+out:
+               inode_dio_done(inode);
+               if (is_async)
+                       aio_complete(iocb, ret, 0);
+               return;
+       }
+
        io_end->offset = offset;
        io_end->size = size;
        if (is_async) {
                io_end->iocb = iocb;
                io_end->result = ret;
        }
-       ext4_put_io_end_defer(io_end);
+
+       ext4_add_complete_io(io_end);
 }
 
 /*
@@ -3133,7 +3129,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        get_block_t *get_block_func = NULL;
        int dio_flags = 0;
        loff_t final_size = offset + count;
-       ext4_io_end_t *io_end = NULL;
 
        /* Use the old path for reads and writes beyond i_size. */
        if (rw != WRITE || final_size > inode->i_size)
@@ -3172,16 +3167,13 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        iocb->private = NULL;
        ext4_inode_aio_set(inode, NULL);
        if (!is_sync_kiocb(iocb)) {
-               io_end = ext4_init_io_end(inode, GFP_NOFS);
+               ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
                if (!io_end) {
                        ret = -ENOMEM;
                        goto retake_lock;
                }
                io_end->flag |= EXT4_IO_END_DIRECT;
-               /*
-                * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
-                */
-               iocb->private = ext4_get_io_end(io_end);
+               iocb->private = io_end;
                /*
                 * we save the io structure for current async direct
                 * IO, so that later ext4_map_blocks() could flag the
@@ -3205,27 +3197,26 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                                   NULL,
                                   dio_flags);
 
+       if (iocb->private)
+               ext4_inode_aio_set(inode, NULL);
        /*
-        * Put our reference to io_end. This can free the io_end structure e.g.
-        * in sync IO case or in case of error. It can even perform extent
-        * conversion if all bios we submitted finished before we got here.
-        * Note that in that case iocb->private can be already set to NULL
-        * here.
+        * The io_end structure takes a reference to the inode, that
+        * structure needs to be destroyed and the reference to the
+        * inode need to be dropped, when IO is complete, even with 0
+        * byte write, or failed.
+        *
+        * In the successful AIO DIO case, the io_end structure will
+        * be destroyed and the reference to the inode will be dropped
+        * after the end_io call back function is called.
+        *
+        * In the case there is 0 byte write, or error case, since VFS
+        * direct IO won't invoke the end_io call back function, we
+        * need to free the end_io structure here.
         */
-       if (io_end) {
-               ext4_inode_aio_set(inode, NULL);
-               ext4_put_io_end(io_end);
-               /*
-                * In case of error or no write ext4_end_io_dio() was not
-                * called so we have to put iocb's reference.
-                */
-               if (ret <= 0 && ret != -EIOCBQUEUED) {
-                       WARN_ON(iocb->private != io_end);
-                       ext4_put_io_end(io_end);
-                       iocb->private = NULL;
-               }
-       }
-       if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
+       if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+               ext4_free_io_end(iocb->private);
+               iocb->private = NULL;
+       } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
                                                EXT4_STATE_DIO_UNWRITTEN)) {
                int err;
                /*
index b1ed9e07434ba4a15d86d1a538e6b808917901d6..def84082a9a9b73deadc0d369c63a7ef981d8ea9 100644 (file)
@@ -2105,7 +2105,11 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
                group = ac->ac_g_ex.fe_group;
 
                for (i = 0; i < ngroups; group++, i++) {
-                       if (group == ngroups)
+                       /*
+                        * Artificially restricted ngroups for non-extent
+                        * files makes group > ngroups possible on first loop.
+                        */
+                       if (group >= ngroups)
                                group = 0;
 
                        /* This now checks without needing the buddy page */
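
The comment added above explains the fix: ngroups can be artificially clamped for non-extent files, so the goal group the scan starts from may already be at or past ngroups, and the wrap test has to be >= rather than ==. A small demonstration of that wrap-around scan with a start index beyond the limit:

    #include <stdio.h>

    /* Scan `ngroups` slots starting at `start`, wrapping to 0; `start` may
     * legitimately be >= ngroups when the caller clamped ngroups afterwards. */
    static int scan_groups(int start, int ngroups)
    {
        int group = start;

        for (int i = 0; i < ngroups; group++, i++) {
            if (group >= ngroups)     /* '>=' (not '==') matters when start > ngroups */
                group = 0;
            printf("visit %d\n", group);
        }
        return 0;
    }

    int main(void)
    {
        return scan_groups(6, 4);     /* visits 0,1,2,3 despite start=6 */
    }
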
index 19599bded62a834be3bc52dbf68cfb0414ec564d..4acf1f78881b6c4c61aa10377a62ea4368106593 100644 (file)
@@ -62,28 +62,15 @@ void ext4_ioend_shutdown(struct inode *inode)
                cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
 }
 
-static void ext4_release_io_end(ext4_io_end_t *io_end)
+void ext4_free_io_end(ext4_io_end_t *io)
 {
-       BUG_ON(!list_empty(&io_end->list));
-       BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
-
-       if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
-               wake_up_all(ext4_ioend_wq(io_end->inode));
-       if (io_end->flag & EXT4_IO_END_DIRECT)
-               inode_dio_done(io_end->inode);
-       if (io_end->iocb)
-               aio_complete(io_end->iocb, io_end->result, 0);
-       kmem_cache_free(io_end_cachep, io_end);
-}
-
-static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
-{
-       struct inode *inode = io_end->inode;
+       BUG_ON(!io);
+       BUG_ON(!list_empty(&io->list));
+       BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
 
-       io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
-       /* Wake up anyone waiting on unwritten extent conversion */
-       if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
-               wake_up_all(ext4_ioend_wq(inode));
+       if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
+               wake_up_all(ext4_ioend_wq(io->inode));
+       kmem_cache_free(io_end_cachep, io);
 }
 
 /* check a range of space and convert unwritten extents to written. */
@@ -106,8 +93,13 @@ static int ext4_end_io(ext4_io_end_t *io)
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }
-       ext4_clear_io_unwritten_flag(io);
-       ext4_release_io_end(io);
+       /* Wake up anyone waiting on unwritten extent conversion */
+       if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
+               wake_up_all(ext4_ioend_wq(inode));
+       if (io->flag & EXT4_IO_END_DIRECT)
+               inode_dio_done(inode);
+       if (io->iocb)
+               aio_complete(io->iocb, io->result, 0);
        return ret;
 }
 
@@ -138,7 +130,7 @@ static void dump_completed_IO(struct inode *inode)
 }
 
 /* Add the io_end to per-inode completed end_io list. */
-static void ext4_add_complete_io(ext4_io_end_t *io_end)
+void ext4_add_complete_io(ext4_io_end_t *io_end)
 {
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct workqueue_struct *wq;
@@ -175,6 +167,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode)
                err = ext4_end_io(io);
                if (unlikely(!ret && err))
                        ret = err;
+               io->flag &= ~EXT4_IO_END_UNWRITTEN;
+               ext4_free_io_end(io);
        }
        return ret;
 }
@@ -206,43 +200,10 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
                atomic_inc(&EXT4_I(inode)->i_ioend_count);
                io->inode = inode;
                INIT_LIST_HEAD(&io->list);
-               atomic_set(&io->count, 1);
        }
        return io;
 }
 
-void ext4_put_io_end_defer(ext4_io_end_t *io_end)
-{
-       if (atomic_dec_and_test(&io_end->count)) {
-               if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
-                       ext4_release_io_end(io_end);
-                       return;
-               }
-               ext4_add_complete_io(io_end);
-       }
-}
-
-int ext4_put_io_end(ext4_io_end_t *io_end)
-{
-       int err = 0;
-
-       if (atomic_dec_and_test(&io_end->count)) {
-               if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
-                       err = ext4_convert_unwritten_extents(io_end->inode,
-                                               io_end->offset, io_end->size);
-                       ext4_clear_io_unwritten_flag(io_end);
-               }
-               ext4_release_io_end(io_end);
-       }
-       return err;
-}
-
-ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
-{
-       atomic_inc(&io_end->count);
-       return io_end;
-}
-
 /*
  * Print an buffer I/O error compatible with the fs/buffer.c.  This
  * provides compatibility with dmesg scrapers that look for a specific
@@ -325,7 +286,12 @@ static void ext4_end_bio(struct bio *bio, int error)
                             bi_sector >> (inode->i_blkbits - 9));
        }
 
-       ext4_put_io_end_defer(io_end);
+       if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+               ext4_free_io_end(io_end);
+               return;
+       }
+
+       ext4_add_complete_io(io_end);
 }
 
 void ext4_io_submit(struct ext4_io_submit *io)
@@ -339,37 +305,40 @@ void ext4_io_submit(struct ext4_io_submit *io)
                bio_put(io->io_bio);
        }
        io->io_bio = NULL;
-}
-
-void ext4_io_submit_init(struct ext4_io_submit *io,
-                        struct writeback_control *wbc)
-{
-       io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
-       io->io_bio = NULL;
+       io->io_op = 0;
        io->io_end = NULL;
 }
 
-static int io_submit_init_bio(struct ext4_io_submit *io,
-                             struct buffer_head *bh)
+static int io_submit_init(struct ext4_io_submit *io,
+                         struct inode *inode,
+                         struct writeback_control *wbc,
+                         struct buffer_head *bh)
 {
+       ext4_io_end_t *io_end;
+       struct page *page = bh->b_page;
        int nvecs = bio_get_nr_vecs(bh->b_bdev);
        struct bio *bio;
 
+       io_end = ext4_init_io_end(inode, GFP_NOFS);
+       if (!io_end)
+               return -ENOMEM;
        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
+       bio->bi_private = io->io_end = io_end;
        bio->bi_end_io = ext4_end_bio;
-       bio->bi_private = ext4_get_io_end(io->io_end);
-       if (!io->io_end->size)
-               io->io_end->offset = (bh->b_page->index << PAGE_CACHE_SHIFT)
-                                    + bh_offset(bh);
+
+       io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
+
        io->io_bio = bio;
+       io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
        io->io_next_block = bh->b_blocknr;
        return 0;
 }
 
 static int io_submit_add_bh(struct ext4_io_submit *io,
                            struct inode *inode,
+                           struct writeback_control *wbc,
                            struct buffer_head *bh)
 {
        ext4_io_end_t *io_end;
@@ -380,18 +349,18 @@ static int io_submit_add_bh(struct ext4_io_submit *io,
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
-               ret = io_submit_init_bio(io, bh);
+               ret = io_submit_init(io, inode, wbc, bh);
                if (ret)
                        return ret;
        }
-       ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
-       if (ret != bh->b_size)
-               goto submit_and_retry;
        io_end = io->io_end;
        if (test_clear_buffer_uninit(bh))
                ext4_set_io_unwritten_flag(inode, io_end);
-       io_end->size += bh->b_size;
+       io->io_end->size += bh->b_size;
        io->io_next_block++;
+       ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
+       if (ret != bh->b_size)
+               goto submit_and_retry;
        return 0;
 }
 
@@ -463,7 +432,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
        do {
                if (!buffer_async_write(bh))
                        continue;
-               ret = io_submit_add_bh(io, inode, bh);
+               ret = io_submit_add_bh(io, inode, wbc, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
index dfce656ddb333a6e7720c8ab0e32984ad82f11f3..5d4513cb1b3c03604792f145125b21fb338a3101 100644 (file)
@@ -1229,6 +1229,19 @@ static int fat_read_root(struct inode *inode)
        return 0;
 }
 
+static unsigned long calc_fat_clusters(struct super_block *sb)
+{
+       struct msdos_sb_info *sbi = MSDOS_SB(sb);
+
+       /* Divide first to avoid overflow */
+       if (sbi->fat_bits != 12) {
+               unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits;
+               return ent_per_sec * sbi->fat_length;
+       }
+
+       return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
+}
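
calc_fat_clusters() divides first for FAT16/FAT32, where the bits per sector are an exact multiple of fat_bits, so the intermediate value stays small; FAT12 keeps the old multiply-then-divide order because 8/fat_bits is not exact there. A tiny userspace demonstration of why the ordering matters with 32-bit arithmetic (the numbers below are made up):

    #include <stdio.h>

    /* With 32-bit arithmetic, fat_length * blocksize * 8 can wrap before the
     * division; dividing first keeps the intermediate small. */
    int main(void)
    {
        unsigned int fat_length = 2000000;   /* FAT sectors, hypothetical */
        unsigned int blocksize = 4096;       /* bytes per sector */
        unsigned int fat_bits = 32;

        unsigned int naive = fat_length * blocksize * 8 / fat_bits;  /* wraps */
        unsigned int safe  = blocksize * 8 / fat_bits * fat_length;  /* divide first */

        printf("naive=%u safe=%u\n", naive, safe);
        return 0;
    }
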
+
 /*
  * Read the super block of an MS-DOS FS.
  */
@@ -1434,7 +1447,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                sbi->dirty = b->fat16.state & FAT_STATE_DIRTY;
 
        /* check that FAT table does not overflow */
-       fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
+       fat_clusters = calc_fat_clusters(sb);
        total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
        if (total_clusters > MAX_FAT(sb)) {
                if (!silent)
index 254df56b847b96d5104e62818c0f1c6f4803673c..f3f783dc4f7509096235c7f8f6ed468fe51ddded 100644 (file)
@@ -180,6 +180,8 @@ u64 fuse_get_attr_version(struct fuse_conn *fc)
 static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
 {
        struct inode *inode;
+       struct dentry *parent;
+       struct fuse_conn *fc;
 
        inode = ACCESS_ONCE(entry->d_inode);
        if (inode && is_bad_inode(inode))
@@ -187,10 +189,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
        else if (fuse_dentry_time(entry) < get_jiffies_64()) {
                int err;
                struct fuse_entry_out outarg;
-               struct fuse_conn *fc;
                struct fuse_req *req;
                struct fuse_forget_link *forget;
-               struct dentry *parent;
                u64 attr_version;
 
                /* For negative dentries, always do a fresh lookup */
@@ -241,8 +241,14 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
                                       entry_attr_timeout(&outarg),
                                       attr_version);
                fuse_change_entry_timeout(entry, &outarg);
+       } else if (inode) {
+               fc = get_fuse_conn(inode);
+               if (fc->readdirplus_auto) {
+                       parent = dget_parent(entry);
+                       fuse_advise_use_readdirplus(parent->d_inode);
+                       dput(parent);
+               }
        }
-       fuse_advise_use_readdirplus(inode);
        return 1;
 }
 
index d1c9b85b3f58bfbbc10919b6ee50e7b4ead9bb2f..e570081f9f76be0be1bc05deb9b260101d018b51 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/compat.h>
 #include <linux/swap.h>
 #include <linux/aio.h>
+#include <linux/falloc.h>
 
 static const struct file_operations fuse_direct_io_file_operations;
 
@@ -1278,7 +1279,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
 
        iov_iter_init(&ii, iov, nr_segs, count, 0);
 
-       req = fuse_get_req(fc, fuse_iter_npages(&ii));
+       if (io->async)
+               req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
+       else
+               req = fuse_get_req(fc, fuse_iter_npages(&ii));
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -1314,7 +1318,11 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
                        break;
                if (count) {
                        fuse_put_request(fc, req);
-                       req = fuse_get_req(fc, fuse_iter_npages(&ii));
+                       if (io->async)
+                               req = fuse_get_req_for_background(fc,
+                                       fuse_iter_npages(&ii));
+                       else
+                               req = fuse_get_req(fc, fuse_iter_npages(&ii));
                        if (IS_ERR(req))
                                break;
                }
@@ -2365,6 +2373,11 @@ static void fuse_do_truncate(struct file *file)
        fuse_do_setattr(inode, &attr, file);
 }
 
+static inline loff_t fuse_round_up(loff_t off)
+{
+       return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
+}
+
 static ssize_t
 fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs)
@@ -2372,6 +2385,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        ssize_t ret = 0;
        struct file *file = iocb->ki_filp;
        struct fuse_file *ff = file->private_data;
+       bool async_dio = ff->fc->async_dio;
        loff_t pos = 0;
        struct inode *inode;
        loff_t i_size;
@@ -2383,10 +2397,10 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        i_size = i_size_read(inode);
 
        /* optimization for short read */
-       if (rw != WRITE && offset + count > i_size) {
+       if (async_dio && rw != WRITE && offset + count > i_size) {
                if (offset >= i_size)
                        return 0;
-               count = i_size - offset;
+               count = min_t(loff_t, count, fuse_round_up(i_size - offset));
        }
 
        io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
@@ -2404,7 +2418,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         * By default, we want to optimize all I/Os with async request
         * submission to the client filesystem if supported.
         */
-       io->async = ff->fc->async_dio;
+       io->async = async_dio;
        io->iocb = iocb;
 
        /*
@@ -2412,7 +2426,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         * to wait on real async I/O requests, so we must submit this request
         * synchronously.
         */
-       if (!is_sync_kiocb(iocb) && (offset + count > i_size))
+       if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
                io->async = false;
 
        if (rw == WRITE)
@@ -2424,7 +2438,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
 
                /* we have a non-extending, async request, so return */
-               if (ret > 0 && !is_sync_kiocb(iocb))
+               if (!is_sync_kiocb(iocb))
                        return -EIOCBQUEUED;
 
                ret = wait_on_sync_kiocb(iocb);
@@ -2446,6 +2460,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
                                loff_t length)
 {
        struct fuse_file *ff = file->private_data;
+       struct inode *inode = file->f_inode;
        struct fuse_conn *fc = ff->fc;
        struct fuse_req *req;
        struct fuse_fallocate_in inarg = {
@@ -2459,9 +2474,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        if (fc->no_fallocate)
                return -EOPNOTSUPP;
 
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               mutex_lock(&inode->i_mutex);
+               fuse_set_nowrite(inode);
+       }
+
        req = fuse_get_req_nopages(fc);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto out;
+       }
 
        req->in.h.opcode = FUSE_FALLOCATE;
        req->in.h.nodeid = ff->nodeid;
@@ -2476,6 +2498,24 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
        }
        fuse_put_request(fc, req);
 
+       if (err)
+               goto out;
+
+       /* we could have extended the file */
+       if (!(mode & FALLOC_FL_KEEP_SIZE))
+               fuse_write_update_size(inode, offset + length);
+
+       if (mode & FALLOC_FL_PUNCH_HOLE)
+               truncate_pagecache_range(inode, offset, offset + length - 1);
+
+       fuse_invalidate_attr(inode);
+
+out:
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               fuse_release_nowrite(inode);
+               mutex_unlock(&inode->i_mutex);
+       }
+
        return err;
 }
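
Aside: the hole-punch branch added above is reached from userspace through fallocate(2). A minimal caller for context only; the file path is made up, and it assumes a glibc that exposes the FALLOC_FL_* flags via <fcntl.h> with _GNU_SOURCE (otherwise include <linux/falloc.h>). Note that FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/mnt/fuse/testfile", O_RDWR);    /* illustrative path */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* punch a 1 MiB hole starting at offset 4096, keeping i_size */
            if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                          4096, 1 << 20) < 0)
                    perror("fallocate");
            close(fd);
            return 0;
    }
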
 
index 6201f81e4d3a54bdc2d1d4a145253d7ab199baa9..9a0cdde14a088c43e12e5d5fc4ba84257ca98b46 100644 (file)
@@ -867,10 +867,11 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                fc->dont_mask = 1;
                        if (arg->flags & FUSE_AUTO_INVAL_DATA)
                                fc->auto_inval_data = 1;
-                       if (arg->flags & FUSE_DO_READDIRPLUS)
+                       if (arg->flags & FUSE_DO_READDIRPLUS) {
                                fc->do_readdirplus = 1;
-                       if (arg->flags & FUSE_READDIRPLUS_AUTO)
-                               fc->readdirplus_auto = 1;
+                               if (arg->flags & FUSE_READDIRPLUS_AUTO)
+                                       fc->readdirplus_auto = 1;
+                       }
                        if (arg->flags & FUSE_ASYNC_DIO)
                                fc->async_dio = 1;
                } else {
index eb08c9e43c2afb13947ca557f1a62da4626c5071..5a376ab81feb9021620cdb13b0cd1642059ead43 100644 (file)
@@ -26,7 +26,7 @@ config GFS2_FS
 config GFS2_FS_LOCKING_DLM
        bool "GFS2 DLM locking"
        depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && \
-               HOTPLUG && DLM && CONFIGFS_FS && SYSFS
+               HOTPLUG && CONFIGFS_FS && SYSFS && (DLM=y || DLM=GFS2_FS)
        help
          Multiple node locking module for GFS2
 
index 1dc9a13ce6bb2f160cdd98a857ac48496c64dffe..93b5809c20bb347a9de3e5f0d46949c3dcf78dc5 100644 (file)
@@ -1286,17 +1286,26 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
        if (ret)
                return ret;
 
+       ret = get_write_access(inode);
+       if (ret)
+               return ret;
+
        inode_dio_wait(inode);
 
        ret = gfs2_rs_alloc(GFS2_I(inode));
        if (ret)
-               return ret;
+               goto out;
 
        oldsize = inode->i_size;
-       if (newsize >= oldsize)
-               return do_grow(inode, newsize);
+       if (newsize >= oldsize) {
+               ret = do_grow(inode, newsize);
+               goto out;
+       }
 
-       return do_shrink(inode, oldsize, newsize);
+       ret = do_shrink(inode, oldsize, newsize);
+out:
+       put_write_access(inode);
+       return ret;
 }
 
 int gfs2_truncatei_resume(struct gfs2_inode *ip)
index c3e82bd23179533e83dc5bf57186be025cdc2e9e..b631c904346084b5fb7b7d96985e724e7b4173a3 100644 (file)
@@ -354,22 +354,31 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
                return ERR_PTR(-EIO);
        }
 
-       hc = kmalloc(hsize, GFP_NOFS);
-       ret = -ENOMEM;
+       hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN);
+       if (hc == NULL)
+               hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL);
+
        if (hc == NULL)
                return ERR_PTR(-ENOMEM);
 
        ret = gfs2_dir_read_data(ip, hc, hsize);
        if (ret < 0) {
-               kfree(hc);
+               if (is_vmalloc_addr(hc))
+                       vfree(hc);
+               else
+                       kfree(hc);
                return ERR_PTR(ret);
        }
 
        spin_lock(&inode->i_lock);
-       if (ip->i_hash_cache)
-               kfree(hc);
-       else
+       if (ip->i_hash_cache) {
+               if (is_vmalloc_addr(hc))
+                       vfree(hc);
+               else
+                       kfree(hc);
+       } else {
                ip->i_hash_cache = hc;
+       }
        spin_unlock(&inode->i_lock);
 
        return ip->i_hash_cache;
@@ -385,7 +394,10 @@ void gfs2_dir_hash_inval(struct gfs2_inode *ip)
 {
        __be64 *hc = ip->i_hash_cache;
        ip->i_hash_cache = NULL;
-       kfree(hc);
+       if (is_vmalloc_addr(hc))
+               vfree(hc);
+       else
+               kfree(hc);
 }
 
 static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
@@ -1113,7 +1125,10 @@ static int dir_double_exhash(struct gfs2_inode *dip)
        if (IS_ERR(hc))
                return PTR_ERR(hc);
 
-       h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS);
+       h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
+       if (hc2 == NULL)
+               hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);
+
        if (!hc2)
                return -ENOMEM;
 
@@ -1145,7 +1160,10 @@ static int dir_double_exhash(struct gfs2_inode *dip)
        gfs2_dinode_out(dip, dibh->b_data);
        brelse(dibh);
 out_kfree:
-       kfree(hc2);
+       if (is_vmalloc_addr(hc2))
+               vfree(hc2);
+       else
+               kfree(hc2);
        return error;
 }
 
@@ -1846,6 +1864,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
        memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 
        ht = kzalloc(size, GFP_NOFS);
+       if (ht == NULL)
+               ht = vzalloc(size);
        if (!ht)
                return -ENOMEM;
 
@@ -1933,7 +1953,10 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
        gfs2_rlist_free(&rlist);
        gfs2_quota_unhold(dip);
 out:
-       kfree(ht);
+       if (is_vmalloc_addr(ht))
+               vfree(ht);
+       else
+               kfree(ht);
        return error;
 }
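
Aside: the dir.c hunks above all repeat one idiom: try kmalloc() with __GFP_NOWARN, fall back to vmalloc space when the hash table is too large or memory too fragmented for a contiguous allocation, and let is_vmalloc_addr() pick the matching free routine. A condensed sketch of that idiom with hypothetical helper names (the patch itself open-codes it at every call site); later kernels centralise essentially this pattern behind kvmalloc()/kvfree():

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* big_alloc()/big_free() are illustrative names, not part of the patch */
    static void *big_alloc(size_t size)
    {
            void *p = kmalloc(size, GFP_NOFS | __GFP_NOWARN);

            if (p == NULL)                  /* fall back to vmalloc space */
                    p = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
            return p;                       /* may still be NULL */
    }

    static void big_free(void *p)
    {
            if (is_vmalloc_addr(p))         /* vfree() and kfree() must not be mixed */
                    vfree(p);
            else
                    kfree(p);
    }
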
 
index acd16764b133aa896bac5bea77a9bb481e07c74c..ad0dc38d87ab74dd7695a74b683a2baf7d3620d3 100644 (file)
@@ -402,16 +402,20 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        /* Update file times before taking page lock */
        file_update_time(vma->vm_file);
 
+       ret = get_write_access(inode);
+       if (ret)
+               goto out;
+
        ret = gfs2_rs_alloc(ip);
        if (ret)
-               return ret;
+               goto out_write_access;
 
        gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
 
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret)
-               goto out;
+               goto out_uninit;
 
        set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
        set_bit(GIF_SW_PAGED, &ip->i_flags);
@@ -480,12 +484,15 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        gfs2_quota_unlock(ip);
 out_unlock:
        gfs2_glock_dq(&gh);
-out:
+out_uninit:
        gfs2_holder_uninit(&gh);
        if (ret == 0) {
                set_page_dirty(page);
                wait_for_stable_page(page);
        }
+out_write_access:
+       put_write_access(inode);
+out:
        sb_end_pagefault(inode->i_sb);
        return block_page_mkwrite_return(ret);
 }
@@ -594,10 +601,10 @@ static int gfs2_release(struct inode *inode, struct file *file)
        kfree(file->private_data);
        file->private_data = NULL;
 
-       if ((file->f_mode & FMODE_WRITE) &&
-           (atomic_read(&inode->i_writecount) == 1))
-               gfs2_rs_delete(ip);
+       if (!(file->f_mode & FMODE_WRITE))
+               return 0;
 
+       gfs2_rs_delete(ip);
        return 0;
 }
 
index 8833a4f264e3a2e1aefc79af532d0d83920185b5..62b484e4a9e4e46ac905e9a24e8afdce330225d2 100644 (file)
@@ -189,6 +189,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
        return inode;
 
 fail_refresh:
+       ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
        ip->i_iopen_gh.gh_gl->gl_object = NULL;
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_iopen:
index c5fa758fd8446e1938036be9cdedaf75e2bc552b..6c33d7b6e0c4e26b6b02d74d8d5e090f26c820e9 100644 (file)
@@ -212,7 +212,7 @@ static void gfs2_end_log_write(struct bio *bio, int error)
                fs_err(sdp, "Error %d writing to log\n", error);
        }
 
-       bio_for_each_segment(bvec, bio, i) {
+       bio_for_each_segment_all(bvec, bio, i) {
                page = bvec->bv_page;
                if (page_has_buffers(page))
                        gfs2_end_log_write_bh(sdp, bvec, error);
@@ -419,7 +419,9 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
                if (total > limit)
                        num = limit;
                gfs2_log_unlock(sdp);
-               page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA, num + 1, num);
+               page = gfs2_get_log_desc(sdp,
+                                        is_databuf ? GFS2_LOG_DESC_JDATA :
+                                        GFS2_LOG_DESC_METADATA, num + 1, num);
                ld = page_address(page);
                gfs2_log_lock(sdp);
                ptr = (__be64 *)(ld + 1);
index c7c840e916f82f333861ac7998ccf53710cc34d8..c253b13722e8a8e2fc08b9708da11f74fe3b270a 100644 (file)
@@ -121,7 +121,7 @@ static u64 qd2index(struct gfs2_quota_data *qd)
 {
        struct kqid qid = qd->qd_id;
        return (2 * (u64)from_kqid(&init_user_ns, qid)) +
-               (qid.type == USRQUOTA) ? 0 : 1;
+               ((qid.type == USRQUOTA) ? 0 : 1);
 }
 
 static u64 qd2offset(struct gfs2_quota_data *qd)
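
Aside: the qd2index() change above is a pure operator-precedence fix; the conditional operator binds more loosely than +, so without the extra parentheses the whole sum became the condition and the result collapsed to 0 or 1. A small standalone illustration (plain C, values made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long id = 7;
            int is_user = 1;        /* stand-in for qid.type == USRQUOTA */

            /* parsed as ((2 * id) + is_user) ? 0 : 1, which is 0 here */
            unsigned long long old = (2 * id) + is_user ? 0 : 1;
            /* intended value: 2 * id for user quota, 2 * id + 1 for group */
            unsigned long long fixed = (2 * id) + (is_user ? 0 : 1);       /* 14 */

            printf("old=%llu fixed=%llu\n", old, fixed);
            return 0;
    }
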
@@ -721,7 +721,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                        goto unlock_out;
        }
 
-       gfs2_trans_add_meta(ip->i_gl, bh);
+       gfs2_trans_add_data(ip->i_gl, bh);
 
        kaddr = kmap_atomic(page);
        if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
index 0c5a575b513ea2e28e4f4a4065de97db365fe576..9809156e3d044e9ca4c39dd9b58c69105f282ab0 100644 (file)
@@ -638,8 +638,10 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
  */
 void gfs2_rs_delete(struct gfs2_inode *ip)
 {
+       struct inode *inode = &ip->i_inode;
+
        down_write(&ip->i_rw_mutex);
-       if (ip->i_res) {
+       if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
                gfs2_rs_deltree(ip->i_res);
                BUG_ON(ip->i_res->rs_free);
                kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
@@ -1401,9 +1403,14 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
        u32 extlen;
        u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
        int ret;
+       struct inode *inode = &ip->i_inode;
 
-       extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
-       extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
+       if (S_ISDIR(inode->i_mode))
+               extlen = 1;
+       else {
+               extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
+               extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
+       }
        if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
                return;
 
index 917c8e1eb4ae5e1889e290c6fda4a73bcb869bbe..e5639dec66c49dc7361ff0ed79828301031b675d 100644 (file)
@@ -1444,6 +1444,7 @@ static void gfs2_evict_inode(struct inode *inode)
        /* Must not read inode block until block type has been verified */
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
        if (unlikely(error)) {
+               ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                goto out;
        }
@@ -1514,8 +1515,10 @@ static void gfs2_evict_inode(struct inode *inode)
        if (gfs2_rs_active(ip->i_res))
                gfs2_rs_deltree(ip->i_res);
 
-       if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
+       if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+               ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq(&ip->i_iopen_gh);
+       }
        gfs2_holder_uninit(&ip->i_iopen_gh);
        gfs2_glock_dq_uninit(&gh);
        if (error && error != GLR_TRYFAILED && error != -EROFS)
@@ -1534,6 +1537,7 @@ static void gfs2_evict_inode(struct inode *inode)
        ip->i_gl = NULL;
        if (ip->i_iopen_gh.gh_gl) {
                ip->i_iopen_gh.gh_gl->gl_object = NULL;
+               ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
        }
 }
index f3b1a15ccd5930e72896bdbaf3213460c3bb9f34..d3fa6bd9503e762c861debdd4fe64bef546bb78f 100644 (file)
@@ -415,7 +415,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
        spin_lock(&tree->hash_lock);
        node = hfs_bnode_findhash(tree, num);
        spin_unlock(&tree->hash_lock);
-       BUG_ON(node);
+       if (node) {
+               pr_crit("new node %u already hashed?\n", num);
+               WARN_ON(1);
+               return node;
+       }
        node = __hfs_bnode_create(tree, num);
        if (!node)
                return ERR_PTR(-ENOMEM);
index 546f6d39713aa97be0e3d03f8dc115317a481ac7..834ac13c04b7976442a72b8c7ee20fa3d8b443b2 100644 (file)
@@ -33,25 +33,27 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
        if (whence == SEEK_DATA || whence == SEEK_HOLE)
                return -EINVAL;
 
+       mutex_lock(&i->i_mutex);
        hpfs_lock(s);
 
        /*printk("dir lseek\n");*/
        if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
-       mutex_lock(&i->i_mutex);
        pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
        while (pos != new_off) {
                if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
                else goto fail;
                if (pos == 12) goto fail;
        }
-       mutex_unlock(&i->i_mutex);
+       hpfs_add_pos(i, &filp->f_pos);
 ok:
+       filp->f_pos = new_off;
        hpfs_unlock(s);
-       return filp->f_pos = new_off;
-fail:
        mutex_unlock(&i->i_mutex);
+       return new_off;
+fail:
        /*printk("illegal lseek: %016llx\n", new_off);*/
        hpfs_unlock(s);
+       mutex_unlock(&i->i_mutex);
        return -ESPIPE;
 }
 
index c57499dca89c5a3910bcefc5af951179aa693f24..360d27c488873825fed5c04f8bb2320a51a39d62 100644 (file)
@@ -2009,7 +2009,13 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
-       submit_bio(READ_SYNC, bio);
+       /*check if journaling to disk has been disabled*/
+       if (log->no_integrity) {
+               bio->bi_size = 0;
+               lbmIODone(bio, 0);
+       } else {
+               submit_bio(READ_SYNC, bio);
+       }
 
        wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
 
index 2003e830ed1c0d890f1fe0944db8dff04dab8ee6..788e0a9c1fb09cfb6d80ee65e1747310c7c840d0 100644 (file)
@@ -611,11 +611,28 @@ static int jfs_freeze(struct super_block *sb)
 {
        struct jfs_sb_info *sbi = JFS_SBI(sb);
        struct jfs_log *log = sbi->log;
+       int rc = 0;
 
        if (!(sb->s_flags & MS_RDONLY)) {
                txQuiesce(sb);
-               lmLogShutdown(log);
-               updateSuper(sb, FM_CLEAN);
+               rc = lmLogShutdown(log);
+               if (rc) {
+                       jfs_error(sb, "jfs_freeze: lmLogShutdown failed");
+
+                       /* let operations fail rather than hang */
+                       txResume(sb);
+
+                       return rc;
+               }
+               rc = updateSuper(sb, FM_CLEAN);
+               if (rc) {
+                       jfs_err("jfs_freeze: updateSuper failed\n");
+                       /*
+                        * Don't fail here. Everything succeeded except
+                        * marking the superblock clean, so there's really
+                        * no harm in leaving it frozen for now.
+                        */
+               }
        }
        return 0;
 }
@@ -627,13 +644,18 @@ static int jfs_unfreeze(struct super_block *sb)
        int rc = 0;
 
        if (!(sb->s_flags & MS_RDONLY)) {
-               updateSuper(sb, FM_MOUNT);
-               if ((rc = lmLogInit(log)))
-                       jfs_err("jfs_unlock failed with return code %d", rc);
-               else
-                       txResume(sb);
+               rc = updateSuper(sb, FM_MOUNT);
+               if (rc) {
+                       jfs_error(sb, "jfs_unfreeze: updateSuper failed");
+                       goto out;
+               }
+               rc = lmLogInit(log);
+               if (rc)
+                       jfs_error(sb, "jfs_unfreeze: lmLogInit failed");
+out:
+               txResume(sb);
        }
-       return 0;
+       return rc;
 }
 
 static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
index 57ae9c8c66bfc6d98ae96643874710cdc3de321a..85e40d1c0a8fd64b358447ee09b26f906102ee1d 100644 (file)
@@ -2740,7 +2740,7 @@ static int do_last(struct nameidata *nd, struct path *path,
                if (error)
                        return error;
 
-               audit_inode(name, dir, 0);
+               audit_inode(name, dir, LOOKUP_PARENT);
                error = -EISDIR;
                /* trailing slashes? */
                if (nd->last.name[nd->last.len])
index a13d26ede254357d46bc1f24c7c404555048987f..0bc27684ebfa338d0b77c295427c31472ec66049 100644 (file)
@@ -414,7 +414,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
 
        spin_lock(&tbl->slot_tbl_lock);
        /* state manager is resetting the session */
-       if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
+       if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
                spin_unlock(&tbl->slot_tbl_lock);
                status = htonl(NFS4ERR_DELAY);
                /* Return NFS4ERR_BADSESSION if we're draining the session
index 59461c957d9d7ed7204e09f9fad1aa0b154d8486..a35582c9d4440f8fe907192427b5c4f9a9a3b061 100644 (file)
@@ -763,7 +763,7 @@ static void nfs4_callback_free_slot(struct nfs4_session *session)
         * A single slot, so highest used slotid is either 0 or -1
         */
        tbl->highest_used_slotid = NFS4_NO_SLOT;
-       nfs4_session_drain_complete(session, tbl);
+       nfs4_slot_tbl_drain_complete(tbl);
        spin_unlock(&tbl->slot_tbl_lock);
 }
 
index 947b0c908aa908c643140dd7087b0927753c6d49..4cbad5d6b276f8c481984c166869e2679c2e241e 100644 (file)
@@ -203,7 +203,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
        __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
        error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
        if (error == -EINVAL)
-               error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_NULL);
+               error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
        if (error < 0)
                goto error;
 
index 8fbc100541154cbd31253dc261eb5eaafbe71e29..d7ba5616989c49fe52d396d41187a663744598f1 100644 (file)
@@ -572,7 +572,7 @@ int nfs41_setup_sequence(struct nfs4_session *session,
        task->tk_timeout = 0;
 
        spin_lock(&tbl->slot_tbl_lock);
-       if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
+       if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
            !args->sa_privileged) {
                /* The state manager will wait until the slot table is empty */
                dprintk("%s session is draining\n", __func__);
@@ -1078,7 +1078,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
        struct nfs4_state *state = opendata->state;
        struct nfs_inode *nfsi = NFS_I(state->inode);
        struct nfs_delegation *delegation;
-       int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
+       int open_mode = opendata->o_arg.open_flags;
        fmode_t fmode = opendata->o_arg.fmode;
        nfs4_stateid stateid;
        int ret = -EAGAIN;
index ebda5f4a031b74d2c890d4c1e6cf932d869e8f2b..c4e225e4a9afc382b1eb689314ec2e7875cc1a45 100644 (file)
@@ -73,7 +73,7 @@ void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
                        tbl->highest_used_slotid = new_max;
                else {
                        tbl->highest_used_slotid = NFS4_NO_SLOT;
-                       nfs4_session_drain_complete(tbl->session, tbl);
+                       nfs4_slot_tbl_drain_complete(tbl);
                }
        }
        dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
@@ -226,7 +226,7 @@ static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
        struct nfs4_slot *slot = pslot;
        struct nfs4_slot_table *tbl = slot->table;
 
-       if (nfs4_session_draining(tbl->session) && !args->sa_privileged)
+       if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
                return false;
        slot->generation = tbl->generation;
        args->sa_slot = slot;
index 6f3cb39386d4e400d509e6526924fa0b5233ed74..ff7d9f0f8a65179fbf9bc795c4b1cdbf1ee56bf1 100644 (file)
@@ -25,6 +25,10 @@ struct nfs4_slot {
 };
 
 /* Sessions */
+enum nfs4_slot_tbl_state {
+       NFS4_SLOT_TBL_DRAINING,
+};
+
 #define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long))
 struct nfs4_slot_table {
        struct nfs4_session *session;           /* Parent session */
@@ -43,6 +47,7 @@ struct nfs4_slot_table {
        unsigned long   generation;             /* Generation counter for
                                                   target_highest_slotid */
        struct completion complete;
+       unsigned long   slot_tbl_state;
 };
 
 /*
@@ -68,7 +73,6 @@ struct nfs4_session {
 
 enum nfs4_session_state {
        NFS4_SESSION_INITING,
-       NFS4_SESSION_DRAINING,
 };
 
 #if defined(CONFIG_NFS_V4_1)
@@ -88,12 +92,11 @@ extern void nfs4_destroy_session(struct nfs4_session *session);
 extern int nfs4_init_session(struct nfs_server *server);
 extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
 
-extern void nfs4_session_drain_complete(struct nfs4_session *session,
-               struct nfs4_slot_table *tbl);
+extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
 
-static inline bool nfs4_session_draining(struct nfs4_session *session)
+static inline bool nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl)
 {
-       return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state);
+       return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
 }
 
 bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
index 300d17d85c0e03397d352746fc6fc775b39941e1..1fab140764c42756867f064a81b61903b8858a91 100644 (file)
@@ -241,7 +241,7 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
        if (ses == NULL)
                return;
        tbl = &ses->fc_slot_table;
-       if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
+       if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
                spin_lock(&tbl->slot_tbl_lock);
                nfs41_wake_slot_table(tbl);
                spin_unlock(&tbl->slot_tbl_lock);
@@ -251,15 +251,15 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
 /*
  * Signal state manager thread if session fore channel is drained
  */
-void nfs4_session_drain_complete(struct nfs4_session *session,
-               struct nfs4_slot_table *tbl)
+void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
 {
-       if (nfs4_session_draining(session))
+       if (nfs4_slot_tbl_draining(tbl))
                complete(&tbl->complete);
 }
 
-static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
+static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
 {
+       set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
        spin_lock(&tbl->slot_tbl_lock);
        if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
                INIT_COMPLETION(tbl->complete);
@@ -275,13 +275,12 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
        struct nfs4_session *ses = clp->cl_session;
        int ret = 0;
 
-       set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
        /* back channel */
-       ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
+       ret = nfs4_drain_slot_tbl(&ses->bc_slot_table);
        if (ret)
                return ret;
        /* fore channel */
-       return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
+       return nfs4_drain_slot_tbl(&ses->fc_slot_table);
 }
 
 static void nfs41_finish_session_reset(struct nfs_client *clp)
index a366107a7331ad36864ba81b8b14ba940756ac70..2d7525fbcf250225981ab521da638fc8f2a204d5 100644 (file)
@@ -1942,6 +1942,7 @@ static int nfs23_validate_mount_data(void *options,
                args->namlen            = data->namlen;
                args->bsize             = data->bsize;
 
+               args->auth_flavors[0] = RPC_AUTH_UNIX;
                if (data->flags & NFS_MOUNT_SECFLAVOUR)
                        args->auth_flavors[0] = data->pseudoflavor;
                if (!args->nfs_server.hostname)
@@ -2637,6 +2638,7 @@ static int nfs4_validate_mount_data(void *options,
                        goto out_no_address;
                args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
 
+               args->auth_flavors[0] = RPC_AUTH_UNIX;
                if (data->auth_flavourlen) {
                        if (data->auth_flavourlen > 1)
                                goto out_inval_auth;
index 8ae5abfe6ba24fd699aed2e156a2b2f815695c04..27d74a2945151cfbdf76e41cd381b90a0efe4d09 100644 (file)
@@ -279,6 +279,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str
 {
        struct svc_fh *current_fh = &cstate->current_fh;
        __be32 status;
+       int accmode = 0;
 
        /* We don't know the target directory, and therefore can not
        * set the change info
@@ -290,9 +291,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str
 
        open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
                (open->op_iattr.ia_size == 0);
+       /*
+        * In the delegation case, the client is telling us about an
+        * open that it *already* performed locally, some time ago.  We
+        * should let it succeed now if possible.
+        *
+        * In the case of a CLAIM_FH open, on the other hand, the client
+        * may be counting on us to enforce permissions (the Linux 4.1
+        * client uses this for normal opens, for example).
+        */
+       if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
+               accmode = NFSD_MAY_OWNER_OVERRIDE;
 
-       status = do_open_permission(rqstp, current_fh, open,
-                                   NFSD_MAY_OWNER_OVERRIDE);
+       status = do_open_permission(rqstp, current_fh, open, accmode);
 
        return status;
 }
index 899ca26dd194d73f43234ba08447f1533428eff6..4e9a21db867ae60afcc14a6ca362e978495c4a5c 100644 (file)
@@ -146,7 +146,7 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
  * then disable recovery tracking.
  */
 static void
-legacy_recdir_name_error(int error)
+legacy_recdir_name_error(struct nfs4_client *clp, int error)
 {
        printk(KERN_ERR "NFSD: unable to generate recoverydir "
                        "name (%d).\n", error);
@@ -159,9 +159,7 @@ legacy_recdir_name_error(int error)
        if (error == -ENOENT) {
                printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
                        "Reboot recovery will not function correctly!\n");
-
-               /* the argument is ignored by the legacy exit function */
-               nfsd4_client_tracking_exit(NULL);
+               nfsd4_client_tracking_exit(clp->net);
        }
 }
 
@@ -184,7 +182,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
 
        status = nfs4_make_rec_clidname(dname, &clp->cl_name);
        if (status)
-               return legacy_recdir_name_error(status);
+               return legacy_recdir_name_error(clp, status);
 
        status = nfs4_save_creds(&original_cred);
        if (status < 0)
@@ -341,7 +339,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
 
        status = nfs4_make_rec_clidname(dname, &clp->cl_name);
        if (status)
-               return legacy_recdir_name_error(status);
+               return legacy_recdir_name_error(clp, status);
 
        status = mnt_want_write_file(nn->rec_file);
        if (status)
@@ -601,7 +599,7 @@ nfsd4_check_legacy_client(struct nfs4_client *clp)
 
        status = nfs4_make_rec_clidname(dname, &clp->cl_name);
        if (status) {
-               legacy_recdir_name_error(status);
+               legacy_recdir_name_error(clp, status);
                return status;
        }
 
index 689fb608648e9a80db3c4e643feb3fe00ff1cfa4..bccfec8343c5ee34925cea97b8fc4006f8265084 100644 (file)
@@ -219,13 +219,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
 
 static int nilfs_set_page_dirty(struct page *page)
 {
-       int ret = __set_page_dirty_buffers(page);
+       int ret = __set_page_dirty_nobuffers(page);
 
-       if (ret) {
+       if (page_has_buffers(page)) {
                struct inode *inode = page->mapping->host;
-               unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
+               unsigned nr_dirty = 0;
+               struct buffer_head *bh, *head;
 
-               nilfs_set_file_dirty(inode, nr_dirty);
+               /*
+                * This page is locked by callers, and no other thread
+                * concurrently marks its buffers dirty since they are
+                * only dirtied through routines in fs/buffer.c in
+                * which call sites of mark_buffer_dirty are protected
+                * by page lock.
+                */
+               bh = head = page_buffers(page);
+               do {
+                       /* Do not mark hole blocks dirty */
+                       if (buffer_dirty(bh) || !buffer_mapped(bh))
+                               continue;
+
+                       set_buffer_dirty(bh);
+                       nr_dirty++;
+               } while (bh = bh->b_this_page, bh != head);
+
+               if (nr_dirty)
+                       nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
 }
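
Aside: the buffer walk added above visits every buffer_head attached to the page exactly once; the list is a singly linked ring, and the comma operator advances and tests in a single loop condition. A generic stand-alone sketch of that traversal shape (the struct here is invented for illustration):

    #include <stdio.h>

    struct ring { int val; struct ring *next; };

    int main(void)
    {
            struct ring c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
            struct ring *head = &a, *p = head;

            c.next = head;                  /* close the ring, like b_this_page */
            do {
                    printf("%d\n", p->val); /* visit each node exactly once */
            } while (p = p->next, p != head);
            return 0;
    }
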
index d0be29fa94cffba4e11355b20a270a11e608adef..6c80083a984fc192ebc73bad2a2edc86b4c25e11 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 
 #include <asm/ioctls.h>
 
@@ -857,6 +858,22 @@ SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
        return ret;
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE6(fanotify_mark,
+                               int, fanotify_fd, unsigned int, flags,
+                               __u32, mask0, __u32, mask1, int, dfd,
+                               const char  __user *, pathname)
+{
+       return sys_fanotify_mark(fanotify_fd, flags,
+#ifdef __BIG_ENDIAN
+                               ((__u64)mask1 << 32) | mask0,
+#else
+                               ((__u64)mask0 << 32) | mask1,
+#endif
+                                dfd, pathname);
+}
+#endif
+
 /*
  * fanotify_user_setup - Our initialization function.  Note that we cannot return
  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
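
Aside: the compat wrapper above rebuilds a 64-bit event mask from two 32-bit syscall arguments; which argument carries which half is ABI-dependent, hence the #ifdef in the hunk. The reassembly arithmetic on its own, as a round-trip check with arbitrary values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t mask = 0x0000000800000001ULL;  /* example 64-bit event mask */
            uint32_t hi = mask >> 32;               /* 0x00000008 */
            uint32_t lo = mask & 0xffffffffU;       /* 0x00000001 */
            uint64_t rebuilt = ((uint64_t)hi << 32) | lo;

            printf("round trip %s\n", rebuilt == mask ? "ok" : "broken");
            return 0;
    }
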
index 1c39efb71bab6d2c4926962c0a642d291e2b360b..2487116d0d3312981834aa3667fd708dc7aa05ab 100644 (file)
@@ -790,7 +790,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                                                 &hole_size, &rec, &is_last);
                if (ret) {
                        mlog_errno(ret);
-                       goto out;
+                       goto out_unlock;
                }
 
                if (rec.e_blkno == 0ULL) {
index 8a7509f9e6f5e0f49438876228a8e887ac2cdeb1..ff54014a24ecd58511c37a16b33f5e27a1e3e6dd 100644 (file)
@@ -2288,7 +2288,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                ret = ocfs2_inode_lock(inode, NULL, 1);
                if (ret < 0) {
                        mlog_errno(ret);
-                       goto out_sems;
+                       goto out;
                }
 
                ocfs2_inode_unlock(inode, 1);
index 3d2a7141b87a4b38a1aea383a68810ada1271404..9af0df15256e9405082fdb59d1a2c6fe97fb8e59 100644 (file)
@@ -83,7 +83,8 @@ static int do_make_slave(struct mount *mnt)
                if (peer_mnt == mnt)
                        peer_mnt = NULL;
        }
-       if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share))
+       if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
+           list_empty(&mnt->mnt_share))
                mnt_release_group_id(mnt);
 
        list_del_init(&mnt->mnt_share);
index 8798d065e400fca8303e25b5665b0c1d7eaa579f..afa6be6fc39759edc22abca58d3e21677576b0b4 100644 (file)
@@ -120,7 +120,7 @@ static int qnx6_readdir(struct file *filp, void *dirent, filldir_t filldir)
        struct inode *inode = file_inode(filp);
        struct super_block *s = inode->i_sb;
        struct qnx6_sb_info *sbi = QNX6_SB(s);
-       loff_t pos = filp->f_pos & (QNX6_DIR_ENTRY_SIZE - 1);
+       loff_t pos = filp->f_pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
        unsigned long npages = dir_pages(inode);
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE;
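
Aside: the qnx6 fix above is a one-character mask inversion. With an entry size that is a power of two, pos & (SIZE - 1) yields the offset inside an entry, whereas pos & ~(SIZE - 1) aligns pos down to an entry boundary, which is what a readdir restart needs. A worked example, assuming a 32-byte directory entry purely for illustration:

    #include <stdio.h>

    int main(void)
    {
            const long entry_size = 32;     /* assumed QNX6_DIR_ENTRY_SIZE */
            long pos = 100;

            long old_pos = pos & (entry_size - 1);   /* 4: offset inside an entry */
            long new_pos = pos & ~(entry_size - 1);  /* 96: aligned entry start */

            printf("old mask gives %ld, new mask gives %ld\n", old_pos, new_pos);
            return 0;
    }
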
index 66c53b642a880a1da6bf70c7d85a844dd18cbcea..6c2d136561cbd5db121285d97ee335fdf20db081 100644 (file)
@@ -204,6 +204,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
                                next_pos = deh_offset(deh) + 1;
 
                                if (item_moved(&tmp_ih, &path_to_entry)) {
+                                       set_cpu_key_k_offset(&pos_key,
+                                                            next_pos);
                                        goto research;
                                }
                        }       /* for */
index 77d6d47abc838be3a18acc837f9f97816974e6e1..f844533792ee99d7c7191f070869d1462cea52ab 100644 (file)
@@ -1811,11 +1811,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                                  TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
        memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
        args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
-       if (insert_inode_locked4(inode, args.objectid,
-                            reiserfs_find_actor, &args) < 0) {
+
+       reiserfs_write_unlock(inode->i_sb);
+       err = insert_inode_locked4(inode, args.objectid,
+                            reiserfs_find_actor, &args);
+       reiserfs_write_lock(inode->i_sb);
+       if (err) {
                err = -EINVAL;
                goto out_bad_inode;
        }
+
        if (old_format_only(sb))
                /* not a perfect generation count, as object ids can be reused, but
                 ** this is as good as reiserfs can do right now.
index 4cce1d9552fbbcd5f23447245a8b6f942c6fe89c..821bcf70e467432e14868b0c5bb00cb1b6940598 100644 (file)
@@ -318,7 +318,19 @@ static int delete_one_xattr(struct dentry *dentry, void *data)
 static int chown_one_xattr(struct dentry *dentry, void *data)
 {
        struct iattr *attrs = data;
-       return reiserfs_setattr(dentry, attrs);
+       int ia_valid = attrs->ia_valid;
+       int err;
+
+       /*
+        * We only want the ownership bits. Otherwise, we'll do
+        * things like change a directory to a regular file if
+        * ATTR_MODE is set.
+        */
+       attrs->ia_valid &= (ATTR_UID|ATTR_GID);
+       err = reiserfs_setattr(dentry, attrs);
+       attrs->ia_valid = ia_valid;
+
+       return err;
 }
 
 /* No i_mutex, but the inode is unconnected. */
index d7c01ef64edab42ed59d022a2cf36f917e1935c0..6c8767fdfc6a287b0a9d201b4048d89b06fecaad 100644 (file)
@@ -443,6 +443,9 @@ int reiserfs_acl_chmod(struct inode *inode)
        int depth;
        int error;
 
+       if (IS_PRIVATE(inode))
+               return 0;
+
        if (S_ISLNK(inode->i_mode))
                return -EOPNOTSUPP;
 
index e1a7779dd3cb18e72276569f8d2b0ce77e53a42c..f373bde8f545da481ba0a7caa873271dd599b30d 100644 (file)
@@ -49,8 +49,11 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
                return (unsigned long) -EINVAL;
 
        offset += ROMFS_I(inode)->i_dataoffset;
-       if (offset > mtd->size - len)
+       if (offset >= mtd->size)
                return (unsigned long) -EINVAL;
+       /* the mapping mustn't extend beyond the EOF */
+       if ((offset + len) > mtd->size)
+               len = mtd->size - offset;
 
        ret = mtd_get_unmapped_area(mtd, len, offset, flags);
        if (ret == -EOPNOTSUPP)
index 1d32f1d5276339e96b0034e13050eaeb39dba653..306d883d89bc7d6420ca4b5b8c5f848e573249bf 100644 (file)
@@ -21,6 +21,8 @@
 #include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_vnodeops.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
 #include "xfs_trace.h"
 #include <linux/slab.h>
 #include <linux/xattr.h>
@@ -34,7 +36,9 @@
  */
 
 STATIC struct posix_acl *
-xfs_acl_from_disk(struct xfs_acl *aclp)
+xfs_acl_from_disk(
+       struct xfs_acl  *aclp,
+       int             max_entries)
 {
        struct posix_acl_entry *acl_e;
        struct posix_acl *acl;
@@ -42,7 +46,7 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
        unsigned int count, i;
 
        count = be32_to_cpu(aclp->acl_cnt);
-       if (count > XFS_ACL_MAX_ENTRIES)
+       if (count > max_entries)
                return ERR_PTR(-EFSCORRUPTED);
 
        acl = posix_acl_alloc(count, GFP_KERNEL);
@@ -108,9 +112,9 @@ xfs_get_acl(struct inode *inode, int type)
        struct xfs_inode *ip = XFS_I(inode);
        struct posix_acl *acl;
        struct xfs_acl *xfs_acl;
-       int len = sizeof(struct xfs_acl);
        unsigned char *ea_name;
        int error;
+       int len;
 
        acl = get_cached_acl(inode, type);
        if (acl != ACL_NOT_CACHED)
@@ -133,8 +137,8 @@ xfs_get_acl(struct inode *inode, int type)
         * If we have a cached ACLs value just return it, not need to
         * go out to the disk.
         */
-
-       xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
+       len = XFS_ACL_MAX_SIZE(ip->i_mount);
+       xfs_acl = kzalloc(len, GFP_KERNEL);
        if (!xfs_acl)
                return ERR_PTR(-ENOMEM);
 
@@ -153,7 +157,7 @@ xfs_get_acl(struct inode *inode, int type)
                goto out;
        }
 
-       acl = xfs_acl_from_disk(xfs_acl);
+       acl = xfs_acl_from_disk(xfs_acl, XFS_ACL_MAX_ENTRIES(ip->i_mount));
        if (IS_ERR(acl))
                goto out;
 
@@ -189,16 +193,17 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
 
        if (acl) {
                struct xfs_acl *xfs_acl;
-               int len;
+               int len = XFS_ACL_MAX_SIZE(ip->i_mount);
 
-               xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
+               xfs_acl = kzalloc(len, GFP_KERNEL);
                if (!xfs_acl)
                        return -ENOMEM;
 
                xfs_acl_to_disk(xfs_acl, acl);
-               len = sizeof(struct xfs_acl) -
-                       (sizeof(struct xfs_acl_entry) *
-                        (XFS_ACL_MAX_ENTRIES - acl->a_count));
+
+               /* subtract away the unused acl entries */
+               len -= sizeof(struct xfs_acl_entry) *
+                        (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);
 
                error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
                                len, ATTR_ROOT);
@@ -243,7 +248,7 @@ xfs_set_mode(struct inode *inode, umode_t mode)
 static int
 xfs_acl_exists(struct inode *inode, unsigned char *name)
 {
-       int len = sizeof(struct xfs_acl);
+       int len = XFS_ACL_MAX_SIZE(XFS_M(inode->i_sb));
 
        return (xfs_attr_get(XFS_I(inode), name, NULL, &len,
                            ATTR_ROOT|ATTR_KERNOVAL) == 0);
@@ -379,7 +384,7 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name,
                goto out_release;
 
        error = -EINVAL;
-       if (acl->a_count > XFS_ACL_MAX_ENTRIES)
+       if (acl->a_count > XFS_ACL_MAX_ENTRIES(XFS_M(inode->i_sb)))
                goto out_release;
 
        if (type == ACL_TYPE_ACCESS) {
index 39632d94135490ac94dee48ff5f037d29d5592ac..4016a567b83cc2e00d46b5f4629bfc83785a7dff 100644 (file)
@@ -22,19 +22,36 @@ struct inode;
 struct posix_acl;
 struct xfs_inode;
 
-#define XFS_ACL_MAX_ENTRIES 25
 #define XFS_ACL_NOT_PRESENT (-1)
 
 /* On-disk XFS access control list structure */
+struct xfs_acl_entry {
+       __be32  ae_tag;
+       __be32  ae_id;
+       __be16  ae_perm;
+       __be16  ae_pad;         /* fill the implicit hole in the structure */
+};
+
 struct xfs_acl {
-       __be32          acl_cnt;
-       struct xfs_acl_entry {
-               __be32  ae_tag;
-               __be32  ae_id;
-               __be16  ae_perm;
-       } acl_entry[XFS_ACL_MAX_ENTRIES];
+       __be32                  acl_cnt;
+       struct xfs_acl_entry    acl_entry[0];
 };
 
+/*
+ * The number of ACL entries allowed is defined by the on-disk format.
+ * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is
+ * limited only by the maximum size of the xattr that stores the information.
+ */
+#define XFS_ACL_MAX_ENTRIES(mp)        \
+       (xfs_sb_version_hascrc(&mp->m_sb) \
+               ?  (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
+                                               sizeof(struct xfs_acl_entry) \
+               : 25)
+
+#define XFS_ACL_MAX_SIZE(mp) \
+       (sizeof(struct xfs_acl) + \
+               sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
+
 /* On-disk XFS extended attribute names */
 #define SGI_ACL_FILE           (unsigned char *)"SGI_ACL_FILE"
 #define SGI_ACL_DEFAULT                (unsigned char *)"SGI_ACL_DEFAULT"
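
Aside: with the flexible-array layout above, the entry limit for v5 (CRC-enabled) superblocks follows from simple arithmetic rather than the hard-coded 25. A back-of-envelope check, assuming XATTR_SIZE_MAX is 64 KiB, a 4-byte acl_cnt header and 12-byte padded entries (sizes inferred from the struct and stated here as assumptions):

    #include <stdio.h>

    int main(void)
    {
            const int xattr_size_max = 65536;   /* assumed XATTR_SIZE_MAX */
            const int hdr = 4;                  /* __be32 acl_cnt */
            const int entry = 12;               /* tag + id + perm + pad */

            int v5_entries = (xattr_size_max - hdr) / entry;    /* 5461 */
            int v4_entries = 25;

            printf("v5 max entries %d, max size %d bytes\n",
                   v5_entries, hdr + entry * v5_entries);       /* 5461, 65536 */
            printf("v4 max entries %d, max size %d bytes\n",
                   v4_entries, hdr + entry * v4_entries);       /* 25, 304 */
            return 0;
    }
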
index 2b2691b7342890e64e957d616dfb1c4aa2efc8c1..41a695048be7b09b87baf4517fd8124b6d7a8ffb 100644 (file)
@@ -725,6 +725,25 @@ xfs_convert_page(
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
                        i_size_read(inode));
 
+       /*
+        * If the current map does not span the entire page we are about to try
+        * to write, then give up. The only way we can write a page that spans
+        * multiple mappings in a single writeback iteration is via the
+        * xfs_vm_writepage() function. Data integrity writeback requires the
+        * entire page to be written in a single attempt, otherwise the part of
+        * the page we don't write here doesn't get written as part of the data
+        * integrity sync.
+        *
+        * For normal writeback, we also don't attempt to write partial pages
+        * here as it simply means that write_cache_pages() will see it under
+        * writeback and ignore the page until some point in the future, at
+        * which time this will be the only page in the file that needs
+        * writeback.  Hence for more optimal IO patterns, we should always
+        * avoid partial page writeback due to multiple mappings on a page here.
+        */
+       if (!xfs_imap_valid(inode, imap, end_offset))
+               goto fail_unlock_page;
+
        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                                        PAGE_CACHE_SIZE);
index 08d5457c948e051866217792e2fcbc12d1f5c726..31d3cd12926918978f922edb6e0f01c22d29d864 100644 (file)
@@ -931,20 +931,22 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
  */
 int
 xfs_attr_shortform_allfit(
-       struct xfs_buf  *bp,
-       struct xfs_inode *dp)
+       struct xfs_buf          *bp,
+       struct xfs_inode        *dp)
 {
-       xfs_attr_leafblock_t *leaf;
-       xfs_attr_leaf_entry_t *entry;
+       struct xfs_attr_leafblock *leaf;
+       struct xfs_attr_leaf_entry *entry;
        xfs_attr_leaf_name_local_t *name_loc;
-       int bytes, i;
+       struct xfs_attr3_icleaf_hdr leafhdr;
+       int                     bytes;
+       int                     i;
 
        leaf = bp->b_addr;
-       ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+       xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+       entry = xfs_attr3_leaf_entryp(leaf);
 
-       entry = &leaf->entries[0];
        bytes = sizeof(struct xfs_attr_sf_hdr);
-       for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
+       for (i = 0; i < leafhdr.count; entry++, i++) {
                if (entry->flags & XFS_ATTR_INCOMPLETE)
                        continue;               /* don't copy partial entries */
                if (!(entry->flags & XFS_ATTR_LOCAL))
@@ -954,15 +956,15 @@ xfs_attr_shortform_allfit(
                        return(0);
                if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
                        return(0);
-               bytes += sizeof(struct xfs_attr_sf_entry)-1
+               bytes += sizeof(struct xfs_attr_sf_entry) - 1
                                + name_loc->namelen
                                + be16_to_cpu(name_loc->valuelen);
        }
        if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
            (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
            (bytes == sizeof(struct xfs_attr_sf_hdr)))
-               return(-1);
-       return(xfs_attr_shortform_bytesfit(dp, bytes));
+               return -1;
+       return xfs_attr_shortform_bytesfit(dp, bytes);
 }
 
 /*
@@ -1410,7 +1412,7 @@ xfs_attr3_leaf_add_work(
                name_rmt->valuelen = 0;
                name_rmt->valueblk = 0;
                args->rmtblkno = 1;
-               args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
+               args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
        }
        xfs_trans_log_buf(args->trans, bp,
             XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
@@ -1443,11 +1445,12 @@ xfs_attr3_leaf_add_work(
 STATIC void
 xfs_attr3_leaf_compact(
        struct xfs_da_args      *args,
-       struct xfs_attr3_icleaf_hdr *ichdr_d,
+       struct xfs_attr3_icleaf_hdr *ichdr_dst,
        struct xfs_buf          *bp)
 {
-       xfs_attr_leafblock_t    *leaf_s, *leaf_d;
-       struct xfs_attr3_icleaf_hdr ichdr_s;
+       struct xfs_attr_leafblock *leaf_src;
+       struct xfs_attr_leafblock *leaf_dst;
+       struct xfs_attr3_icleaf_hdr ichdr_src;
        struct xfs_trans        *trans = args->trans;
        struct xfs_mount        *mp = trans->t_mountp;
        char                    *tmpbuffer;
@@ -1455,29 +1458,38 @@ xfs_attr3_leaf_compact(
        trace_xfs_attr_leaf_compact(args);
 
        tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
-       ASSERT(tmpbuffer != NULL);
        memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp));
        memset(bp->b_addr, 0, XFS_LBSIZE(mp));
+       leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
+       leaf_dst = bp->b_addr;
 
        /*
-        * Copy basic information
+        * Copy the on-disk header back into the destination buffer to ensure
+        * all the information in the header that is not part of the incore
+        * header structure is preserved.
         */
-       leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
-       leaf_d = bp->b_addr;
-       ichdr_s = *ichdr_d;     /* struct copy */
-       ichdr_d->firstused = XFS_LBSIZE(mp);
-       ichdr_d->usedbytes = 0;
-       ichdr_d->count = 0;
-       ichdr_d->holes = 0;
-       ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_s);
-       ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
+       memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src));
+
+       /* Initialise the incore headers */
+       ichdr_src = *ichdr_dst; /* struct copy */
+       ichdr_dst->firstused = XFS_LBSIZE(mp);
+       ichdr_dst->usedbytes = 0;
+       ichdr_dst->count = 0;
+       ichdr_dst->holes = 0;
+       ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src);
+       ichdr_dst->freemap[0].size = ichdr_dst->firstused -
+                                               ichdr_dst->freemap[0].base;
+
+
+       /* write the header back to initialise the underlying buffer */
+       xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst);
 
        /*
         * Copy all entry's in the same (sorted) order,
         * but allocate name/value pairs packed and in sequence.
         */
-       xfs_attr3_leaf_moveents(leaf_s, &ichdr_s, 0, leaf_d, ichdr_d, 0,
-                               ichdr_s.count, mp);
+       xfs_attr3_leaf_moveents(leaf_src, &ichdr_src, 0, leaf_dst, ichdr_dst, 0,
+                               ichdr_src.count, mp);
        /*
         * this logs the entire buffer, but the caller must write the header
         * back to the buffer when it is finished modifying it.
@@ -2179,14 +2191,24 @@ xfs_attr3_leaf_unbalance(
                struct xfs_attr_leafblock *tmp_leaf;
                struct xfs_attr3_icleaf_hdr tmphdr;
 
-               tmp_leaf = kmem_alloc(state->blocksize, KM_SLEEP);
-               memset(tmp_leaf, 0, state->blocksize);
-               memset(&tmphdr, 0, sizeof(tmphdr));
+               tmp_leaf = kmem_zalloc(state->blocksize, KM_SLEEP);
+
+               /*
+                * Copy the header into the temp leaf so that all the stuff
+                * not in the incore header is present and gets copied back in
+                * once we've moved all the entries.
+                */
+               memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf));
 
+               memset(&tmphdr, 0, sizeof(tmphdr));
                tmphdr.magic = savehdr.magic;
                tmphdr.forw = savehdr.forw;
                tmphdr.back = savehdr.back;
                tmphdr.firstused = state->blocksize;
+
+               /* write the header to the temp buffer to initialise it */
+               xfs_attr3_leaf_hdr_to_disk(tmp_leaf, &tmphdr);
+
                if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
                                         drop_blk->bp, &drophdr)) {
                        xfs_attr3_leaf_moveents(drop_leaf, &drophdr, 0,
@@ -2330,9 +2352,11 @@ xfs_attr3_leaf_lookup_int(
                        if (!xfs_attr_namesp_match(args->flags, entry->flags))
                                continue;
                        args->index = probe;
+                       args->valuelen = be32_to_cpu(name_rmt->valuelen);
                        args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
-                       args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
-                                                  be32_to_cpu(name_rmt->valuelen));
+                       args->rmtblkcnt = xfs_attr3_rmt_blocks(
+                                                       args->dp->i_mount,
+                                                       args->valuelen);
                        return XFS_ERROR(EEXIST);
                }
        }
@@ -2383,7 +2407,8 @@ xfs_attr3_leaf_getvalue(
                ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
                valuelen = be32_to_cpu(name_rmt->valuelen);
                args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
-               args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
+               args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
+                                                      valuelen);
                if (args->flags & ATTR_KERNOVAL) {
                        args->valuelen = valuelen;
                        return 0;
@@ -2709,7 +2734,8 @@ xfs_attr3_leaf_list_int(
                                args.valuelen = valuelen;
                                args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
                                args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
-                               args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
+                               args.rmtblkcnt = xfs_attr3_rmt_blocks(
+                                                       args.dp->i_mount, valuelen);
                                retval = xfs_attr_rmtval_get(&args);
                                if (retval)
                                        return retval;
@@ -3232,7 +3258,7 @@ xfs_attr3_leaf_inactive(
                        name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
                        if (name_rmt->valueblk) {
                                lp->valueblk = be32_to_cpu(name_rmt->valueblk);
-                               lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
+                               lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
                                                    be32_to_cpu(name_rmt->valuelen));
                                lp++;
                        }
index dee84466dcc934add7391861d6c83631a63bca62..ef6b0c124528f6bff8d59c0fee5fa31a1d5dcc8b 100644 (file)
  * Each contiguous block has a header, so it is not just a simple attribute
  * length to FSB conversion.
  */
-static int
+int
 xfs_attr3_rmt_blocks(
        struct xfs_mount *mp,
        int             attrlen)
 {
-       int             buflen = XFS_ATTR3_RMT_BUF_SPACE(mp,
-                                                        mp->m_sb.sb_blocksize);
-       return (attrlen + buflen - 1) / buflen;
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
+               return (attrlen + buflen - 1) / buflen;
+       }
+       return XFS_B_TO_FSB(mp, attrlen);
+}
+
+/*
+ * Checking of the remote attribute header is split into two parts. The verifier
+ * does CRC, location and bounds checking, the unpacking function checks the
+ * attribute parameters and owner.
+ */
+static bool
+xfs_attr3_rmt_hdr_ok(
+       struct xfs_mount        *mp,
+       void                    *ptr,
+       xfs_ino_t               ino,
+       uint32_t                offset,
+       uint32_t                size,
+       xfs_daddr_t             bno)
+{
+       struct xfs_attr3_rmt_hdr *rmt = ptr;
+
+       if (bno != be64_to_cpu(rmt->rm_blkno))
+               return false;
+       if (offset != be32_to_cpu(rmt->rm_offset))
+               return false;
+       if (size != be32_to_cpu(rmt->rm_bytes))
+               return false;
+       if (ino != be64_to_cpu(rmt->rm_owner))
+               return false;
+
+       /* ok */
+       return true;
 }
 
 static bool
 xfs_attr3_rmt_verify(
-       struct xfs_buf          *bp)
+       struct xfs_mount        *mp,
+       void                    *ptr,
+       int                     fsbsize,
+       xfs_daddr_t             bno)
 {
-       struct xfs_mount        *mp = bp->b_target->bt_mount;
-       struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
+       struct xfs_attr3_rmt_hdr *rmt = ptr;
 
        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return false;
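
With one header per filesystem block, the block count for a remote attribute value reduces to a ceiling division over the per-block payload space, as xfs_attr3_rmt_blocks() above shows. Below is a minimal userspace sketch of the same arithmetic; the 56-byte header and 4096-byte block size are assumptions for illustration, not values taken from this patch.

#include <stdio.h>

/*
 * Illustrative stand-in for XFS_ATTR3_RMT_BUF_SPACE(): payload bytes per
 * block once the per-block remote-attribute header is subtracted.
 * Header size and block size are assumed values for this sketch.
 */
#define RMT_HDR_SIZE   56U     /* assumed sizeof(struct xfs_attr3_rmt_hdr) */
#define BLOCK_SIZE     4096U   /* assumed filesystem block size */

static unsigned int rmt_blocks(unsigned int attrlen, int has_crc)
{
        if (has_crc) {
                unsigned int buflen = BLOCK_SIZE - RMT_HDR_SIZE;
                return (attrlen + buflen - 1) / buflen;  /* ceiling division */
        }
        /* non-CRC filesystems: plain bytes-to-blocks conversion */
        return (attrlen + BLOCK_SIZE - 1) / BLOCK_SIZE;
}

int main(void)
{
        /* A 64KiB value needs 17 blocks with headers, 16 without. */
        printf("crc=1: %u blocks\n", rmt_blocks(65536, 1));
        printf("crc=0: %u blocks\n", rmt_blocks(65536, 0));
        return 0;
}

For a 64 KiB value this gives 17 blocks on a CRC (v5) filesystem versus 16 on an older one, which is why the callers in xfs_attr_leaf.c above were switched from XFS_B_TO_FSB() to this helper.
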
@@ -70,7 +103,9 @@ xfs_attr3_rmt_verify(
                return false;
        if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid))
                return false;
-       if (bp->b_bn != be64_to_cpu(rmt->rm_blkno))
+       if (be64_to_cpu(rmt->rm_blkno) != bno)
+               return false;
+       if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
                return false;
        if (be32_to_cpu(rmt->rm_offset) +
                                be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX)
@@ -86,17 +121,40 @@ xfs_attr3_rmt_read_verify(
        struct xfs_buf  *bp)
 {
        struct xfs_mount *mp = bp->b_target->bt_mount;
+       char            *ptr;
+       int             len;
+       bool            corrupt = false;
+       xfs_daddr_t     bno;
 
        /* no verification of non-crc buffers */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return;
 
-       if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
-                             XFS_ATTR3_RMT_CRC_OFF) ||
-           !xfs_attr3_rmt_verify(bp)) {
+       ptr = bp->b_addr;
+       bno = bp->b_bn;
+       len = BBTOB(bp->b_length);
+       ASSERT(len >= XFS_LBSIZE(mp));
+
+       while (len > 0) {
+               if (!xfs_verify_cksum(ptr, XFS_LBSIZE(mp),
+                                     XFS_ATTR3_RMT_CRC_OFF)) {
+                       corrupt = true;
+                       break;
+               }
+               if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
+                       corrupt = true;
+                       break;
+               }
+               len -= XFS_LBSIZE(mp);
+               ptr += XFS_LBSIZE(mp);
+               bno += mp->m_bsize;
+       }
+
+       if (corrupt) {
                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
                xfs_buf_ioerror(bp, EFSCORRUPTED);
-       }
+       } else
+               ASSERT(len == 0);
 }
 
 static void
@@ -105,23 +163,39 @@ xfs_attr3_rmt_write_verify(
 {
        struct xfs_mount *mp = bp->b_target->bt_mount;
        struct xfs_buf_log_item *bip = bp->b_fspriv;
+       char            *ptr;
+       int             len;
+       xfs_daddr_t     bno;
 
        /* no verification of non-crc buffers */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return;
 
-       if (!xfs_attr3_rmt_verify(bp)) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-               xfs_buf_ioerror(bp, EFSCORRUPTED);
-               return;
-       }
+       ptr = bp->b_addr;
+       bno = bp->b_bn;
+       len = BBTOB(bp->b_length);
+       ASSERT(len >= XFS_LBSIZE(mp));
+
+       while (len > 0) {
+               if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
+                       XFS_CORRUPTION_ERROR(__func__,
+                                           XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+                       xfs_buf_ioerror(bp, EFSCORRUPTED);
+                       return;
+               }
+               if (bip) {
+                       struct xfs_attr3_rmt_hdr *rmt;
+
+                       rmt = (struct xfs_attr3_rmt_hdr *)ptr;
+                       rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+               }
+               xfs_update_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF);
 
-       if (bip) {
-               struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
-               rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+               len -= XFS_LBSIZE(mp);
+               ptr += XFS_LBSIZE(mp);
+               bno += mp->m_bsize;
        }
-       xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
-                        XFS_ATTR3_RMT_CRC_OFF);
+       ASSERT(len == 0);
 }
 
 const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
@@ -129,15 +203,16 @@ const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
        .verify_write = xfs_attr3_rmt_write_verify,
 };
 
-static int
+STATIC int
 xfs_attr3_rmt_hdr_set(
        struct xfs_mount        *mp,
+       void                    *ptr,
        xfs_ino_t               ino,
        uint32_t                offset,
        uint32_t                size,
-       struct xfs_buf          *bp)
+       xfs_daddr_t             bno)
 {
-       struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
+       struct xfs_attr3_rmt_hdr *rmt = ptr;
 
        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return 0;
@@ -147,36 +222,107 @@ xfs_attr3_rmt_hdr_set(
        rmt->rm_bytes = cpu_to_be32(size);
        uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid);
        rmt->rm_owner = cpu_to_be64(ino);
-       rmt->rm_blkno = cpu_to_be64(bp->b_bn);
-       bp->b_ops = &xfs_attr3_rmt_buf_ops;
+       rmt->rm_blkno = cpu_to_be64(bno);
 
        return sizeof(struct xfs_attr3_rmt_hdr);
 }
 
 /*
- * Checking of the remote attribute header is split into two parts. the verifier
- * does CRC, location and bounds checking, the unpacking function checks the
- * attribute parameters and owner.
+ * Helper functions to copy attribute data in and out of the on-disk extents
  */
-static bool
-xfs_attr3_rmt_hdr_ok(
-       struct xfs_mount        *mp,
-       xfs_ino_t               ino,
-       uint32_t                offset,
-       uint32_t                size,
-       struct xfs_buf          *bp)
+STATIC int
+xfs_attr_rmtval_copyout(
+       struct xfs_mount *mp,
+       struct xfs_buf  *bp,
+       xfs_ino_t       ino,
+       int             *offset,
+       int             *valuelen,
+       char            **dst)
 {
-       struct xfs_attr3_rmt_hdr *rmt = bp->b_addr;
+       char            *src = bp->b_addr;
+       xfs_daddr_t     bno = bp->b_bn;
+       int             len = BBTOB(bp->b_length);
 
-       if (offset != be32_to_cpu(rmt->rm_offset))
-               return false;
-       if (size != be32_to_cpu(rmt->rm_bytes))
-               return false;
-       if (ino != be64_to_cpu(rmt->rm_owner))
-               return false;
+       ASSERT(len >= XFS_LBSIZE(mp));
 
-       /* ok */
-       return true;
+       while (len > 0 && *valuelen > 0) {
+               int hdr_size = 0;
+               int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
+
+               byte_cnt = min_t(int, *valuelen, byte_cnt);
+
+               if (xfs_sb_version_hascrc(&mp->m_sb)) {
+                       if (!xfs_attr3_rmt_hdr_ok(mp, src, ino, *offset,
+                                                 byte_cnt, bno)) {
+                               xfs_alert(mp,
+"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
+                                       bno, *offset, byte_cnt, ino);
+                               return EFSCORRUPTED;
+                       }
+                       hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
+               }
+
+               memcpy(*dst, src + hdr_size, byte_cnt);
+
+               /* roll buffer forwards */
+               len -= XFS_LBSIZE(mp);
+               src += XFS_LBSIZE(mp);
+               bno += mp->m_bsize;
+
+               /* roll attribute data forwards */
+               *valuelen -= byte_cnt;
+               *dst += byte_cnt;
+               *offset += byte_cnt;
+       }
+       return 0;
+}
+
+STATIC void
+xfs_attr_rmtval_copyin(
+       struct xfs_mount *mp,
+       struct xfs_buf  *bp,
+       xfs_ino_t       ino,
+       int             *offset,
+       int             *valuelen,
+       char            **src)
+{
+       char            *dst = bp->b_addr;
+       xfs_daddr_t     bno = bp->b_bn;
+       int             len = BBTOB(bp->b_length);
+
+       ASSERT(len >= XFS_LBSIZE(mp));
+
+       while (len > 0 && *valuelen > 0) {
+               int hdr_size;
+               int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
+
+               byte_cnt = min(*valuelen, byte_cnt);
+               hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
+                                                byte_cnt, bno);
+
+               memcpy(dst + hdr_size, *src, byte_cnt);
+
+               /*
+                * If this is the last block, zero the remainder of it.
+                * Check that we are actually the last block, too.
+                */
+               if (byte_cnt + hdr_size < XFS_LBSIZE(mp)) {
+                       ASSERT(*valuelen - byte_cnt == 0);
+                       ASSERT(len == XFS_LBSIZE(mp));
+                       memset(dst + hdr_size + byte_cnt, 0,
+                                       XFS_LBSIZE(mp) - hdr_size - byte_cnt);
+               }
+
+               /* roll buffer forwards */
+               len -= XFS_LBSIZE(mp);
+               dst += XFS_LBSIZE(mp);
+               bno += mp->m_bsize;
+
+               /* roll attribute data forwards */
+               *valuelen -= byte_cnt;
+               *src += byte_cnt;
+               *offset += byte_cnt;
+       }
 }
 
 /*
@@ -190,13 +336,12 @@ xfs_attr_rmtval_get(
        struct xfs_bmbt_irec    map[ATTR_RMTVALUE_MAPSIZE];
        struct xfs_mount        *mp = args->dp->i_mount;
        struct xfs_buf          *bp;
-       xfs_daddr_t             dblkno;
        xfs_dablk_t             lblkno = args->rmtblkno;
-       void                    *dst = args->value;
+       char                    *dst = args->value;
        int                     valuelen = args->valuelen;
        int                     nmap;
        int                     error;
-       int                     blkcnt;
+       int                     blkcnt = args->rmtblkcnt;
        int                     i;
        int                     offset = 0;
 
@@ -207,52 +352,36 @@ xfs_attr_rmtval_get(
        while (valuelen > 0) {
                nmap = ATTR_RMTVALUE_MAPSIZE;
                error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
-                                      args->rmtblkcnt, map, &nmap,
+                                      blkcnt, map, &nmap,
                                       XFS_BMAPI_ATTRFORK);
                if (error)
                        return error;
                ASSERT(nmap >= 1);
 
                for (i = 0; (i < nmap) && (valuelen > 0); i++) {
-                       int     byte_cnt;
-                       char    *src;
+                       xfs_daddr_t     dblkno;
+                       int             dblkcnt;
 
                        ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
                               (map[i].br_startblock != HOLESTARTBLOCK));
                        dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
-                       blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
+                       dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
                        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
-                                                  dblkno, blkcnt, 0, &bp,
+                                                  dblkno, dblkcnt, 0, &bp,
                                                   &xfs_attr3_rmt_buf_ops);
                        if (error)
                                return error;
 
-                       byte_cnt = min_t(int, valuelen, BBTOB(bp->b_length));
-                       byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt);
-
-                       src = bp->b_addr;
-                       if (xfs_sb_version_hascrc(&mp->m_sb)) {
-                               if (!xfs_attr3_rmt_hdr_ok(mp, args->dp->i_ino,
-                                                       offset, byte_cnt, bp)) {
-                                       xfs_alert(mp,
-"remote attribute header does not match required off/len/owner (0x%x/Ox%x,0x%llx)",
-                                               offset, byte_cnt, args->dp->i_ino);
-                                       xfs_buf_relse(bp);
-                                       return EFSCORRUPTED;
-
-                               }
-
-                               src += sizeof(struct xfs_attr3_rmt_hdr);
-                       }
-
-                       memcpy(dst, src, byte_cnt);
+                       error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
+                                                       &offset, &valuelen,
+                                                       &dst);
                        xfs_buf_relse(bp);
+                       if (error)
+                               return error;
 
-                       offset += byte_cnt;
-                       dst += byte_cnt;
-                       valuelen -= byte_cnt;
-
+                       /* roll attribute extent map forwards */
                        lblkno += map[i].br_blockcount;
+                       blkcnt -= map[i].br_blockcount;
                }
        }
        ASSERT(valuelen == 0);
@@ -270,17 +399,13 @@ xfs_attr_rmtval_set(
        struct xfs_inode        *dp = args->dp;
        struct xfs_mount        *mp = dp->i_mount;
        struct xfs_bmbt_irec    map;
-       struct xfs_buf          *bp;
-       xfs_daddr_t             dblkno;
        xfs_dablk_t             lblkno;
        xfs_fileoff_t           lfileoff = 0;
-       void                    *src = args->value;
+       char                    *src = args->value;
        int                     blkcnt;
        int                     valuelen;
        int                     nmap;
        int                     error;
-       int                     hdrcnt = 0;
-       bool                    crcs = xfs_sb_version_hascrc(&mp->m_sb);
        int                     offset = 0;
 
        trace_xfs_attr_rmtval_set(args);
@@ -289,24 +414,14 @@ xfs_attr_rmtval_set(
         * Find a "hole" in the attribute address space large enough for
         * us to drop the new attribute's value into. Because CRC enable
         * attributes have headers, we can't just do a straight byte to FSB
-        * conversion. We calculate the worst case block count in this case
-        * and we may not need that many, so we have to handle this when
-        * allocating the blocks below. 
+        * conversion and have to take the header space into account.
         */
-       if (!crcs)
-               blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
-       else
-               blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
-
+       blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
        error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
                                                   XFS_ATTR_FORK);
        if (error)
                return error;
 
-       /* Start with the attribute data. We'll allocate the rest afterwards. */
-       if (crcs)
-               blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
-
        args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
        args->rmtblkcnt = blkcnt;
 
@@ -349,26 +464,6 @@ xfs_attr_rmtval_set(
                       (map.br_startblock != HOLESTARTBLOCK));
                lblkno += map.br_blockcount;
                blkcnt -= map.br_blockcount;
-               hdrcnt++;
-
-               /*
-                * If we have enough blocks for the attribute data, calculate
-                * how many extra blocks we need for headers. We might run
-                * through this multiple times in the case that the additional
-                * headers in the blocks needed for the data fragments spills
-                * into requiring more blocks. e.g. for 512 byte blocks, we'll
-                * spill for another block every 9 headers we require in this
-                * loop.
-                */
-               if (crcs && blkcnt == 0) {
-                       int total_len;
-
-                       total_len = args->valuelen +
-                                   hdrcnt * sizeof(struct xfs_attr3_rmt_hdr);
-                       blkcnt = XFS_B_TO_FSB(mp, total_len);
-                       blkcnt -= args->rmtblkcnt;
-                       args->rmtblkcnt += blkcnt;
-               }
 
                /*
                 * Start the next trans in the chain.
@@ -385,18 +480,19 @@ xfs_attr_rmtval_set(
         * the INCOMPLETE flag.
         */
        lblkno = args->rmtblkno;
+       blkcnt = args->rmtblkcnt;
        valuelen = args->valuelen;
        while (valuelen > 0) {
-               int     byte_cnt;
-               char    *buf;
+               struct xfs_buf  *bp;
+               xfs_daddr_t     dblkno;
+               int             dblkcnt;
+
+               ASSERT(blkcnt > 0);
 
-               /*
-                * Try to remember where we decided to put the value.
-                */
                xfs_bmap_init(args->flist, args->firstblock);
                nmap = 1;
                error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
-                                      args->rmtblkcnt, &map, &nmap,
+                                      blkcnt, &map, &nmap,
                                       XFS_BMAPI_ATTRFORK);
                if (error)
                        return(error);
@@ -405,41 +501,27 @@ xfs_attr_rmtval_set(
                       (map.br_startblock != HOLESTARTBLOCK));
 
                dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
-               blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
+               dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 
-               bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, 0);
+               bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0);
                if (!bp)
                        return ENOMEM;
                bp->b_ops = &xfs_attr3_rmt_buf_ops;
 
-               byte_cnt = BBTOB(bp->b_length);
-               byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, byte_cnt);
-               if (valuelen < byte_cnt)
-                       byte_cnt = valuelen;
-
-               buf = bp->b_addr;
-               buf += xfs_attr3_rmt_hdr_set(mp, dp->i_ino, offset,
-                                            byte_cnt, bp);
-               memcpy(buf, src, byte_cnt);
-
-               if (byte_cnt < BBTOB(bp->b_length))
-                       xfs_buf_zero(bp, byte_cnt,
-                                    BBTOB(bp->b_length) - byte_cnt);
+               xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
+                                      &valuelen, &src);
 
                error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
                xfs_buf_relse(bp);
                if (error)
                        return error;
 
-               src += byte_cnt;
-               valuelen -= byte_cnt;
-               offset += byte_cnt;
-               hdrcnt--;
 
+               /* roll attribute extent map forwards */
                lblkno += map.br_blockcount;
+               blkcnt -= map.br_blockcount;
        }
        ASSERT(valuelen == 0);
-       ASSERT(hdrcnt == 0);
        return 0;
 }
 
@@ -448,33 +530,40 @@ xfs_attr_rmtval_set(
  * out-of-line buffer that it is stored on.
  */
 int
-xfs_attr_rmtval_remove(xfs_da_args_t *args)
+xfs_attr_rmtval_remove(
+       struct xfs_da_args      *args)
 {
-       xfs_mount_t *mp;
-       xfs_bmbt_irec_t map;
-       xfs_buf_t *bp;
-       xfs_daddr_t dblkno;
-       xfs_dablk_t lblkno;
-       int valuelen, blkcnt, nmap, error, done, committed;
+       struct xfs_mount        *mp = args->dp->i_mount;
+       xfs_dablk_t             lblkno;
+       int                     blkcnt;
+       int                     error;
+       int                     done;
 
        trace_xfs_attr_rmtval_remove(args);
 
-       mp = args->dp->i_mount;
-
        /*
-        * Roll through the "value", invalidating the attribute value's
-        * blocks.
+        * Roll through the "value", invalidating the attribute value's blocks.
+        * Note that args->rmtblkcnt is the minimum number of data blocks we'll
+        * see for a CRC enabled remote attribute. Each extent will have a
+        * header, and so we may have more blocks than we realise here.  If we
+        * fail to map the blocks correctly, we'll have problems with the buffer
+        * lookups.
         */
        lblkno = args->rmtblkno;
-       valuelen = args->rmtblkcnt;
-       while (valuelen > 0) {
+       blkcnt = args->rmtblkcnt;
+       while (blkcnt > 0) {
+               struct xfs_bmbt_irec    map;
+               struct xfs_buf          *bp;
+               xfs_daddr_t             dblkno;
+               int                     dblkcnt;
+               int                     nmap;
+
                /*
                 * Try to remember where we decided to put the value.
                 */
                nmap = 1;
                error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
-                                      args->rmtblkcnt, &map, &nmap,
-                                      XFS_BMAPI_ATTRFORK);
+                                      blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
                if (error)
                        return(error);
                ASSERT(nmap == 1);
@@ -482,21 +571,20 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
                       (map.br_startblock != HOLESTARTBLOCK));
 
                dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
-               blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
+               dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 
                /*
                 * If the "remote" value is in the cache, remove it.
                 */
-               bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK);
+               bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
                if (bp) {
                        xfs_buf_stale(bp);
                        xfs_buf_relse(bp);
                        bp = NULL;
                }
 
-               valuelen -= map.br_blockcount;
-
                lblkno += map.br_blockcount;
+               blkcnt -= map.br_blockcount;
        }
 
        /*
@@ -506,6 +594,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
        blkcnt = args->rmtblkcnt;
        done = 0;
        while (!done) {
+               int committed;
+
                xfs_bmap_init(args->flist, args->firstblock);
                error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
                                    XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
index c7cca60a062a1ab1a45eb7f08632773e54d0a734..92a8fd7977cc2b48649ed3e5c22344fe83fcd8c1 100644 (file)
 
 #define XFS_ATTR3_RMT_MAGIC    0x5841524d      /* XARM */
 
+/*
+ * There is one of these headers per filesystem block in a remote attribute.
+ * This is done to ensure there is a 1:1 mapping between the attribute value
+ * length and the number of blocks needed to store the attribute. This makes the
+ * verification of a buffer a little more complex, but greatly simplifies the
+ * allocation, reading and writing of these attributes as we don't have to guess
+ * the number of blocks needed to store the attribute data.
+ */
 struct xfs_attr3_rmt_hdr {
        __be32  rm_magic;
        __be32  rm_offset;
@@ -39,6 +47,8 @@ struct xfs_attr3_rmt_hdr {
 
 extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
 
+int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
+
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
 int xfs_attr_rmtval_set(struct xfs_da_args *args);
 int xfs_attr_rmtval_remove(struct xfs_da_args *args);
index 82b70bda9f47a51eb3f31a50ae4fcd32f6c57047..1b2472a46e46b96e31e0615f670120218ed7cf24 100644 (file)
@@ -513,6 +513,7 @@ _xfs_buf_find(
                xfs_alert(btp->bt_mount,
                          "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
                          __func__, blkno, eofs);
+               WARN_ON(1);
                return NULL;
        }
 
@@ -1649,7 +1650,7 @@ xfs_alloc_buftarg(
 {
        xfs_buftarg_t           *btp;
 
-       btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
+       btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
 
        btp->bt_mount = mp;
        btp->bt_dev =  bdev->bd_dev;
index cf263476d6b43591c5786045857f6e4625bde4ce..4ec431777048740528d0d1b96ca7f52e6ba69d01 100644 (file)
@@ -262,12 +262,7 @@ xfs_buf_item_format_segment(
                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
                        vecp->i_len = nbits * XFS_BLF_CHUNK;
                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
-/*
- * You would think we need to bump the nvecs here too, but we do not
- * this number is used by recovery, and it gets confused by the boundary
- * split here
- *                     nvecs++;
- */
+                       nvecs++;
                        vecp++;
                        first_bit = next_bit;
                        last_bit = next_bit;
index 9b26a99ebfe917ed67123a77794751fc2b72cecd..0b8b2a13cd24debe493c8982679a2c565ebae5a1 100644 (file)
@@ -270,6 +270,7 @@ xfs_da3_node_read_verify(
                                break;
                        return;
                case XFS_ATTR_LEAF_MAGIC:
+               case XFS_ATTR3_LEAF_MAGIC:
                        bp->b_ops = &xfs_attr3_leaf_buf_ops;
                        bp->b_ops->verify_read(bp);
                        return;
@@ -2464,7 +2465,8 @@ xfs_buf_map_from_irec(
        ASSERT(nirecs >= 1);
 
        if (nirecs > 1) {
-               map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
+               map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
+                                 KM_SLEEP | KM_NOFS);
                if (!map)
                        return ENOMEM;
                *mapp = map;
@@ -2520,7 +2522,8 @@ xfs_dabuf_map(
                 * Optimize the one-block case.
                 */
                if (nfsb != 1)
-                       irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);
+                       irecs = kmem_zalloc(sizeof(irec) * nfsb,
+                                           KM_SLEEP | KM_NOFS);
 
                nirecs = nfsb;
                error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
index f852b082a0844abf3f8fbfea601589c0c162a9c7..c407e1ccff438a1db7e11cfdca1db4d45beea000 100644 (file)
@@ -219,6 +219,14 @@ xfs_swap_extents(
        int             taforkblks = 0;
        __uint64_t      tmp;
 
+       /*
+        * We have no way of updating owner information in the BMBT blocks for
+        * each inode on CRC enabled filesystems, so to avoid corrupting
+        * this metadata we simply don't allow extent swaps to occur.
+        */
+       if (xfs_sb_version_hascrc(&mp->m_sb))
+               return XFS_ERROR(EINVAL);
+
        tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
        if (!tempifp) {
                error = XFS_ERROR(ENOMEM);
index a3b1bd841a8055249dc16a40f2f32614715ecaba..995f1f505a5246c9bb3132c836f08f59d8659dd1 100644 (file)
@@ -715,6 +715,7 @@ struct xfs_dir3_free_hdr {
        __be32                  firstdb;        /* db of first entry */
        __be32                  nvalid;         /* count of valid entries */
        __be32                  nused;          /* count of used entries */
+       __be32                  pad;            /* 64 bit alignment. */
 };
 
 struct xfs_dir3_free {
index 721ba2fe8e54581aebf884e1cb31d2f02e1e2534..da71a1819d780cd35bd9817fd89deabd5feb3eb7 100644 (file)
@@ -1336,7 +1336,7 @@ xfs_dir2_leaf_getdents(
                                     mp->m_sb.sb_blocksize);
        map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
                                (length * sizeof(struct xfs_bmbt_irec)),
-                              KM_SLEEP);
+                              KM_SLEEP | KM_NOFS);
        map_info->map_size = length;
 
        /*
index 5246de4912d4b07d3fda2940bfe30a15d77a09f1..2226a00acd156118a2998ce37c2c95ae628503d9 100644 (file)
@@ -263,18 +263,19 @@ xfs_dir3_free_get_buf(
         * Initialize the new block to be empty, and remember
         * its first slot as our empty slot.
         */
-       hdr.magic = XFS_DIR2_FREE_MAGIC;
-       hdr.firstdb = 0;
-       hdr.nused = 0;
-       hdr.nvalid = 0;
+       memset(bp->b_addr, 0, sizeof(struct xfs_dir3_free_hdr));
+       memset(&hdr, 0, sizeof(hdr));
+
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
 
                hdr.magic = XFS_DIR3_FREE_MAGIC;
+
                hdr3->hdr.blkno = cpu_to_be64(bp->b_bn);
                hdr3->hdr.owner = cpu_to_be64(dp->i_ino);
                uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_uuid);
-       }
+       } else
+               hdr.magic = XFS_DIR2_FREE_MAGIC;
        xfs_dir3_free_hdr_to_disk(bp->b_addr, &hdr);
        *bpp = bp;
        return 0;
@@ -1921,8 +1922,6 @@ xfs_dir2_node_addname_int(
                         */
                        freehdr.firstdb = (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
                                        xfs_dir3_free_max_bests(mp);
-                       free->hdr.nvalid = 0;
-                       free->hdr.nused = 0;
                } else {
                        free = fbp->b_addr;
                        bests = xfs_dir3_free_bests_p(mp, free);
index a41f8bf1da3788818131386f8ceb08516ffd1603..044e97a33c8d0a155f1ea026a20881f95dd95274 100644 (file)
@@ -249,8 +249,11 @@ xfs_qm_init_dquot_blk(
                d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
                d->dd_diskdq.d_id = cpu_to_be32(curid);
                d->dd_diskdq.d_flags = type;
-               if (xfs_sb_version_hascrc(&mp->m_sb))
+               if (xfs_sb_version_hascrc(&mp->m_sb)) {
                        uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+                       xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+                                        XFS_DQUOT_CRC_OFF);
+               }
        }
 
        xfs_trans_dquot_buf(tp, bp,
@@ -286,23 +289,6 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
        dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
 }
 
-STATIC void
-xfs_dquot_buf_calc_crc(
-       struct xfs_mount        *mp,
-       struct xfs_buf          *bp)
-{
-       struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
-       int                     i;
-
-       if (!xfs_sb_version_hascrc(&mp->m_sb))
-               return;
-
-       for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++, d++) {
-               xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
-                                offsetof(struct xfs_dqblk, dd_crc));
-       }
-}
-
 STATIC bool
 xfs_dquot_buf_verify_crc(
        struct xfs_mount        *mp,
@@ -328,12 +314,11 @@ xfs_dquot_buf_verify_crc(
 
        for (i = 0; i < ndquots; i++, d++) {
                if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
-                                offsetof(struct xfs_dqblk, dd_crc)))
+                                XFS_DQUOT_CRC_OFF))
                        return false;
                if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
                        return false;
        }
-
        return true;
 }
 
@@ -393,6 +378,11 @@ xfs_dquot_buf_read_verify(
        }
 }
 
+/*
+ * we don't calculate the CRC here as that is done when the dquot is flushed to
+ * the buffer after the update is done. This ensures that the dquot in the
+ * buffer always has an up-to-date CRC value.
+ */
 void
 xfs_dquot_buf_write_verify(
        struct xfs_buf  *bp)
@@ -404,7 +394,6 @@ xfs_dquot_buf_write_verify(
                xfs_buf_ioerror(bp, EFSCORRUPTED);
                return;
        }
-       xfs_dquot_buf_calc_crc(mp, bp);
 }
 
 const struct xfs_buf_ops xfs_dquot_buf_ops = {
@@ -1151,11 +1140,17 @@ xfs_qm_dqflush(
         * copy the lsn into the on-disk dquot now while we have the in memory
         * dquot here. This can't be done later in the write verifier as we
         * can't get access to the log item at that point in time.
+        *
+        * We also calculate the CRC here so that the on-disk dquot in the
+        * buffer always has a valid CRC. This ensures there is no possibility
+        * of a dquot without an up-to-date CRC getting to disk.
         */
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;
 
                dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
+               xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF);
        }
 
        /*
index c0f375087efc3256cf5196ff08cdd2068ce18834..452920a3f03fb2e4405ce52e34587e55acfb7abe 100644 (file)
@@ -305,11 +305,12 @@ xfs_efi_release(xfs_efi_log_item_t        *efip,
 {
        ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
        if (atomic_sub_and_test(nextents, &efip->efi_next_extent)) {
-               __xfs_efi_release(efip);
-
                /* recovery needs us to drop the EFI reference, too */
                if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
                        __xfs_efi_release(efip);
+
+               __xfs_efi_release(efip);
+               /* efip may now have been freed, do not reference it again. */
        }
 }
 
index 6dda3f949b04f5c8a9ad804d030c9b21f99b4d96..d04695545397308a6596f5109a72f8923fd79419 100644 (file)
@@ -236,6 +236,7 @@ typedef struct xfs_fsop_resblks {
 #define XFS_FSOP_GEOM_FLAGS_PROJID32   0x0800  /* 32-bit project IDs   */
 #define XFS_FSOP_GEOM_FLAGS_DIRV2CI    0x1000  /* ASCII only CI names  */
 #define XFS_FSOP_GEOM_FLAGS_LAZYSB     0x4000  /* lazy superblock counters */
+#define XFS_FSOP_GEOM_FLAGS_V5SB       0x8000  /* version 5 superblock */
 
 
 /*
index 87595b211da10743bbd07c7be8b4fd44d9f8212b..3c3644ea825b65edd813965296aa4c73d6214313 100644 (file)
@@ -99,7 +99,9 @@ xfs_fs_geometry(
                        (xfs_sb_version_hasattr2(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
                        (xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
-                               XFS_FSOP_GEOM_FLAGS_PROJID32 : 0);
+                               XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
+                       (xfs_sb_version_hascrc(&mp->m_sb) ?
+                               XFS_FSOP_GEOM_FLAGS_V5SB : 0);
                geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
                                mp->m_sb.sb_logsectsize : BBSIZE;
                geo->rtsectsize = mp->m_sb.sb_blocksize;
index efbe1accb6ca0b5e433e87df5fe0976469764e3d..7f7be5f98f52f743e04b4a915ef8466a62ec6237 100644 (file)
@@ -1638,6 +1638,10 @@ xfs_iunlink(
                dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
                offset = ip->i_imap.im_boffset +
                        offsetof(xfs_dinode_t, di_next_unlinked);
+
+               /* need to recalc the inode CRC if appropriate */
+               xfs_dinode_calc_crc(mp, dip);
+
                xfs_trans_inode_buf(tp, ibp);
                xfs_trans_log_buf(tp, ibp, offset,
                                  (offset + sizeof(xfs_agino_t) - 1));
@@ -1723,6 +1727,10 @@ xfs_iunlink_remove(
                        dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
                        offset = ip->i_imap.im_boffset +
                                offsetof(xfs_dinode_t, di_next_unlinked);
+
+                       /* need to recalc the inode CRC if appropriate */
+                       xfs_dinode_calc_crc(mp, dip);
+
                        xfs_trans_inode_buf(tp, ibp);
                        xfs_trans_log_buf(tp, ibp, offset,
                                          (offset + sizeof(xfs_agino_t) - 1));
@@ -1796,6 +1804,10 @@ xfs_iunlink_remove(
                        dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
                        offset = ip->i_imap.im_boffset +
                                offsetof(xfs_dinode_t, di_next_unlinked);
+
+                       /* need to recalc the inode CRC if appropriate */
+                       xfs_dinode_calc_crc(mp, dip);
+
                        xfs_trans_inode_buf(tp, ibp);
                        xfs_trans_log_buf(tp, ibp, offset,
                                          (offset + sizeof(xfs_agino_t) - 1));
@@ -1809,6 +1821,10 @@ xfs_iunlink_remove(
                last_dip->di_next_unlinked = cpu_to_be32(next_agino);
                ASSERT(next_agino != 0);
                offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
+
+               /* need to recalc the inode CRC if appropriate */
+               xfs_dinode_calc_crc(mp, last_dip);
+
                xfs_trans_inode_buf(tp, last_ibp);
                xfs_trans_log_buf(tp, last_ibp, offset,
                                  (offset + sizeof(xfs_agino_t) - 1));
index d82efaa2ac7350553c8804c014c5f299809af178..ca9ecaa81112fac7706c4cac23c92f2326f0bba8 100644 (file)
@@ -455,6 +455,28 @@ xfs_vn_getattr(
        return 0;
 }
 
+static void
+xfs_setattr_mode(
+       struct xfs_trans        *tp,
+       struct xfs_inode        *ip,
+       struct iattr            *iattr)
+{
+       struct inode    *inode = VFS_I(ip);
+       umode_t         mode = iattr->ia_mode;
+
+       ASSERT(tp);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+       if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+               mode &= ~S_ISGID;
+
+       ip->i_d.di_mode &= S_IFMT;
+       ip->i_d.di_mode |= mode & ~S_IFMT;
+
+       inode->i_mode &= S_IFMT;
+       inode->i_mode |= mode & ~S_IFMT;
+}
+
 int
 xfs_setattr_nonsize(
        struct xfs_inode        *ip,
@@ -606,18 +628,8 @@ xfs_setattr_nonsize(
        /*
         * Change file access modes.
         */
-       if (mask & ATTR_MODE) {
-               umode_t mode = iattr->ia_mode;
-
-               if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
-                       mode &= ~S_ISGID;
-
-               ip->i_d.di_mode &= S_IFMT;
-               ip->i_d.di_mode |= mode & ~S_IFMT;
-
-               inode->i_mode &= S_IFMT;
-               inode->i_mode |= mode & ~S_IFMT;
-       }
+       if (mask & ATTR_MODE)
+               xfs_setattr_mode(tp, ip, iattr);
 
        /*
         * Change file access or modified times.
@@ -714,9 +726,8 @@ xfs_setattr_size(
                return XFS_ERROR(error);
 
        ASSERT(S_ISREG(ip->i_d.di_mode));
-       ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
-                       ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
-                       ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+       ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+                       ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
 
        if (!(flags & XFS_ATTR_NOLOCK)) {
                lock_flags |= XFS_IOLOCK_EXCL;
@@ -860,6 +871,12 @@ xfs_setattr_size(
                xfs_inode_clear_eofblocks_tag(ip);
        }
 
+       /*
+        * Change file access modes.
+        */
+       if (mask & ATTR_MODE)
+               xfs_setattr_mode(tp, ip, iattr);
+
        if (mask & ATTR_CTIME) {
                inode->i_ctime = iattr->ia_ctime;
                ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
index e3d0b85d852b6e2c3b07bc07482d2a7e357ccf90..d0833b54e55d63ed83eb61cedfd110cd4a1263b2 100644 (file)
@@ -139,7 +139,7 @@ xlog_cil_prepare_log_vecs(
 
                new_lv = kmem_zalloc(sizeof(*new_lv) +
                                niovecs * sizeof(struct xfs_log_iovec),
-                               KM_SLEEP);
+                               KM_SLEEP|KM_NOFS);
 
                /* The allocated iovec region lies beyond the log vector. */
                new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
index 93f03ec17eeca5052a99a713895a1771887e55ac..45a85ff84da1e3cdf688d9d8429aa4a40e4d6b66 100644 (file)
@@ -1599,10 +1599,43 @@ xlog_recover_add_to_trans(
 }
 
 /*
- * Sort the log items in the transaction. Cancelled buffers need
- * to be put first so they are processed before any items that might
- * modify the buffers. If they are cancelled, then the modifications
- * don't need to be replayed.
+ * Sort the log items in the transaction.
+ *
+ * The ordering constraints are defined by the inode allocation and unlink
+ * behaviour. The rules are:
+ *
+ *     1. Every item is only logged once in a given transaction. Hence it
+ *        represents the last logged state of the item. Hence ordering is
+ *        dependent on the order in which operations need to be performed so
+ *        required initial conditions are always met.
+ *
+ *     2. Cancelled buffers are recorded in pass 1 in a separate table and
+ *        there's nothing to replay from them so we can simply cull them
+ *        from the transaction. However, we can't do that until after we've
+ *        replayed all the other items because they may be dependent on the
+ *        cancelled buffer and replaying the cancelled buffer can remove it
+ *        from the cancelled buffer table. Hence they have to be done last.
+ *
+ *     3. Inode allocation buffers must be replayed before inode items that
+ *        read the buffer and replay changes into it.
+ *
+ *     4. Inode unlink buffers must be replayed after inode items are replayed.
+ *        This ensures that inodes are completely flushed to the inode buffer
+ *        in a "free" state before we remove the unlinked inode list pointer.
+ *
+ * Hence the ordering needs to be inode allocation buffers first, inode items
+ * second, inode unlink buffers third and cancelled buffers last.
+ *
+ * But there's a problem with that - we can't tell an inode allocation buffer
+ * apart from a regular buffer, so we can't separate them. We can, however,
+ * tell an inode unlink buffer from the others, and so we can separate them out
+ * from all the other buffers and move them to last.
+ *
+ * Hence, 4 lists, in order from head to tail:
+ *     - buffer_list for all buffers except cancelled/inode unlink buffers
+ *     - item_list for all non-buffer items
+ *     - inode_buffer_list for inode unlink buffers
+ *     - cancel_list for the cancelled buffers
  */
 STATIC int
 xlog_recover_reorder_trans(
@@ -1612,6 +1645,10 @@ xlog_recover_reorder_trans(
 {
        xlog_recover_item_t     *item, *n;
        LIST_HEAD(sort_list);
+       LIST_HEAD(cancel_list);
+       LIST_HEAD(buffer_list);
+       LIST_HEAD(inode_buffer_list);
+       LIST_HEAD(inode_list);
 
        list_splice_init(&trans->r_itemq, &sort_list);
        list_for_each_entry_safe(item, n, &sort_list, ri_list) {
@@ -1619,12 +1656,18 @@ xlog_recover_reorder_trans(
 
                switch (ITEM_TYPE(item)) {
                case XFS_LI_BUF:
-                       if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
+                       if (buf_f->blf_flags & XFS_BLF_CANCEL) {
                                trace_xfs_log_recover_item_reorder_head(log,
                                                        trans, item, pass);
-                               list_move(&item->ri_list, &trans->r_itemq);
+                               list_move(&item->ri_list, &cancel_list);
                                break;
                        }
+                       if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
+                               list_move(&item->ri_list, &inode_buffer_list);
+                               break;
+                       }
+                       list_move_tail(&item->ri_list, &buffer_list);
+                       break;
                case XFS_LI_INODE:
                case XFS_LI_DQUOT:
                case XFS_LI_QUOTAOFF:
@@ -1632,7 +1675,7 @@ xlog_recover_reorder_trans(
                case XFS_LI_EFI:
                        trace_xfs_log_recover_item_reorder_tail(log,
                                                        trans, item, pass);
-                       list_move_tail(&item->ri_list, &trans->r_itemq);
+                       list_move_tail(&item->ri_list, &inode_list);
                        break;
                default:
                        xfs_warn(log->l_mp,
@@ -1643,6 +1686,14 @@ xlog_recover_reorder_trans(
                }
        }
        ASSERT(list_empty(&sort_list));
+       if (!list_empty(&buffer_list))
+               list_splice(&buffer_list, &trans->r_itemq);
+       if (!list_empty(&inode_list))
+               list_splice_tail(&inode_list, &trans->r_itemq);
+       if (!list_empty(&inode_buffer_list))
+               list_splice_tail(&inode_buffer_list, &trans->r_itemq);
+       if (!list_empty(&cancel_list))
+               list_splice_tail(&cancel_list, &trans->r_itemq);
        return 0;
 }
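
The reordering described in the comment above is effectively a stable partition of the transaction's items into four classes, replayed in a fixed class order while preserving the original order within each class. A toy illustration of that ordering property (the classes and the sample transaction are made up for the sketch):

#include <stdio.h>

enum item_class {
        BUFFER_ITEM,            /* ordinary buffers, replayed first */
        INODE_ITEM,             /* inodes, dquots, EFIs, ... */
        INODE_UNLINK_BUF,       /* inode unlink buffers */
        CANCEL_BUF,             /* cancelled buffers, culled last */
        NCLASS
};

static const char *name[] = {
        "buffer", "inode/dquot/EFI", "inode-unlink buffer", "cancelled buffer"
};

int main(void)
{
        /* a made-up transaction: items in the order they were logged */
        enum item_class logged[] = {
                INODE_ITEM, CANCEL_BUF, BUFFER_ITEM, INODE_UNLINK_BUF,
                BUFFER_ITEM, INODE_ITEM,
        };
        int n = sizeof(logged) / sizeof(logged[0]);
        enum item_class c;
        int i, pos = 0;

        /* stable partition into four classes, then replay in class order */
        for (c = BUFFER_ITEM; c < NCLASS; c++)
                for (i = 0; i < n; i++)
                        if (logged[i] == c)
                                printf("replay %d: %s\n", pos++, name[c]);
        return 0;
}
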
 
@@ -1861,6 +1912,15 @@ xlog_recover_do_inode_buffer(
                buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
                                              next_unlinked_offset);
                *buffer_nextp = *logged_nextp;
+
+               /*
+                * If necessary, recalculate the CRC in the on-disk inode. We
+                * have to leave the inode in a consistent state for whoever
+                * reads it next....
+                */
+               xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
+                               xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
+
        }
 
        return 0;
@@ -2096,6 +2156,17 @@ xlog_recover_do_reg_buffer(
                ASSERT(BBTOB(bp->b_io_length) >=
                       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
 
+               /*
+                * The dirty regions logged in the buffer, even though
+                * contiguous, may span multiple chunks. This is because the
+                * dirty region may span a physical page boundary in a buffer
+                * and hence be split into two separate vectors for writing into
+                * the log. Hence we need to trim nbits back to the length of
+                * the current region being copied out of the log.
+                */
+               if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
+                       nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
+
                /*
                 * Do a sanity check if this is a dquot buffer. Just checking
                 * the first dquot in the buffer should do. XXXThis is
@@ -2255,6 +2326,12 @@ xfs_qm_dqcheck(
        d->dd_diskdq.d_flags = type;
        d->dd_diskdq.d_id = cpu_to_be32(id);
 
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+               xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF);
+       }
+
        return errs;
 }
 
@@ -2782,6 +2859,10 @@ xlog_recover_dquot_pass2(
        }
 
        memcpy(ddq, recddq, item->ri_buf[1].i_len);
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF);
+       }
 
        ASSERT(dq_f->qlf_size == 2);
        ASSERT(bp->b_target->bt_mount == mp);
index f41702b43003d3930ea3e281da363e495b3edd6b..b75c9bb6e71e34b0c65158f63ba475a217d9e347 100644 (file)
@@ -41,6 +41,7 @@
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_cksum.h"
 
 /*
  * The global quota manager. There is only one of these for the entire
@@ -839,7 +840,7 @@ xfs_qm_reset_dqcounts(
        xfs_dqid_t      id,
        uint            type)
 {
-       xfs_disk_dquot_t        *ddq;
+       struct xfs_dqblk        *dqb;
        int                     j;
 
        trace_xfs_reset_dqcounts(bp, _RET_IP_);
@@ -853,8 +854,12 @@ xfs_qm_reset_dqcounts(
        do_div(j, sizeof(xfs_dqblk_t));
        ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 #endif
-       ddq = bp->b_addr;
+       dqb = bp->b_addr;
        for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
+               struct xfs_disk_dquot   *ddq;
+
+               ddq = (struct xfs_disk_dquot *)&dqb[j];
+
                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
@@ -871,7 +876,12 @@ xfs_qm_reset_dqcounts(
                ddq->d_bwarns = 0;
                ddq->d_iwarns = 0;
                ddq->d_rtbwarns = 0;
-               ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
+
+               if (xfs_sb_version_hascrc(&mp->m_sb)) {
+                       xfs_update_cksum((char *)&dqb[j],
+                                        sizeof(struct xfs_dqblk),
+                                        XFS_DQUOT_CRC_OFF);
+               }
        }
 }
 
@@ -907,19 +917,29 @@ xfs_qm_dqiter_bufs(
                              XFS_FSB_TO_DADDR(mp, bno),
                              mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                              &xfs_dquot_buf_ops);
-               if (error)
-                       break;
 
                /*
-                * XXX(hch): need to figure out if it makes sense to validate
-                *           the CRC here.
+                * CRC and validation errors will return an EFSCORRUPTED here. If
+                * this occurs, re-read without CRC validation so that we can
+                * repair the damage via xfs_qm_reset_dqcounts(). This process
+                * will leave a trace in the log indicating corruption has
+                * been detected.
                 */
+               if (error == EFSCORRUPTED) {
+                       error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
+                                     XFS_FSB_TO_DADDR(mp, bno),
+                                     mp->m_quotainfo->qi_dqchunklen, 0, &bp,
+                                     NULL);
+               }
+
+               if (error)
+                       break;
+
                xfs_qm_reset_dqcounts(mp, bp, firstid, type);
                xfs_buf_delwri_queue(bp, buffer_list);
                xfs_buf_relse(bp);
-               /*
-                * goto the next block.
-                */
+
+               /* goto the next block. */
                bno++;
                firstid += mp->m_quotainfo->qi_dqperchunk;
        }
index c41190cad6e91a78306122a4ef18511d3f3b8f01..6cdf6ffc36a1d7f9fc967ec6c72de3621a58b47c 100644 (file)
@@ -489,31 +489,36 @@ xfs_qm_scall_setqlim(
        if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
                return 0;
 
-       tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
-       error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
-                                 0, 0, XFS_DEFAULT_LOG_COUNT);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return (error);
-       }
-
        /*
         * We don't want to race with a quotaoff so take the quotaoff lock.
-        * (We don't hold an inode lock, so there's nothing else to stop
-        * a quotaoff from happening). (XXXThis doesn't currently happen
-        * because we take the vfslock before calling xfs_qm_sysent).
+        * We don't hold an inode lock, so there's nothing else to stop
+        * a quotaoff from happening.
         */
        mutex_lock(&q->qi_quotaofflock);
 
        /*
-        * Get the dquot (locked), and join it to the transaction.
-        * Allocate the dquot if this doesn't exist.
+        * Get the dquot (locked) before we start, as we need to do a
+        * transaction to allocate it if it doesn't exist. Once we have the
+        * dquot, unlock it so we can start the next transaction safely. We hold
+        * a reference to the dquot, so it's safe to do this unlock/lock without
+        * it being reclaimed in the meantime.
         */
-       if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
-               xfs_trans_cancel(tp, XFS_TRANS_ABORT);
+       error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
+       if (error) {
                ASSERT(error != ENOENT);
                goto out_unlock;
        }
+       xfs_dqunlock(dqp);
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
+       error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
+                                 0, 0, XFS_DEFAULT_LOG_COUNT);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               goto out_rele;
+       }
+
+       xfs_dqlock(dqp);
        xfs_trans_dqjoin(tp, dqp);
        ddq = &dqp->q_core;
 
@@ -621,9 +626,10 @@ xfs_qm_scall_setqlim(
        xfs_trans_log_dquot(tp, dqp);
 
        error = xfs_trans_commit(tp, 0);
-       xfs_qm_dqrele(dqp);
 
- out_unlock:
+out_rele:
+       xfs_qm_dqrele(dqp);
+out_unlock:
        mutex_unlock(&q->qi_quotaofflock);
        return error;
 }
index c61e31c7d99791271848205b031ddc86af2142b8..c38068f26c558d2b1f6c81ac902c4c53498cafbc 100644 (file)
@@ -87,6 +87,8 @@ typedef struct xfs_dqblk {
        uuid_t            dd_uuid;      /* location information */
 } xfs_dqblk_t;
 
+#define XFS_DQUOT_CRC_OFF      offsetof(struct xfs_dqblk, dd_crc)
+
 /*
  * flags for q_flags field in the dquot.
  */
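
XFS_DQUOT_CRC_OFF gives every dquot CRC update and verification the same offsetof-based location, replacing the open-coded offsetof(struct xfs_dqblk, dd_crc) uses seen earlier in this diff. The underlying pattern, checksumming a fixed-size structure with the CRC field treated as zero and storing the result at that offset, can be sketched in userspace roughly as follows; zlib's crc32() stands in for the kernel's crc32c helpers (link with -lz), so this mirrors the idea rather than the exact xfs_cksum implementation:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>               /* crc32() stands in for crc32c */

struct demo_blk {
        uint32_t magic;
        uint32_t id;
        uint32_t crc;           /* checksum stored inside the structure */
        uint8_t  payload[52];
};

#define DEMO_CRC_OFF offsetof(struct demo_blk, crc)

/* Compute the CRC with the CRC field itself treated as zero. */
static uint32_t demo_cksum(const struct demo_blk *blk)
{
        struct demo_blk tmp = *blk;

        memset((char *)&tmp + DEMO_CRC_OFF, 0, sizeof(tmp.crc));
        return crc32(0L, (const Bytef *)&tmp, sizeof(tmp));
}

int main(void)
{
        struct demo_blk blk = { .magic = 0x58444222, .id = 42 };

        blk.crc = demo_cksum(&blk);                           /* "update" */
        printf("verify: %s\n",
               blk.crc == demo_cksum(&blk) ? "ok" : "bad");   /* "verify" */
        return 0;
}
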
index ea341cea68cbfc5a7798810f0239ff34bfedfb09..3033ba5e9762f19609a7f86d5117f64eb70f7bd4 100644 (file)
@@ -1372,6 +1372,17 @@ xfs_finish_flags(
                }
        }
 
+       /*
+        * V5 filesystems always use attr2 format for attributes.
+        */
+       if (xfs_sb_version_hascrc(&mp->m_sb) &&
+           (mp->m_flags & XFS_MOUNT_NOATTR2)) {
+               xfs_warn(mp,
+"Cannot mount a V5 filesystem as %s. %s is always enabled for V5 filesystems.",
+                       MNTOPT_NOATTR2, MNTOPT_ATTR2);
+               return XFS_ERROR(EINVAL);
+       }
+
        /*
         * mkfs'ed attr2 will turn on attr2 mount unless explicitly
         * told by noattr2 to turn it off
index 5f234389327c806c286c7c1b8a2b0578a4aa8a9f..195a403e1522bbed00cb0f843b1d53c885db0e0d 100644 (file)
@@ -56,16 +56,9 @@ xfs_symlink_blocks(
        struct xfs_mount *mp,
        int             pathlen)
 {
-       int             fsblocks = 0;
-       int             len = pathlen;
+       int buflen = XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
 
-       do {
-               fsblocks++;
-               len -= XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
-       } while (len > 0);
-
-       ASSERT(fsblocks <= XFS_SYMLINK_MAPS);
-       return fsblocks;
+       return (pathlen + buflen - 1) / buflen;
 }
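
The rewritten xfs_symlink_blocks() replaces the old counting loop with the same ceiling division used for remote attribute blocks. Here is a quick standalone check that the closed form agrees with the loop it replaces; the 456-byte payload is an assumed XFS_SYMLINK_BUF_SPACE() result (512-byte block minus a 56-byte header), chosen only for the sketch:

#include <assert.h>
#include <stdio.h>

#define BUFLEN 456      /* assumed per-block symlink payload for this sketch */

/* old style: count blocks by repeatedly subtracting the per-block payload */
static int blocks_loop(int pathlen)
{
        int fsblocks = 0;
        int len = pathlen;

        do {
                fsblocks++;
                len -= BUFLEN;
        } while (len > 0);
        return fsblocks;
}

/* new style: closed-form ceiling division */
static int blocks_div(int pathlen)
{
        return (pathlen + BUFLEN - 1) / BUFLEN;
}

int main(void)
{
        int pathlen;

        for (pathlen = 1; pathlen <= 4 * BUFLEN; pathlen++)
                assert(blocks_loop(pathlen) == blocks_div(pathlen));
        printf("loop and ceiling division agree for pathlen 1..%d\n",
               4 * BUFLEN);
        return 0;
}
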
 
 static int
@@ -405,7 +398,7 @@ xfs_symlink(
        if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
                fs_blocks = 0;
        else
-               fs_blocks = XFS_B_TO_FSB(mp, pathlen);
+               fs_blocks = xfs_symlink_blocks(mp, pathlen);
        resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
        error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
                        XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
@@ -512,7 +505,7 @@ xfs_symlink(
                cur_chunk = target_path;
                offset = 0;
                for (n = 0; n < nmaps; n++) {
-                       char *buf;
+                       char    *buf;
 
                        d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
                        byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
@@ -525,9 +518,7 @@ xfs_symlink(
                        bp->b_ops = &xfs_symlink_buf_ops;
 
                        byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
-                       if (pathlen < byte_cnt) {
-                               byte_cnt = pathlen;
-                       }
+                       byte_cnt = min(byte_cnt, pathlen);
 
                        buf = bp->b_addr;
                        buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset,
@@ -542,6 +533,7 @@ xfs_symlink(
                        xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) -
                                                        (char *)bp->b_addr);
                }
+               ASSERT(pathlen == 0);
        }
 
        /*
index 1501f4fa51a6e4a0a683ca9998ded29a0410e450..0176bb21f09a5c3c0795f6f6932e07f92643c94a 100644 (file)
@@ -1453,7 +1453,7 @@ xfs_free_file_space(
        xfs_mount_t             *mp;
        int                     nimap;
        uint                    resblks;
-       uint                    rounding;
+       xfs_off_t               rounding;
        int                     rt;
        xfs_fileoff_t           startoffset_fsb;
        xfs_trans_t             *tp;
@@ -1482,7 +1482,7 @@ xfs_free_file_space(
                inode_dio_wait(VFS_I(ip));
        }
 
-       rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+       rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
        ioffset = offset & ~(rounding - 1);
        error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                              ioffset, -1);
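
Widening rounding to xfs_off_t matters because of how the mask below it is built: with a 32-bit rounding, ~(rounding - 1) is a 32-bit value that zero-extends before the AND, so masking a large 64-bit file offset silently clears its upper bits. A small demonstration of the arithmetic, with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t  offset  = 5LL << 30;   /* a 5 GiB file offset */
        uint32_t round32 = 65536;       /* 32-bit rounding, as before */
        int64_t  round64 = 65536;       /* 64-bit rounding, as fixed */

        /* ~(round32 - 1) is only 32 bits wide and zero-extends... */
        int64_t bad  = offset & ~(round32 - 1);
        /* ...while the 64-bit mask keeps the upper offset bits intact. */
        int64_t good = offset & ~(round64 - 1);

        printf("32-bit mask: 0x%llx\n", (unsigned long long)bad);
        printf("64-bit mask: 0x%llx\n", (unsigned long long)good);
        return 0;
}

With the 32-bit mask the 5 GiB offset collapses to 0x40000000; with the 64-bit mask it stays at 0x140000000, which is presumably the motivation for the type change.
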
index 98db31d9f9b410908b2056b192bf9813bc8ce4aa..636c59f2003a315c264777eccd0e3abfbdd8045f 100644 (file)
@@ -377,7 +377,6 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
 
-#ifdef CONFIG_PM
 int acpi_bus_set_power(acpi_handle handle, int state);
 const char *acpi_power_state_string(int state);
 int acpi_device_get_power(struct acpi_device *device, int *state);
@@ -385,41 +384,12 @@ int acpi_device_set_power(struct acpi_device *device, int state);
 int acpi_bus_init_power(struct acpi_device *device);
 int acpi_bus_update_power(acpi_handle handle, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
+
+#ifdef CONFIG_PM
 bool acpi_bus_can_wakeup(acpi_handle handle);
-#else /* !CONFIG_PM */
-static inline int acpi_bus_set_power(acpi_handle handle, int state)
-{
-       return 0;
-}
-static inline const char *acpi_power_state_string(int state)
-{
-       return "D0";
-}
-static inline int acpi_device_get_power(struct acpi_device *device, int *state)
-{
-       return 0;
-}
-static inline int acpi_device_set_power(struct acpi_device *device, int state)
-{
-       return 0;
-}
-static inline int acpi_bus_init_power(struct acpi_device *device)
-{
-       return 0;
-}
-static inline int acpi_bus_update_power(acpi_handle handle, int *state_p)
-{
-       return 0;
-}
-static inline bool acpi_bus_power_manageable(acpi_handle handle)
-{
-       return false;
-}
-static inline bool acpi_bus_can_wakeup(acpi_handle handle)
-{
-       return false;
-}
-#endif /* !CONFIG_PM */
+#else
+static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; }
+#endif
 
 #ifdef CONFIG_ACPI_PROC_EVENT
 int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
index 5b3d2bd4813ae30895160d1a383ea149615d292e..64b8c7639520d1e41e1439bc558d7a35424e25ef 100644 (file)
@@ -77,7 +77,7 @@ struct acpi_signal_fatal_info {
 /*
  * OSL Initialization and shutdown primitives
  */
-acpi_status __initdata acpi_os_initialize(void);
+acpi_status __init acpi_os_initialize(void);
 
 acpi_status acpi_os_terminate(void);
 
index b327b5a9296d36fb6c31fd26e382488fd0b45cbb..ea69367fdd3bbafaf1775da248c7cc998c47fba8 100644 (file)
@@ -329,10 +329,16 @@ int acpi_processor_power_init(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-int acpi_processor_suspend(struct device *dev);
-int acpi_processor_resume(struct device *dev);
 extern struct cpuidle_driver acpi_idle_driver;
 
+#ifdef CONFIG_PM_SLEEP
+void acpi_processor_syscore_init(void);
+void acpi_processor_syscore_exit(void);
+#else
+static inline void acpi_processor_syscore_init(void) {}
+static inline void acpi_processor_syscore_exit(void) {}
+#endif
+
 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
index ac9da00e9f2c66b104029aa7b89253802708a322..d5afe96adba6c5ee239390625bbb93907c2cc1ba 100644 (file)
@@ -343,8 +343,12 @@ extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_GENERIC_IOMAP */
 #endif /* CONFIG_HAS_IOPORT */
 
+#ifndef xlate_dev_kmem_ptr
 #define xlate_dev_kmem_ptr(p)  p
+#endif
+#ifndef xlate_dev_mem_ptr
 #define xlate_dev_mem_ptr(p)   __va(p)
+#endif
 
 #ifdef CONFIG_VIRT_TO_BUS
 #ifndef virt_to_bus
index b1b1fa6ffffecce461a44945524d60d9b46dd54c..13821c339a4151912b88f2e4de04cbf99653a612 100644 (file)
@@ -97,11 +97,9 @@ struct mmu_gather {
        unsigned long           start;
        unsigned long           end;
        unsigned int            need_flush : 1, /* Did free PTEs */
-                               fast_mode  : 1; /* No batching   */
-
        /* we are in the middle of an operation to clear
         * a full mm and can make some optimizations */
-       unsigned int            fullmm : 1,
+                               fullmm : 1,
        /* we have performed an operation which
         * requires a complete flush of the tlb */
                                need_flush_all : 1;
@@ -114,19 +112,6 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-static inline int tlb_fast_mode(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_SMP
-       return tlb->fast_mode;
-#else
-       /*
-        * For UP we don't need to worry about TLB flush
-        * and page free order so much..
-        */
-       return 1;
-#endif
-}
-
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
index 61196592152e09a3913d1989cd847a8f9457664b..63d17ee9eb488c336ad55521f346dc1c2703c4c4 100644 (file)
@@ -316,6 +316,7 @@ struct drm_ioctl_desc {
        int flags;
        drm_ioctl_t *func;
        unsigned int cmd_drv;
+       const char *name;
 };
 
 /**
@@ -324,7 +325,7 @@ struct drm_ioctl_desc {
  */
 
 #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)                        \
-       [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
+       [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
 
 struct drm_magic_entry {
        struct list_head head;
index 8230b46fdd73ff916324a9d4d0dd064a13207502..471f276ce8f741638ad6ced787aa7f943457e751 100644 (file)
@@ -50,13 +50,14 @@ struct drm_fb_helper_surface_size {
 
 /**
  * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
- * @gamma_set: - Set the given gamma lut register on the given crtc.
- * @gamma_get: - Read the given gamma lut register on the given crtc, used to
- *              save the current lut when force-restoring the fbdev for e.g.
- *              kdbg.
- * @fb_probe: - Driver callback to allocate and initialize the fbdev info
- *             structure. Futhermore it also needs to allocate the drm
- *             framebuffer used to back the fbdev.
+ * @gamma_set: Set the given gamma lut register on the given crtc.
+ * @gamma_get: Read the given gamma lut register on the given crtc, used to
+ *             save the current lut when force-restoring the fbdev for e.g.
+ *             kdbg.
+ * @fb_probe: Driver callback to allocate and initialize the fbdev info
+ *            structure. Furthermore it also needs to allocate the drm
+ *            framebuffer used to back the fbdev.
+ * @initial_config: Setup an initial fbdev display configuration
  *
  * Driver callbacks used by the fbdev emulation helper library.
  */
index 393369147a2dd7812ea5555f30ac8a4390ed1442..675ddf4b441f4746942dd62b2ae5febdaaad2868 100644 (file)
@@ -87,15 +87,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
 /** Other copying of data from kernel space */
 #define DRM_COPY_TO_USER(arg1, arg2, arg3)             \
        copy_to_user(arg1, arg2, arg3)
-/* Macros for copyfrom user, but checking readability only once */
-#define DRM_VERIFYAREA_READ( uaddr, size )             \
-       (access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
-       __copy_from_user(arg1, arg2, arg3)
-#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)   \
-       __copy_to_user(arg1, arg2, arg3)
-#define DRM_GET_USER_UNCHECKED(val, uaddr)             \
-       __get_user(val, uaddr)
 
 #define DRM_HZ HZ
 
index c2af598f701dac425a6b4b7d65bfec06529801cb..bb1bc485390b253826e683de25dcf8f8c5d72f4e 100644 (file)
        {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
index d09deabc7bf64773eff4223d631830dc74ef45b3..fb0298082916fa7f7622de3fc2233ca4732b649e 100644 (file)
@@ -37,6 +37,8 @@ struct acpi_dma_spec {
  * @dev:               struct device of this controller
  * @acpi_dma_xlate:    callback function to find a suitable channel
  * @data:              private data used by a callback function
+ * @base_request_line: first supported request line (CSRT)
+ * @end_request_line:  last supported request line (CSRT)
  */
 struct acpi_dma {
        struct list_head        dma_controllers;
@@ -44,6 +46,8 @@ struct acpi_dma {
        struct dma_chan         *(*acpi_dma_xlate)
                                (struct acpi_dma_spec *, struct acpi_dma *);
        void                    *data;
+       unsigned short          base_request_line;
+       unsigned short          end_request_line;
 };
 
 /* Used with acpi_dma_simple_xlate() */
index ec10e1b24c1cce50d50581d58c5000cf7b9e5565..737f90ab4b6235abd340b1c47d9308591c138e8b 100644 (file)
@@ -49,10 +49,11 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 }
 #endif
 
-extern void cper_print_aer(const char *prefix, struct pci_dev *dev,
+extern void cper_print_aer(struct pci_dev *dev,
                           int cper_severity, struct aer_capability_regs *aer);
 extern int cper_severity_to_aer(int cper_severity);
 extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
-                             int severity);
+                             int severity,
+                             struct aer_capability_regs *aer_regs);
 #endif //_AER_H_
 
index 5a6d718adf34825eb11bc4dd20cbcb8f1dced0ad..b20b03852f21dd722e405c4685f2c9851588ed98 100644 (file)
@@ -84,8 +84,13 @@ extern int audit_classify_arch(int arch);
 #define        AUDIT_TYPE_CHILD_DELETE 3       /* a child being deleted */
 #define        AUDIT_TYPE_CHILD_CREATE 4       /* a child being created */
 
+/* maximum number of args that audit_socketcall can process */
+#define AUDITSC_ARGS           6
+
 struct filename;
 
+extern void audit_log_session_info(struct audit_buffer *ab);
+
 #ifdef CONFIG_AUDITSYSCALL
 /* These are defined in auditsc.c */
                                /* Public API */
@@ -120,7 +125,7 @@ static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
                                       unsigned long a1, unsigned long a2,
                                       unsigned long a3)
 {
-       if (unlikely(!audit_dummy_context()))
+       if (unlikely(current->audit_context))
                __audit_syscall_entry(arch, major, a0, a1, a2, a3);
 }
 static inline void audit_syscall_exit(void *pt_regs)
@@ -185,12 +190,10 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
        return tsk->sessionid;
 }
 
-extern void audit_log_task_context(struct audit_buffer *ab);
-extern void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk);
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
 extern int __audit_bprm(struct linux_binprm *bprm);
-extern void __audit_socketcall(int nargs, unsigned long *args);
+extern int __audit_socketcall(int nargs, unsigned long *args);
 extern int __audit_sockaddr(int len, void *addr);
 extern void __audit_fd_pair(int fd1, int fd2);
 extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
@@ -224,10 +227,11 @@ static inline int audit_bprm(struct linux_binprm *bprm)
                return __audit_bprm(bprm);
        return 0;
 }
-static inline void audit_socketcall(int nargs, unsigned long *args)
+static inline int audit_socketcall(int nargs, unsigned long *args)
 {
        if (unlikely(!audit_dummy_context()))
-               __audit_socketcall(nargs, args);
+               return __audit_socketcall(nargs, args);
+       return 0;
 }
 static inline int audit_sockaddr(int len, void *addr)
 {
@@ -340,11 +344,6 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
 {
        return -1;
 }
-static inline void audit_log_task_context(struct audit_buffer *ab)
-{ }
-static inline void audit_log_task_info(struct audit_buffer *ab,
-                                      struct task_struct *tsk)
-{ }
 static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
 { }
 static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
@@ -354,8 +353,10 @@ static inline int audit_bprm(struct linux_binprm *bprm)
 {
        return 0;
 }
-static inline void audit_socketcall(int nargs, unsigned long *args)
-{ }
+static inline int audit_socketcall(int nargs, unsigned long *args)
+{
+       return 0;
+}
 static inline void audit_fd_pair(int fd1, int fd2)
 { }
 static inline int audit_sockaddr(int len, void *addr)
@@ -390,6 +391,11 @@ static inline void audit_ptrace(struct task_struct *t)
 #define audit_signals 0
 #endif /* CONFIG_AUDITSYSCALL */
 
+static inline bool audit_loginuid_set(struct task_struct *tsk)
+{
+       return uid_valid(audit_get_loginuid(tsk));
+}
+
 #ifdef CONFIG_AUDIT
 /* These are defined in audit.c */
                                /* Public API */
@@ -429,14 +435,17 @@ static inline void            audit_log_secctx(struct audit_buffer *ab, u32 secid)
 { }
 #endif
 
+extern int audit_log_task_context(struct audit_buffer *ab);
+extern void audit_log_task_info(struct audit_buffer *ab,
+                               struct task_struct *tsk);
+
 extern int                 audit_update_lsm_rules(void);
 
                                /* Private API (for audit.c only) */
-extern int audit_filter_user(void);
+extern int audit_filter_user(int type);
 extern int audit_filter_type(int type);
 extern int  audit_receive_filter(int type, int pid, int seq,
-                               void *data, size_t datasz, kuid_t loginuid,
-                               u32 sessionid, u32 sid);
+                               void *data, size_t datasz);
 extern int audit_enabled;
 #else /* CONFIG_AUDIT */
 static inline __printf(4, 5)
@@ -476,6 +485,13 @@ static inline void audit_log_link_denied(const char *string,
 { }
 static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
 { }
+static inline int audit_log_task_context(struct audit_buffer *ab)
+{
+       return 0;
+}
+static inline void audit_log_task_info(struct audit_buffer *ab,
+                                      struct task_struct *tsk)
+{ }
 #define audit_enabled 0
 #endif /* CONFIG_AUDIT */
 static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
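
audit_socketcall() now returns an int, so a caller can propagate a failure from building the audit record instead of silently continuing. A hedged sketch of the calling pattern (hypothetical wrapper; the error value is assumed to be a negative errno):

    static long do_socketcall_audited(int call, int nargs, unsigned long *args)
    {
            int err;

            /* returns 0 on success or when auditing is not active */
            err = audit_socketcall(nargs, args);
            if (err)
                    return err;     /* assumed: a negative errno such as -ENOMEM */

            /* ... dispatch 'call' to the real socket syscall handler ... */
            return 0;
    }
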
index b840a496028259076caff141de3a6a92ed177a3a..677b4f01b2d0bfd2dd1619d930ec92992bcd1d7d 100644 (file)
@@ -1,3 +1,6 @@
+#ifndef _LINUX_BRCMPHY_H
+#define _LINUX_BRCMPHY_H
+
 #define PHY_ID_BCM50610                        0x0143bd60
 #define PHY_ID_BCM50610M               0x0143bd70
 #define PHY_ID_BCM5241                 0x0143bc30
@@ -29,3 +32,5 @@
 #define PHY_BRCM_CLEAR_RGMII_MODE      0x00004000
 #define PHY_BRCM_DIS_TXCRXC_NOENRGY    0x00008000
 #define PHY_BCM_FLAGS_VALID            0x80000000
+
+#endif /* _LINUX_BRCMPHY_H */
index 5047355b9a0fcf4e2b07076b414c255d2c9b7510..8bda1294c035b24912a3da178ebc2c8e1cd5f827 100644 (file)
@@ -707,7 +707,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
  *
  * If a subsystem synchronizes against the parent in its ->css_online() and
  * before starting iterating, and synchronizes against @pos on each
- * iteration, any descendant cgroup which finished ->css_offline() is
+ * iteration, any descendant cgroup which finished ->css_online() is
  * guaranteed to be visible in the future iterations.
  *
  * In other words, the following guarantees that a descendant can't escape
index d53c35352ea9d14aa4a9c974fa559ab95f88e187..7f0c1dd0907904a831ab0f65fdcbb0d67a068b50 100644 (file)
@@ -673,6 +673,8 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
 asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
                                                 struct compat_timespec __user *interval);
 
+asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
+                                           int, const char __user *);
 #else
 
 #define is_compat_task() (0)
index 3c86faa597980fbad7e17d65d7668d1da5d40e60..8f0406230a0a4890b4247e60215795c11e0a30d3 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/completion.h>
 #include <linux/hrtimer.h>
 
-#define CPUIDLE_STATE_MAX      8
+#define CPUIDLE_STATE_MAX      10
 #define CPUIDLE_NAME_LEN       16
 #define CPUIDLE_DESC_LEN       32
 
index 1e483fa7afb41b681ba802061d4ae5ab4ad4676a..3cd32478f2fd095817c7fb0b9f6558d336020bde 100644 (file)
@@ -79,11 +79,26 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
 typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
                            struct bio_vec *biovec, int max_size);
 
+/*
+ * These iteration functions are typically used to check (and combine)
+ * properties of underlying devices.
+ * E.g. Does at least one underlying device support flush?
+ *      Does any underlying device not support WRITE_SAME?
+ *
+ * The callout function is called once for each contiguous section of
+ * an underlying device.  State can be maintained in *data.
+ * Return non-zero to stop iterating through any further devices.
+ */
 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
                                           struct dm_dev *dev,
                                           sector_t start, sector_t len,
                                           void *data);
 
+/*
+ * This function must iterate through each section of device used by the
+ * target until it encounters a non-zero return code, which it then returns.
+ * Returns zero if no callout returned non-zero.
+ */
 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data);
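
A hedged sketch of the pattern the new comments describe (device_supports_foo() is a stand-in predicate, not a real API): the callout returns non-zero to stop early, and the target's iterate_devices method hands back that first non-zero value.

    /* Hypothetical callout: stop as soon as one underlying device lacks the property. */
    static int lacks_foo(struct dm_target *ti, struct dm_dev *dev,
                         sector_t start, sector_t len, void *data)
    {
            return device_supports_foo(dev) ? 0 : 1;   /* non-zero stops the iteration */
    }

    static bool all_devices_support_foo(struct dm_target *ti)
    {
            /* iterate_devices returns the first non-zero callout result, else 0 */
            return ti->type->iterate_devices &&
                   !ti->type->iterate_devices(ti, lacks_foo, NULL);
    }
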
index c050dcc322a43e2264bdaf49b3667a0b2e63c6ab..f65f5a69db8f8b93a4c85862748c0904350af576 100644 (file)
@@ -46,6 +46,7 @@ extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_detach_filter(struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
 extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
+extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
 
 #ifdef CONFIG_BPF_JIT
 #include <stdarg.h>
index f83e17a40e8b848185b649836274b57acdc332c1..99d0fbcbaf79eff04c8ab56531e02aa46e953977 100644 (file)
@@ -90,6 +90,8 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  *            not set this, then the ftrace infrastructure will add recursion
  *            protection for the caller.
  * STUB   - The ftrace_ops is just a place holder.
+ * INITIALIZED - The ftrace_ops has already been initialized (the first time
+ *            register_ftrace_function() is called it will initialize the ops)
  */
 enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
@@ -100,6 +102,7 @@ enum {
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 5,
        FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 6,
        FTRACE_OPS_FL_STUB                      = 1 << 7,
+       FTRACE_OPS_FL_INITIALIZED               = 1 << 8,
 };
 
 struct ftrace_ops {
@@ -110,6 +113,7 @@ struct ftrace_ops {
 #ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_hash              *notrace_hash;
        struct ftrace_hash              *filter_hash;
+       struct mutex                    regex_lock;
 #endif
 };
 
index 34e00fb49becf506865c18ffafefe23c413ea5c4..4372658c73ae5eddd3dd6570d4dfac8c5d8825c8 100644 (file)
@@ -293,6 +293,7 @@ struct ftrace_event_file {
         * caching and such. Which is mostly OK ;-)
         */
        unsigned long           flags;
+       atomic_t                sm_ref; /* soft-mode reference counter */
 };
 
 #define __TRACE_EVENT_FLAGS(name, value)                               \
index af1b86d46f6e714e4feb5aeab303fb76f47a1234..0c48991b0402d0d93110b8a4fa9cd42d615279c2 100644 (file)
@@ -515,7 +515,7 @@ struct hid_device {                                                 /* device report descriptor */
        struct dentry *debug_rdesc;
        struct dentry *debug_events;
        struct list_head debug_list;
-       struct mutex debug_list_lock;
+       spinlock_t  debug_list_lock;
        wait_queue_head_t debug_wait;
 };
 
index 4474557904f69eef849b329b41b91ba4f72bb678..16fae6436d0ed150fe5947683c647e7ae8b1d9c4 100644 (file)
@@ -249,12 +249,12 @@ team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
                return port;
        cur = port;
        list_for_each_entry_continue_rcu(cur, &team->port_list, list)
-               if (team_port_txable(port))
+               if (team_port_txable(cur))
                        return cur;
        list_for_each_entry_rcu(cur, &team->port_list, list) {
                if (cur == port)
                        break;
-               if (team_port_txable(port))
+               if (team_port_txable(cur))
                        return cur;
        }
        return NULL;
index 13a3da25ff0752a99bccb12b9c105cdbdae13457..98cd41bb39c8667953c10030ef32f2945765f7e8 100644 (file)
@@ -30,15 +30,19 @@ struct journal_head {
 
        /*
         * Journalling list for this buffer [jbd_lock_bh_state()]
+        * NOTE: We *cannot* combine this with b_modified into a bitfield
+        * as gcc would then (as the C standard allows, but unhelpfully)
+        * make 64-bit accesses to the bitfield and clobber
+        * b_jcount if its update races with bitfield modification.
         */
-       unsigned b_jlist:4;
+       unsigned b_jlist;
 
        /*
         * This flag signals the buffer has been modified by
         * the currently running transaction
         * [jbd_lock_bh_state()]
         */
-       unsigned b_modified:1;
+       unsigned b_modified;
 
        /*
         * Copy of the buffer data frozen for writing to the log.
index e96329ceb28c8440e470e3e073226ed24995a462..e9ef6d6b51d5b07f6471e96dc20efa5685416220 100644 (file)
@@ -562,6 +562,9 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...);
 extern __printf(2, 3)
 int __trace_printk(unsigned long ip, const char *fmt, ...);
 
+extern int __trace_bputs(unsigned long ip, const char *str);
+extern int __trace_puts(unsigned long ip, const char *str, int size);
+
 /**
  * trace_puts - write a string into the ftrace buffer
  * @str: the string to record
@@ -587,8 +590,6 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
  *  (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
  */
 
-extern int __trace_bputs(unsigned long ip, const char *str);
-extern int __trace_puts(unsigned long ip, const char *str, int size);
 #define trace_puts(str) ({                                             \
        static const char *trace_printk_fmt                             \
                __attribute__((section("__trace_printk_fmt"))) =        \
index e15828fd71f1b589780b933549e3b07c9652c1e9..484604d184be7380807868d9a12edc80e9064db3 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 struct kref {
        atomic_t refcount;
@@ -98,6 +99,38 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
        return kref_sub(kref, 1, release);
 }
 
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *          last reference to the object is released.
+ *          This pointer is required, and it is not acceptable to pass kfree
+ *          in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identically to kref_put() with one exception.  If the reference count
+ * drops to zero, the lock will be taken atomically wrt dropping the reference
+ * count.  The release function has to call spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+               void (*release)(struct kref *kref),
+               spinlock_t *lock)
+{
+       unsigned long flags;
+
+       WARN_ON(release == NULL);
+       if (atomic_add_unless(&kref->refcount, -1, 1))
+               return 0;
+       spin_lock_irqsave(lock, flags);
+       if (atomic_dec_and_test(&kref->refcount)) {
+               release(kref);
+               local_irq_restore(flags);
+               return 1;
+       }
+       spin_unlock_irqrestore(lock, flags);
+       return 0;
+}
+
 static inline int kref_put_mutex(struct kref *kref,
                                 void (*release)(struct kref *kref),
                                 struct mutex *lock)
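
A hedged usage sketch for the new helper (hypothetical object, kernel-context code): per the kerneldoc above, the release callback runs with the lock held and must drop it with spin_unlock(), not spin_unlock_irqrestore().

    struct foo {
            struct kref kref;
            struct list_head node;
    };

    static DEFINE_SPINLOCK(foo_lock);

    static void foo_release(struct kref *kref)
    {
            struct foo *f = container_of(kref, struct foo, kref);

            list_del(&f->node);
            spin_unlock(&foo_lock);   /* taken by kref_put_spinlock_irqsave() */
            kfree(f);
    }

    static void foo_put(struct foo *f)
    {
            kref_put_spinlock_irqsave(&f->kref, foo_release, &foo_lock);
    }
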
index 6a1f8df9144bcfd5c1af3e9cbad600fe61749c29..b83e5657365adcd30bd816d7d0e57ce3d3691980 100644 (file)
@@ -361,6 +361,17 @@ static inline void list_splice_tail_init(struct list_head *list,
 #define list_first_entry(ptr, type, member) \
        list_entry((ptr)->next, type, member)
 
+/**
+ * list_first_entry_or_null - get the first element from a list
+ * @ptr:       the list head to take the element from.
+ * @type:      the type of the struct this is embedded in.
+ * @member:    the name of the list_struct within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_first_entry_or_null(ptr, type, member) \
+       (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+
 /**
  * list_for_each       -       iterate over a list
  * @pos:       the &struct list_head to use as a loop cursor.
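
A short hedged sketch of the new macro in use (hypothetical 'struct item'); it replaces the usual "if (!list_empty()) list_first_entry()" dance:

    struct item {
            struct list_head node;
            int value;
    };

    static struct item *peek_first(struct list_head *queue)
    {
            /* NULL when the queue is empty, otherwise the first entry */
            return list_first_entry_or_null(queue, struct item, node);
    }
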
index fb1bf7d6a41018edf8358c76ffce8b74e2282cba..0390d5943ed6db1885ecf67612d7cd1da60e908b 100644 (file)
@@ -373,13 +373,11 @@ struct ab8500_sysctrl_platform_data;
 /**
  * struct ab8500_platform_data - AB8500 platform data
  * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
- * @pm_power_off: Should machine pm power off hook be registered or not
  * @init: board-specific initialization after detection of ab8500
  * @regulator: machine-specific constraints for regulators
  */
 struct ab8500_platform_data {
        int irq_base;
-       bool pm_power_off;
        void (*init) (struct ab8500 *);
        struct ab8500_regulator_platform_data *regulator;
        struct abx500_gpio_platform_data *gpio;
index 67f46ad6920a0bffeefb2efe0c437a7a2f86a883..352eec9df1b84097420a88953ce7276cbb637b69 100644 (file)
@@ -126,7 +126,7 @@ struct mlx4_rss_context {
 
 struct mlx4_qp_path {
        u8                      fl;
-       u8                      reserved1[1];
+       u8                      vlan_control;
        u8                      disable_pkey_check;
        u8                      pkey_index;
        u8                      counter_index;
@@ -141,11 +141,32 @@ struct mlx4_qp_path {
        u8                      sched_queue;
        u8                      vlan_index;
        u8                      feup;
-       u8                      reserved3;
+       u8                      fvl_rx;
        u8                      reserved4[2];
        u8                      dmac[6];
 };
 
+enum { /* fl */
+       MLX4_FL_CV      = 1 << 6,
+       MLX4_FL_ETH_HIDE_CQE_VLAN       = 1 << 2
+};
+enum { /* vlan_control */
+       MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED      = 1 << 6,
+       MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED      = 1 << 2,
+       MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */
+       MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED    = 1 << 0
+};
+
+enum { /* feup */
+       MLX4_FEUP_FORCE_ETH_UP          = 1 << 6, /* force Eth UP */
+       MLX4_FSM_FORCE_ETH_SRC_MAC      = 1 << 5, /* force Source MAC */
+       MLX4_FVL_FORCE_ETH_VLAN         = 1 << 3  /* force Eth vlan */
+};
+
+enum { /* fvl_rx */
+       MLX4_FVL_RX_FORCE_ETH_VLAN      = 1 << 0 /* enforce Eth rx vlan */
+};
+
 struct mlx4_qp_context {
        __be32                  flags;
        __be32                  pd;
@@ -185,6 +206,10 @@ struct mlx4_qp_context {
        u32                     reserved5[10];
 };
 
+enum { /* param3 */
+       MLX4_STRIP_VLAN = 1 << 30
+};
+
 /* Which firmware version adds support for NEC (NoErrorCompletion) bit */
 #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
 
index a94a5a0ab122e8fc577df4db38eb65c1ec543f9c..60584b185a0c5d4ad61ed0435982ad4287dea8fc 100644 (file)
@@ -2733,6 +2733,17 @@ static inline netdev_features_t netdev_get_wanted_features(
 }
 netdev_features_t netdev_increment_features(netdev_features_t all,
        netdev_features_t one, netdev_features_t mask);
+
+/* Allow TSO to be used on stacked devices:
+ * performing the GSO segmentation before the last device
+ * is a performance improvement.
+ */
+static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
+                                                       netdev_features_t mask)
+{
+       return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
+}
+
 int __netdev_update_features(struct net_device *dev);
 void netdev_update_features(struct net_device *dev);
 void netdev_change_features(struct net_device *dev);
index 98ffb54988b6d79d16c7364212337e6312271393..2d4df6ce043efab2f9017bc5359b90846f085fb7 100644 (file)
@@ -17,6 +17,22 @@ extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 
 extern int ipv6_netfilter_init(void);
 extern void ipv6_netfilter_fini(void);
+
+/*
+ * Hook functions for ipv6 to allow xt_* modules to be built-in even
+ * if IPv6 is a module.
+ */
+struct nf_ipv6_ops {
+       int (*chk_addr)(struct net *net, const struct in6_addr *addr,
+                       const struct net_device *dev, int strict);
+};
+
+extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
+static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
+{
+       return rcu_dereference(nf_ipv6_ops);
+}
+
 #else /* CONFIG_NETFILTER */
 static inline int ipv6_netfilter_init(void) { return 0; }
 static inline void ipv6_netfilter_fini(void) { return; }
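
A hedged sketch of how a built-in netfilter module might use the new indirection (illustrative caller, not part of this patch): the IPv6 helper is resolved at run time and the caller degrades gracefully while the ipv6 module is not loaded.

    static bool addr_is_local_v6(struct net *net, const struct in6_addr *addr,
                                 const struct net_device *dev)
    {
            const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();

            /* NULL until the ipv6 module registers its ops */
            return v6ops ? v6ops->chk_addr(net, addr, dev, 1) != 0 : false;
    }
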
index 3863a4dbdf1888c51ff9d4ae94057a31e5118034..2a93b64a3869f2af9ea623b8ddc0a435668b3c8c 100644 (file)
  *
  */
 
-#ifdef CONFIG_OF_DEVICE
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
+
+#ifdef CONFIG_OF_DEVICE
 #include <linux/pm.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
@@ -100,7 +101,7 @@ extern int of_platform_populate(struct device_node *root,
 
 #if !defined(CONFIG_OF_ADDRESS)
 struct of_dev_auxdata;
-struct device;
+struct device_node;
 static inline int of_platform_populate(struct device_node *root,
                                        const struct of_device_id *matches,
                                        const struct of_dev_auxdata *lookup,
index 81b31613eb252b3a46052d2375e333affc91c77f..1704479772787b28950c9768a9e73c35c88dcd9a 100644 (file)
@@ -60,11 +60,13 @@ static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
 void acpiphp_init(void);
 void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle);
 void acpiphp_remove_slots(struct pci_bus *bus);
+void acpiphp_check_host_bridge(acpi_handle handle);
 #else
 static inline void acpiphp_init(void) { }
 static inline void acpiphp_enumerate_slots(struct pci_bus *bus,
                                           acpi_handle handle) { }
 static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
+static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
 #endif
 
 #else  /* CONFIG_ACPI */
index 2b85c521f7375452ed4c0fd54d7cba37b60f4dde..c12916248469f577795d28e96c4f86be747fbf5b 100644 (file)
 #define PCI_DEVICE_ID_TIGON3_5705M_2   0x165e
 #define PCI_DEVICE_ID_NX2_57712                0x1662
 #define PCI_DEVICE_ID_NX2_57712E       0x1663
+#define PCI_DEVICE_ID_NX2_57712_MF     0x1663
 #define PCI_DEVICE_ID_TIGON3_5714      0x1668
 #define PCI_DEVICE_ID_TIGON3_5714S     0x1669
 #define PCI_DEVICE_ID_TIGON3_5780      0x166a
 #define PCI_DEVICE_ID_TIGON3_5780S     0x166b
 #define PCI_DEVICE_ID_TIGON3_5705F     0x166e
+#define PCI_DEVICE_ID_NX2_57712_VF     0x166f
 #define PCI_DEVICE_ID_TIGON3_5754M     0x1672
 #define PCI_DEVICE_ID_TIGON3_5755M     0x1673
 #define PCI_DEVICE_ID_TIGON3_5756      0x1674
 #define PCI_DEVICE_ID_TIGON3_5787      0x169b
 #define PCI_DEVICE_ID_TIGON3_5788      0x169c
 #define PCI_DEVICE_ID_TIGON3_5789      0x169d
+#define PCI_DEVICE_ID_NX2_57840_4_10   0x16a1
+#define PCI_DEVICE_ID_NX2_57840_2_20   0x16a2
+#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_57800_MF     0x16a5
 #define PCI_DEVICE_ID_TIGON3_5702X     0x16a6
 #define PCI_DEVICE_ID_TIGON3_5703X     0x16a7
 #define PCI_DEVICE_ID_TIGON3_5704S     0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF     0x16a9
 #define PCI_DEVICE_ID_NX2_5706S                0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_5708S                0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF     0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF     0x16ae
index 72474e18f1e0d8fef7441d3f11269eb90059b684..6aa238096622441de2125f3a7658fe2777f349a1 100644 (file)
  *     if it is 0, pull-down is disabled.
  * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and
  *     low, this is the most typical case and is typically achieved with two
- *     active transistors on the output. Sending this config will enabale
+ *     active transistors on the output. Setting this config will enable
  *     push-pull mode, the argument is ignored.
  * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
  *     collector) which means it is usually wired with other output ports
- *     which are then pulled up with an external resistor. Sending this
- *     config will enabale open drain mode, the argument is ignored.
+ *     which are then pulled up with an external resistor. Setting this
+ *     config will enable open drain mode, the argument is ignored.
  * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
- *     (open emitter). Sending this config will enabale open drain mode, the
+ *     (open emitter). Setting this config will enable open source mode, the
  *     argument is ignored.
- * @PIN_CONFIG_DRIVE_STRENGTH: the pin will output the current passed as
- *     argument. The argument is in mA.
+ * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
+ *     passed as argument. The argument is in mA.
  * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
  *      If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
  *      schmitt-trigger mode is disabled.
index 528e73ce46d2e2903b30172f61627ea113b8be02..23901992b9ddc2b4f67dc50ef0fe6e64f8173d2f 100644 (file)
 #ifndef __CLK_LPSS_H
 #define __CLK_LPSS_H
 
+struct lpss_clk_data {
+       const char *name;
+       struct clk *clk;
+};
+
 extern int lpt_clk_init(void);
 
 #endif /* __CLK_LPSS_H */
index ff9b0aab5281c2251e93f5a6f56ea09cf2e31289..c860c1b314c0473a7737cd6c7dacd65344fbc656 100644 (file)
@@ -43,8 +43,6 @@ struct omap_uart_port_info {
        int                     DTR_present;
 
        int (*get_context_loss_count)(struct device *);
-       void (*set_forceidle)(struct device *);
-       void (*set_noidle)(struct device *);
        void (*enable_wakeup)(struct device *, bool);
 };
 
index 6af944ab38f05638e7443122abbc1ec5592dfcbe..22c7052e937248e4c3337778d8608b3508a24ce5 100644 (file)
@@ -4,6 +4,7 @@
 #include <stdarg.h>
 #include <linux/init.h>
 #include <linux/kern_levels.h>
+#include <linux/linkage.h>
 
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
index 2ae13714828bc42568e77684fedfc2cf929c291d..1c33dd7da4a7d860004264e00bd406645df1ed96 100644 (file)
@@ -105,9 +105,14 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
  * @head:      the head for your list.
  * @member:    the name of the hlist_nulls_node within the struct.
  *
+ * The barrier() is needed to make sure the compiler doesn't cache the first element [1],
+ * as this loop can be restarted [2]
+ * [1] Documentation/atomic_ops.txt around line 114
+ * [2] Documentation/RCU/rculist_nulls.txt around line 146
  */
 #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)                        \
-       for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));            \
+       for (({barrier();}),                                                    \
+            pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));            \
                (!is_a_nulls(pos)) &&                                           \
                ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
                pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
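
A hedged sketch of the restartable-lookup pattern the new comment refers to (hypothetical hash table, key field and slot, SLAB_DESTROY_BY_RCU style): if the traversal ends on a nulls marker belonging to a different slot, the object was moved under us and the scan restarts, which is why the compiler must not cache the list head.

    static struct obj *lookup(struct hlist_nulls_head *table, unsigned int slot,
                              unsigned long key)
    {
            struct obj *obj;
            struct hlist_nulls_node *node;

    begin:
            hlist_nulls_for_each_entry_rcu(obj, node, &table[slot], hash_node) {
                    if (obj->key == key)
                            return obj;
            }
            /* chain ended on a nulls value from another slot: entry moved, retry */
            if (get_nulls_value(node) != slot)
                    goto begin;
            return NULL;
    }
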
index a3e7842786679567185ecdbc8835fd5936fb36f4..18e099342e6f82732a93b347b021ca7aa4e2b22d 100644 (file)
@@ -83,7 +83,6 @@
 
 extern struct bus_type rio_bus_type;
 extern struct device rio_bus;
-extern struct list_head rio_devices;   /* list of all devices */
 
 struct rio_mport;
 struct rio_dev;
@@ -237,6 +236,7 @@ enum rio_phy_type {
  * @name: Port name string
  * @priv: Master port private data
  * @dma: DMA device associated with mport
+ * @nscan: RapidIO network enumeration/discovery operations
  */
 struct rio_mport {
        struct list_head dbells;        /* list of doorbell events */
@@ -262,8 +262,14 @@ struct rio_mport {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_device       dma;
 #endif
+       struct rio_scan *nscan;
 };
 
+/*
+ * Enumeration/discovery control flags
+ */
+#define RIO_SCAN_ENUM_NO_WAIT  0x00000001 /* Do not wait for enumeration to complete */
+
 struct rio_id_table {
        u16 start;      /* logical minimal id */
        u32 max;        /* max number of IDs in table */
@@ -460,6 +466,16 @@ static inline struct rio_mport *dma_to_mport(struct dma_device *ddev)
 }
 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
 
+/**
+ * struct rio_scan - RIO enumeration and discovery operations
+ * @enumerate: Callback to perform RapidIO fabric enumeration.
+ * @discover: Callback to perform RapidIO fabric discovery.
+ */
+struct rio_scan {
+       int (*enumerate)(struct rio_mport *mport, u32 flags);
+       int (*discover)(struct rio_mport *mport, u32 flags);
+};
+
 /* Architecture and hardware-specific functions */
 extern int rio_register_mport(struct rio_mport *);
 extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
index b75c05920ab58f976cb73223ce7a7da97a4b146d..5059994fe2970edf8942e537fd3fe81144c2ccb3 100644 (file)
@@ -433,5 +433,6 @@ extern u16 rio_local_get_device_id(struct rio_mport *port);
 extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
 extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
                                   struct rio_dev *from);
+extern int rio_init_mports(void);
 
 #endif                         /* LINUX_RIO_DRV_H */
index caa8f4d0186b742c3effc99c296ac99724f2f4d1..178a8d909f14a3dcdcbc0ce255572975c8b3b221 100644 (file)
@@ -593,6 +593,7 @@ struct signal_struct {
 #endif
 #ifdef CONFIG_AUDIT
        unsigned audit_tty;
+       unsigned audit_tty_log_passwd;
        struct tty_audit_buf *tty_audit_buf;
 #endif
 #ifdef CONFIG_CGROUPS
index 2e0ced1af3b10458f4507560bd9c703c2661425f..9c676eae396867fbcc656bc9466fe19459f31b2f 100644 (file)
@@ -2852,6 +2852,21 @@ static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
                SKB_GSO_CB(inner_skb)->mac_offset;
 }
 
+static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
+{
+       int new_headroom, headroom;
+       int ret;
+
+       headroom = skb_headroom(skb);
+       ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
+       if (ret)
+               return ret;
+
+       new_headroom = skb_headroom(skb);
+       SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
+       return 0;
+}
+
 static inline bool skb_is_gso(const struct sk_buff *skb)
 {
        return skb_shinfo(skb)->gso_size;
index 428c37a1f95ce2eea8d9785ef69fc6050b5b7c96..b10ce4b341ea79f8a6e14e63dd458308287b9ac4 100644 (file)
@@ -305,7 +305,6 @@ struct ucred {
 
 extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
 
-extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
 extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
                               int offset, int len);
 extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 
@@ -314,7 +313,6 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
                                          unsigned int len, __wsum *csump);
 
 extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
-extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
                             int offset, int len);
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
@@ -322,6 +320,9 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
 
 struct timespec;
 
+/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
+extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
+extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
 extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
                          unsigned int flags, struct timespec *timeout);
 extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
index 733eb5ee31c5446cbe6a11bb0b3603a6534edf49..6ff26c8db7b923853527cee3ee1ffff1ca55bdd6 100644 (file)
@@ -57,7 +57,7 @@ extern struct bus_type spi_bus_type;
  * @modalias: Name of the driver to use with this device, or an alias
  *     for that name.  This appears in the sysfs "modalias" attribute
  *     for driver coldplugging, and in uevents used for hotplugging
- * @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when
+ * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
  *     when not using a GPIO line)
  *
  * A @spi_device is used to interchange data between an SPI slave
@@ -266,7 +266,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *     queue so the subsystem notifies the driver that it may relax the
  *     hardware by issuing this call
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
- *     number. Any individual value may be -EINVAL for CS lines that
+ *     number. Any individual value may be -ENOENT for CS lines that
  *     are not GPIOs (driven by the SPI controller itself).
  *
  * Each SPI master controller can communicate with one or more @spi_device
index 22d81b3c955b533bf73ec0b9353c6c7d4287c010..d5d229b2e5af1815d38d7d87d217ee8340b9ec1d 100644 (file)
@@ -117,14 +117,10 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
 
 extern bool persistent_clock_exist;
 
-#ifdef ALWAYS_USE_PERSISTENT_CLOCK
-#define has_persistent_clock() true
-#else
 static inline bool has_persistent_clock(void)
 {
        return persistent_clock_exist;
 }
-#endif
 
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
index 7e92bd86a808cfbdf31bf38f1165ab30fe93c2ff..8780bd2a272ab6672c8f6c32340b7d1ab2831acb 100644 (file)
@@ -575,8 +575,7 @@ extern void tty_audit_exit(void);
 extern void tty_audit_fork(struct signal_struct *sig);
 extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
 extern void tty_audit_push(struct tty_struct *tty);
-extern int tty_audit_push_task(struct task_struct *tsk,
-                              kuid_t loginuid, u32 sessionid);
+extern int tty_audit_push_current(void);
 #else
 static inline void tty_audit_add_data(struct tty_struct *tty,
                unsigned char *data, size_t size, unsigned icanon)
@@ -594,8 +593,7 @@ static inline void tty_audit_fork(struct signal_struct *sig)
 static inline void tty_audit_push(struct tty_struct *tty)
 {
 }
-static inline int tty_audit_push_task(struct task_struct *tsk,
-                                     kuid_t loginuid, u32 sessionid)
+static inline int tty_audit_push_current(void)
 {
        return 0;
 }
index 629aaf51f30b8c1fdc6e27cd086c9caf19378896..c55ce243cc0985c450786e4cb63f8ed3c8e5c53b 100644 (file)
@@ -35,4 +35,7 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
 }
 
 unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
+
+int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
+int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
 #endif
index c454a88abf2e6fed7adda903a7d2e0626613e29f..f1b0dca60f127116113361da7ff61d726d0fa26a 100644 (file)
@@ -563,9 +563,8 @@ static inline int gadget_is_dualspeed(struct usb_gadget *g)
 }
 
 /**
- * gadget_is_superspeed() - return true if the hardware handles
- * supperspeed
- * @g: controller that might support supper speed
+ * gadget_is_superspeed() - return true if the hardware handles superspeed
+ * @g: controller that might support superspeed
  */
 static inline int gadget_is_superspeed(struct usb_gadget *g)
 {
index b9b0f7b4e43b81c4829802b5f0eea0cd0713974d..302ddf55d2daca5d74291b1eb9aaa2b4127fe7d2 100644 (file)
@@ -268,6 +268,8 @@ struct usb_serial_driver {
                        struct usb_serial_port *port, struct ktermios *old);
        void (*break_ctl)(struct tty_struct *tty, int break_state);
        int  (*chars_in_buffer)(struct tty_struct *tty);
+       void (*wait_until_sent)(struct tty_struct *tty, long timeout);
+       bool (*tx_empty)(struct usb_serial_port *port);
        void (*throttle)(struct tty_struct *tty);
        void (*unthrottle)(struct tty_struct *tty);
        int  (*tiocmget)(struct tty_struct *tty);
@@ -327,6 +329,8 @@ extern void usb_serial_generic_close(struct usb_serial_port *port);
 extern int usb_serial_generic_resume(struct usb_serial *serial);
 extern int usb_serial_generic_write_room(struct tty_struct *tty);
 extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
+extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty,
+                                                               long timeout);
 extern void usb_serial_generic_read_bulk_callback(struct urb *urb);
 extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
 extern void usb_serial_generic_throttle(struct tty_struct *tty);
index e8d65718560b3460b09123cb99d3a3f08685c217..0d33fca487748916faca92cad918c3726f713a57 100644 (file)
@@ -36,7 +36,7 @@ extern int fg_console, last_console, want_console;
 int vc_allocate(unsigned int console);
 int vc_cons_allocated(unsigned int console);
 int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines);
-void vc_deallocate(unsigned int console);
+struct vc_data *vc_deallocate(unsigned int console);
 void reset_palette(struct vc_data *vc);
 void do_blank_screen(int entering_gfx);
 void do_unblank_screen(int leaving_gfx);
index ac38be2692d89f2993381e57d6145ab75891b1d3..1133695eb0671d7aeb7f9140bf2b6adc3bc7483d 100644 (file)
@@ -217,6 +217,8 @@ do {                                                                        \
                if (!ret)                                               \
                        break;                                          \
        }                                                               \
+       if (!ret && (condition))                                        \
+               ret = 1;                                                \
        finish_wait(&wq, &__wait);                                      \
 } while (0)
 
@@ -233,8 +235,9 @@ do {                                                                        \
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
  *
- * The function returns 0 if the @timeout elapsed, and the remaining
- * jiffies if the condition evaluated to true before the timeout elapsed.
+ * The function returns 0 if the @timeout elapsed, or the remaining
+ * jiffies (at least 1) if the @condition evaluated to %true before
+ * the @timeout elapsed.
  */
 #define wait_event_timeout(wq, condition, timeout)                     \
 ({                                                                     \
@@ -302,6 +305,8 @@ do {                                                                        \
                ret = -ERESTARTSYS;                                     \
                break;                                                  \
        }                                                               \
+       if (!ret && (condition))                                        \
+               ret = 1;                                                \
        finish_wait(&wq, &__wait);                                      \
 } while (0)
 
@@ -318,9 +323,10 @@ do {                                                                       \
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
  *
- * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
- * was interrupted by a signal, and the remaining jiffies otherwise
- * if the condition evaluated to true before the timeout elapsed.
+ * Returns:
+ * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
+ * a signal, or the remaining jiffies (at least 1) if the @condition
+ * evaluated to %true before the @timeout elapsed.
  */
 #define wait_event_interruptible_timeout(wq, condition, timeout)       \
 ({                                                                     \
index 84a6440f1f19ee698dba94a7f899d77fc527e432..21f702704f2444272e1554c87112594d40acd421 100644 (file)
@@ -65,7 +65,7 @@ extern int                    addrconf_set_dstaddr(struct net *net,
 
 extern int                     ipv6_chk_addr(struct net *net,
                                              const struct in6_addr *addr,
-                                             struct net_device *dev,
+                                             const struct net_device *dev,
                                              int strict);
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
index 31f1fb9eb78478a6db6339065b69b280adb26916..99eac12d040ba375e0b1d8712d9a4b3005da5ee5 100644 (file)
@@ -30,7 +30,8 @@ struct nf_loginfo {
        } u;
 };
 
-typedef void nf_logfn(u_int8_t pf,
+typedef void nf_logfn(struct net *net,
+                     u_int8_t pf,
                      unsigned int hooknum,
                      const struct sk_buff *skb,
                      const struct net_device *in,
index e2dec42c2db2b5c07afc98165b140b76aa599f56..5ca3f14f0998e9962385821409d32f0ac66dbb43 100644 (file)
@@ -2,7 +2,8 @@
 #define _KER_NFNETLINK_LOG_H
 
 void
-nfulnl_log_packet(u_int8_t pf,
+nfulnl_log_packet(struct net *net,
+                 u_int8_t pf,
                  unsigned int hooknum,
                  const struct sk_buff *skb,
                  const struct net_device *in,
index f10818fc8804ba85ca8c2e372b5145b18ae689a9..e7f4e21cc3e17ec53ac8c5021daa74401766decb 100644 (file)
@@ -679,22 +679,26 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
 #endif
 
 struct psched_ratecfg {
-       u64 rate_bps;
-       u32 mult;
-       u32 shift;
+       u64     rate_bps;
+       u32     mult;
+       u16     overhead;
+       u8      shift;
 };
 
 static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
 {
-       return ((u64)len * r->mult) >> r->shift;
+       return ((u64)(len + r->overhead) * r->mult) >> r->shift;
 }
 
-extern void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate);
+extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
 
-static inline u32 psched_ratecfg_getrate(const struct psched_ratecfg *r)
+static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
+                                         const struct psched_ratecfg *r)
 {
-       return r->rate_bps >> 3;
+       memset(res, 0, sizeof(*res));
+       res->rate = r->rate_bps >> 3;
+       res->overhead = r->overhead;
 }
 
 #endif
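
Illustrative arithmetic (assumed figures, not taken from the patch): with the overhead field added above, psched_l2t_ns() charges len + r->overhead bytes rather than len alone. At 1 Gbit/s each byte serializes in 8 ns, so a 1500-byte packet with a configured 24-byte per-packet overhead is now costed at (1500 + 24) * 8 = 12192 ns instead of 12000 ns.
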
index 5c97b0fc5623aa4bb44db9082d6631ad0a27fafe..66772cf8c3c528c86104684ca3cf1ac8c5ccfd85 100644 (file)
@@ -866,6 +866,18 @@ struct inet_hashinfo;
 struct raw_hashinfo;
 struct module;
 
+/*
+ * caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
+ * nodes unmodified. Special care is taken when initializing the object to zero.
+ */
+static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+{
+       if (offsetof(struct sock, sk_node.next) != 0)
+               memset(sk, 0, offsetof(struct sock, sk_node.next));
+       memset(&sk->sk_node.pprev, 0,
+              size - offsetof(struct sock, sk_node.pprev));
+}
+
 /* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
index ae16531d0d353741ac66c83e0464341d060d5cce..94ce082b29dcdba2d726cbc90a0c4510fd38fb2b 100644 (file)
@@ -1160,6 +1160,8 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
        }
 }
 
+extern void xfrm_garbage_collect(struct net *net);
+
 #else
 
 static inline void xfrm_sk_free_policy(struct sock *sk) {}
@@ -1194,6 +1196,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
 {
        return 1;
 }
+static inline void xfrm_garbage_collect(struct net *net)
+{
+}
 #endif
 
 static __inline__
index ef937b56f9b54c44e4003d762eaa7105534077da..e2c1e66d58ae6c810e4ca18d4e62d9f78b2c3e4b 100644 (file)
@@ -118,7 +118,7 @@ struct ex_phy {
 
        enum ex_phy_state phy_state;
 
-       enum sas_dev_type attached_dev_type;
+       enum sas_device_type attached_dev_type;
        enum sas_linkrate linkrate;
 
        u8   attached_sata_host:1;
@@ -195,7 +195,7 @@ enum {
 
 struct domain_device {
        spinlock_t done_lock;
-        enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
 
         enum sas_linkrate linkrate;
         enum sas_linkrate min_linkrate;
index a6026da25f3e0bf5787d479e1511da85c1175299..25ac6283b9c753a6af74055d590a1ed613004be1 100644 (file)
@@ -107,7 +107,7 @@ enum osd_attributes_mode {
  *             int exponent: 04;
  *     }
  */
-typedef __be32 __bitwise osd_cdb_offset;
+typedef __be32 osd_cdb_offset;
 
 enum {
        OSD_OFFSET_UNUSED = 0xFFFFFFFF,
index be3eb0bf1ac0ef1213343a173787c645d16e21ef..0d2607d1238753f951e43e6b278735971413423c 100644 (file)
@@ -90,16 +90,18 @@ enum sas_oob_mode {
 };
 
 /* See sas_discover.c if you plan on changing these */
-enum sas_dev_type {
-       NO_DEVICE   = 0,          /* protocol */
-       SAS_END_DEV = 1,          /* protocol */
-       EDGE_DEV    = 2,          /* protocol */
-       FANOUT_DEV  = 3,          /* protocol */
-       SAS_HA      = 4,
-       SATA_DEV    = 5,
-       SATA_PM     = 7,
-       SATA_PM_PORT= 8,
-       SATA_PENDING  = 9,
+enum sas_device_type {
+       /* these are SAS protocol defined (attached device type field) */
+       SAS_PHY_UNUSED = 0,
+       SAS_END_DEVICE = 1,
+       SAS_EDGE_EXPANDER_DEVICE = 2,
+       SAS_FANOUT_EXPANDER_DEVICE = 3,
+       /* these are internal to libsas */
+       SAS_HA = 4,
+       SAS_SATA_DEV = 5,
+       SAS_SATA_PM = 7,
+       SAS_SATA_PM_PORT = 8,
+       SAS_SATA_PENDING = 9,
 };
 
 enum sas_protocol {
index ff71a56546845f2e29266ecf466ecd6d7174f38a..00f41aeeecf54f321b74599b99dc5f97a5af0bca 100644 (file)
@@ -32,8 +32,8 @@
 
 static inline int dev_is_sata(struct domain_device *dev)
 {
-       return dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
-              dev->dev_type == SATA_PM_PORT || dev->dev_type == SATA_PENDING;
+       return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+              dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING;
 }
 
 int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
index a7f9cba275e98405b7da60143361b87cc4fcc231..cc645876d14737db54ea12aeb9959b7a00dfe9c2 100644 (file)
@@ -394,10 +394,18 @@ extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                        int data_direction, void *buffer, unsigned bufflen,
                        unsigned char *sense, int timeout, int retries,
                        int flag, int *resid);
-extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
-                           int data_direction, void *buffer, unsigned bufflen,
-                           struct scsi_sense_hdr *, int timeout, int retries,
-                           int *resid);
+extern int scsi_execute_req_flags(struct scsi_device *sdev,
+       const unsigned char *cmd, int data_direction, void *buffer,
+       unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
+       int retries, int *resid, int flags);
+static inline int scsi_execute_req(struct scsi_device *sdev,
+       const unsigned char *cmd, int data_direction, void *buffer,
+       unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
+       int retries, int *resid)
+{
+       return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
+               bufflen, sshdr, timeout, retries, resid, 0);
+}
 extern void sdev_disable_disk_events(struct scsi_device *sdev);
 extern void sdev_enable_disk_events(struct scsi_device *sdev);
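
Rather than changing every caller, scsi_execute_req() is re-expressed as a static inline that forwards to the new scsi_execute_req_flags() with flags = 0. A generic sketch of this compatibility-wrapper idiom, with invented function names:

    #include <stddef.h>

    /* Illustrative only: do_work_flags()/do_work() are made-up names. */
    int do_work_flags(void *buf, size_t len, int flags);   /* new, extended entry point */

    static inline int do_work(void *buf, size_t len)       /* legacy signature kept     */
    {
            return do_work_flags(buf, len, 0);              /* old behaviour == no flags */
    }
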
 
index 4a58cca2ecc182f1d44912297cc294b3ecce4760..d0f1602985e7efe6d6ab40dd67fbcc04e6a32276 100644 (file)
@@ -471,14 +471,10 @@ iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
 extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
 extern int iscsi_flashnode_bus_match(struct device *dev,
                                     struct device_driver *drv);
-extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data);
-
 extern struct device *
 iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
                          int (*fn)(struct device *dev, void *data));
-
 extern struct device *
-iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
-                         void *data,
-                         int (*fn)(struct device *dev, void *data));
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess);
+
 #endif
index 9b8e08879cfc9d7f7038245cf12e9b162f0f7f4e..0bd71e2702e3181e115ba8aa8e2b49a78ac21da7 100644 (file)
@@ -10,13 +10,6 @@ struct scsi_transport_template;
 struct sas_rphy;
 struct request;
 
-enum sas_device_type {
-       SAS_PHY_UNUSED = 0,
-       SAS_END_DEVICE = 1,
-       SAS_EDGE_EXPANDER_DEVICE = 2,
-       SAS_FANOUT_EXPANDER_DEVICE = 3,
-};
-
 static inline int sas_protocol_ata(enum sas_protocol proto)
 {
        return ((proto & SAS_PROTOCOL_SATA) ||
index 28c65e1ada21e8f632422055d70d66789d63a3d5..e11e179420a11fb07bce24e48e960bfdba9671a4 100644 (file)
 #define DECLARE_TLV_DB_LINEAR(name, min_dB, max_dB)    \
        unsigned int name[] = { TLV_DB_LINEAR_ITEM(min_dB, max_dB) }
 
-/* dB range container */
+/* dB range container:
+ * Items in dB range container must be ordered by their values and by their
+ * dB values. This implies that larger values must correspond with larger
+ * dB values (which is also required for all other mixer controls).
+ */
 /* Each item is: <min> <max> <TLV> */
 #define TLV_DB_RANGE_ITEM(...) \
        TLV_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__)
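
The expanded comment says that within a dB range container both the control values and their dB values must increase together. A sketch of a conforming range TLV — the array name and the particular dB figures are invented; TLV_DB_RANGE_HEAD() and TLV_DB_SCALE_ITEM() are the helpers from <sound/tlv.h>:

    #include <sound/tlv.h>

    /* values 0..7 cover -45.0 dB .. -24.0 dB, values 8..15 cover -22.5 dB .. -12.0 dB:
     * the value ranges and the dB ranges both increase, as the comment requires. */
    static const unsigned int example_vol_tlv[] = {
            TLV_DB_RANGE_HEAD(2),
            0, 7,  TLV_DB_SCALE_ITEM(-4500, 300, 0),
            8, 15, TLV_DB_SCALE_ITEM(-2250, 150, 0),
    };
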
index c4af592f7057e472d8953099c56651423d0ec6ad..4ea4f985f39409cf8e03c03972e5b923ca756fff 100644 (file)
@@ -463,7 +463,6 @@ struct se_cmd {
 #define CMD_T_ABORTED          (1 << 0)
 #define CMD_T_ACTIVE           (1 << 1)
 #define CMD_T_COMPLETE         (1 << 2)
-#define CMD_T_QUEUED           (1 << 3)
 #define CMD_T_SENT             (1 << 4)
 #define CMD_T_STOP             (1 << 5)
 #define CMD_T_FAILED           (1 << 6)
@@ -544,6 +543,7 @@ struct se_session {
        struct list_head        sess_list;
        struct list_head        sess_acl_list;
        struct list_head        sess_cmd_list;
+       struct list_head        sess_wait_list;
        spinlock_t              sess_cmd_lock;
        struct kref             sess_kref;
 };
@@ -572,12 +572,8 @@ struct se_dev_entry {
        bool                    def_pr_registered;
        /* See transport_lunflags_table */
        u32                     lun_flags;
-       u32                     deve_cmds;
        u32                     mapped_lun;
-       u32                     average_bytes;
-       u32                     last_byte_count;
        u32                     total_cmds;
-       u32                     total_bytes;
        u64                     pr_res_key;
        u64                     creation_time;
        u32                     attach_count;
index ba3471b73c07557d91411d9d6b3367e356962096..1dcce9cc99b9bb0321b0ad9419adadb41a1c1346 100644 (file)
@@ -114,7 +114,7 @@ sense_reason_t      transport_generic_new_cmd(struct se_cmd *);
 
 void   target_execute_cmd(struct se_cmd *cmd);
 
-void   transport_generic_free_cmd(struct se_cmd *, int);
+int    transport_generic_free_cmd(struct se_cmd *, int);
 
 bool   transport_wait_for_tasks(struct se_cmd *);
 int    transport_check_aborted_status(struct se_cmd *, int);
@@ -123,7 +123,7 @@ int transport_send_check_condition_and_sense(struct se_cmd *,
 int    target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
 int    target_put_sess_cmd(struct se_session *, struct se_cmd *);
 void   target_sess_cmd_list_set_waiting(struct se_session *);
-void   target_wait_for_sess_cmds(struct se_session *, int);
+void   target_wait_for_sess_cmds(struct se_session *);
 
 int    core_alua_check_nonop_delay(struct se_cmd *);
 
index d0e686402df883d89e474a63374cdd1aa4c73ba6..8ee15b97cd389113859e614e6135a8e1eb6916b5 100644 (file)
@@ -2139,7 +2139,7 @@ TRACE_EVENT(ext4_es_remove_extent,
                  __entry->lblk, __entry->len)
 );
 
-TRACE_EVENT(ext4_es_find_delayed_extent_enter,
+TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
        TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
 
        TP_ARGS(inode, lblk),
@@ -2161,7 +2161,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_enter,
                  (unsigned long) __entry->ino, __entry->lblk)
 );
 
-TRACE_EVENT(ext4_es_find_delayed_extent_exit,
+TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
        TP_PROTO(struct inode *inode, struct extent_status *es),
 
        TP_ARGS(inode, es),
index 9f096f1c0907b7056bcbe396783fdb2237724a65..75cef3fd97add201693b6bf21f4e2f1754c96f43 100644 (file)
 #define AUDIT_OBJ_TYPE 21
 #define AUDIT_OBJ_LEV_LOW      22
 #define AUDIT_OBJ_LEV_HIGH     23
+#define AUDIT_LOGINUID_SET     24
 
                                /* These are ONLY useful when checking
                                 * at syscall exit time (AUDIT_AT_EXIT). */
@@ -369,7 +370,8 @@ struct audit_status {
 };
 
 struct audit_tty_status {
-       __u32           enabled; /* 1 = enabled, 0 = disabled */
+       __u32           enabled;        /* 1 = enabled, 0 = disabled */
+       __u32           log_passwd;     /* 1 = enabled, 0 = disabled */
 };
 
 /* audit_rule_data supports filter rules with both integer and string
index ee13ab6c361409b31216da2e54790f804401fe51..c312f16bc4e7e692f4117b0a9280cadf1977f8db 100644 (file)
@@ -39,7 +39,7 @@
 #define VIRTIO_CONSOLE_F_SIZE  0       /* Does host provide console size? */
 #define VIRTIO_CONSOLE_F_MULTIPORT 1   /* Does host provide multiple ports? */
 
-#define VIRTIO_CONSOLE_BAD_ID          (~(u32)0)
+#define VIRTIO_CONSOLE_BAD_ID          (~(__u32)0)
 
 struct virtio_console_config {
        /* columns of the screens */
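
VIRTIO_CONSOLE_BAD_ID switches from u32 to __u32 because this header is exported to userspace, where only the double-underscore fixed-width types from <linux/types.h> exist. A minimal sketch of a userspace-visible definition written with those types (the macro and struct are invented):

    #include <linux/types.h>

    #define EXAMPLE_BAD_ID  (~(__u32)0)     /* __u32, not u32, in exported headers */

    struct example_uapi_config {            /* invented userspace-visible layout */
            __u32 rows;
            __u32 cols;
            __u64 features;
    };
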
index 62ca9a77c1d65dff76823857e895f61280893a1b..aeb4e9a0c5d1c1e4dbd484286983bc3eada936df 100644 (file)
@@ -748,6 +748,7 @@ struct omap_dss_driver {
 };
 
 enum omapdss_version omapdss_get_version(void);
+bool omapdss_is_initialized(void);
 
 int omap_dss_register_driver(struct omap_dss_driver *);
 void omap_dss_unregister_driver(struct omap_dss_driver *);
index 0a7515c1e3a424214158750aaf1262c05fac0d48..569c07f2e3446d57acb82b3430a1ceeb5f85fb95 100644 (file)
@@ -70,6 +70,7 @@ struct xenbus_device {
        struct device dev;
        enum xenbus_state state;
        struct completion down;
+       struct work_struct work;
 };
 
 static inline struct xenbus_device *to_xenbus_device(struct device *dev)
index a7e40ed8a07674fd0493c1495a012a09759d835b..70480a3aa69891b6ebc6c998e2202c3197a443ec 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -752,19 +752,29 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
                        int otime, struct list_head *pt)
 {
        int i;
+       int progress;
 
-       if (sma->complex_count || sops == NULL) {
-               if (update_queue(sma, -1, pt))
+       progress = 1;
+retry_global:
+       if (sma->complex_count) {
+               if (update_queue(sma, -1, pt)) {
+                       progress = 1;
                        otime = 1;
+                       sops = NULL;
+               }
        }
+       if (!progress)
+               goto done;
 
        if (!sops) {
                /* No semops; something special is going on. */
                for (i = 0; i < sma->sem_nsems; i++) {
-                       if (update_queue(sma, i, pt))
+                       if (update_queue(sma, i, pt)) {
                                otime = 1;
+                               progress = 1;
+                       }
                }
-               goto done;
+               goto done_checkretry;
        }
 
        /* Check the semaphores that were modified. */
@@ -772,8 +782,15 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
                if (sops[i].sem_op > 0 ||
                        (sops[i].sem_op < 0 &&
                                sma->sem_base[sops[i].sem_num].semval == 0))
-                       if (update_queue(sma, sops[i].sem_num, pt))
+                       if (update_queue(sma, sops[i].sem_num, pt)) {
                                otime = 1;
+                               progress = 1;
+                       }
+       }
+done_checkretry:
+       if (progress) {
+               progress = 0;
+               goto retry_global;
        }
 done:
        if (otime)
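
The do_smart_update() change keeps rescanning the wait queues for as long as a pass reports progress, because completing one semaphore operation can unblock operations on other semaphores. A small stand-alone sketch of that retry-until-no-progress loop (the work items and helper are invented):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical: returns true if it managed to complete at least one item. */
    static bool scan_once(int *pending)
    {
            if (*pending > 0) {
                    (*pending)--;
                    return true;
            }
            return false;
    }

    int main(void)
    {
            int pending = 3;
            bool progress;

            do {            /* keep rescanning while the last pass made progress */
                    progress = scan_once(&pending);
            } while (progress);

            printf("left: %d\n", pending);
            return 0;
    }
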
index 0b084fa44b1f21f3cc73508676e79d58b08f1896..21c7fa615bd3107b0c28a4da499ea3ee7361d695 100644 (file)
@@ -49,6 +49,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
 
 #include <linux/audit.h>
 
@@ -265,7 +267,6 @@ void audit_log_lost(const char *message)
 }
 
 static int audit_log_config_change(char *function_name, int new, int old,
-                                  kuid_t loginuid, u32 sessionid, u32 sid,
                                   int allow_changes)
 {
        struct audit_buffer *ab;
@@ -274,29 +275,17 @@ static int audit_log_config_change(char *function_name, int new, int old,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return rc;
-       audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
-                        old, from_kuid(&init_user_ns, loginuid), sessionid);
-       if (sid) {
-               char *ctx = NULL;
-               u32 len;
-
-               rc = security_secid_to_secctx(sid, &ctx, &len);
-               if (rc) {
-                       audit_log_format(ab, " sid=%u", sid);
-                       allow_changes = 0; /* Something weird, deny request */
-               } else {
-                       audit_log_format(ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
+       audit_log_format(ab, "%s=%d old=%d", function_name, new, old);
+       audit_log_session_info(ab);
+       rc = audit_log_task_context(ab);
+       if (rc)
+               allow_changes = 0; /* Something weird, deny request */
        audit_log_format(ab, " res=%d", allow_changes);
        audit_log_end(ab);
        return rc;
 }
 
-static int audit_do_config_change(char *function_name, int *to_change,
-                                 int new, kuid_t loginuid, u32 sessionid,
-                                 u32 sid)
+static int audit_do_config_change(char *function_name, int *to_change, int new)
 {
        int allow_changes, rc = 0, old = *to_change;
 
@@ -307,8 +296,7 @@ static int audit_do_config_change(char *function_name, int *to_change,
                allow_changes = 1;
 
        if (audit_enabled != AUDIT_OFF) {
-               rc = audit_log_config_change(function_name, new, old, loginuid,
-                                            sessionid, sid, allow_changes);
+               rc = audit_log_config_change(function_name, new, old, allow_changes);
                if (rc)
                        allow_changes = 0;
        }
@@ -322,44 +310,37 @@ static int audit_do_config_change(char *function_name, int *to_change,
        return rc;
 }
 
-static int audit_set_rate_limit(int limit, kuid_t loginuid, u32 sessionid,
-                               u32 sid)
+static int audit_set_rate_limit(int limit)
 {
-       return audit_do_config_change("audit_rate_limit", &audit_rate_limit,
-                                     limit, loginuid, sessionid, sid);
+       return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit);
 }
 
-static int audit_set_backlog_limit(int limit, kuid_t loginuid, u32 sessionid,
-                                  u32 sid)
+static int audit_set_backlog_limit(int limit)
 {
-       return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit,
-                                     limit, loginuid, sessionid, sid);
+       return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit);
 }
 
-static int audit_set_enabled(int state, kuid_t loginuid, u32 sessionid, u32 sid)
+static int audit_set_enabled(int state)
 {
        int rc;
        if (state < AUDIT_OFF || state > AUDIT_LOCKED)
                return -EINVAL;
 
-       rc =  audit_do_config_change("audit_enabled", &audit_enabled, state,
-                                    loginuid, sessionid, sid);
-
+       rc =  audit_do_config_change("audit_enabled", &audit_enabled, state);
        if (!rc)
                audit_ever_enabled |= !!state;
 
        return rc;
 }
 
-static int audit_set_failure(int state, kuid_t loginuid, u32 sessionid, u32 sid)
+static int audit_set_failure(int state)
 {
        if (state != AUDIT_FAIL_SILENT
            && state != AUDIT_FAIL_PRINTK
            && state != AUDIT_FAIL_PANIC)
                return -EINVAL;
 
-       return audit_do_config_change("audit_failure", &audit_failure, state,
-                                     loginuid, sessionid, sid);
+       return audit_do_config_change("audit_failure", &audit_failure, state);
 }
 
 /*
@@ -417,34 +398,53 @@ static void kauditd_send_skb(struct sk_buff *skb)
                consume_skb(skb);
 }
 
-static int kauditd_thread(void *dummy)
+/*
+ * flush_hold_queue - empty the hold queue if auditd appears
+ *
+ * If auditd just started, drain the queue of messages already
+ * sent to syslog/printk.  Remember loss here is ok.  We already
+ * called audit_log_lost() if it didn't go out normally.  So the
+ * race between the skb_dequeue and the next check for audit_pid
+ * doesn't matter.
+ *
+ * If you ever find kauditd to be too slow we can get a perf win
+ * by doing our own locking and keeping better track if there
+ * are messages in this queue.  I don't see the need now, but
+ * in 5 years when I want to play with this again I'll see this
+ * note and still have no friggin idea what i'm thinking today.
+ */
+static void flush_hold_queue(void)
 {
        struct sk_buff *skb;
 
+       if (!audit_default || !audit_pid)
+               return;
+
+       skb = skb_dequeue(&audit_skb_hold_queue);
+       if (likely(!skb))
+               return;
+
+       while (skb && audit_pid) {
+               kauditd_send_skb(skb);
+               skb = skb_dequeue(&audit_skb_hold_queue);
+       }
+
+       /*
+        * if auditd just disappeared but we
+        * dequeued an skb we need to drop ref
+        */
+       if (skb)
+               consume_skb(skb);
+}
+
+static int kauditd_thread(void *dummy)
+{
        set_freezable();
        while (!kthread_should_stop()) {
-               /*
-                * if auditd just started drain the queue of messages already
-                * sent to syslog/printk.  remember loss here is ok.  we already
-                * called audit_log_lost() if it didn't go out normally.  so the
-                * race between the skb_dequeue and the next check for audit_pid
-                * doesn't matter.
-                *
-                * if you ever find kauditd to be too slow we can get a perf win
-                * by doing our own locking and keeping better track if there
-                * are messages in this queue.  I don't see the need now, but
-                * in 5 years when I want to play with this again I'll see this
-                * note and still have no friggin idea what i'm thinking today.
-                */
-               if (audit_default && audit_pid) {
-                       skb = skb_dequeue(&audit_skb_hold_queue);
-                       if (unlikely(skb)) {
-                               while (skb && audit_pid) {
-                                       kauditd_send_skb(skb);
-                                       skb = skb_dequeue(&audit_skb_hold_queue);
-                               }
-                       }
-               }
+               struct sk_buff *skb;
+               DECLARE_WAITQUEUE(wait, current);
+
+               flush_hold_queue();
 
                skb = skb_dequeue(&audit_skb_queue);
                wake_up(&audit_backlog_wait);
@@ -453,19 +453,18 @@ static int kauditd_thread(void *dummy)
                                kauditd_send_skb(skb);
                        else
                                audit_printk_skb(skb);
-               } else {
-                       DECLARE_WAITQUEUE(wait, current);
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       add_wait_queue(&kauditd_wait, &wait);
-
-                       if (!skb_queue_len(&audit_skb_queue)) {
-                               try_to_freeze();
-                               schedule();
-                       }
+                       continue;
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&kauditd_wait, &wait);
 
-                       __set_current_state(TASK_RUNNING);
-                       remove_wait_queue(&kauditd_wait, &wait);
+               if (!skb_queue_len(&audit_skb_queue)) {
+                       try_to_freeze();
+                       schedule();
                }
+
+               __set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kauditd_wait, &wait);
        }
        return 0;
 }
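
The flattened loop uses the usual open-coded wait: mark the task TASK_INTERRUPTIBLE, register on the wait queue, and only then re-check the condition before calling schedule(), so a wake_up() racing with the check cannot be lost. A hedged kernel-style sketch of that idiom — wait_for_work() and work_available() are invented, the wait-queue calls are the standard ones:

    #include <linux/wait.h>
    #include <linux/sched.h>

    static void wait_for_work(wait_queue_head_t *wq, bool (*work_available)(void))
    {
            DECLARE_WAITQUEUE(wait, current);

            set_current_state(TASK_INTERRUPTIBLE);  /* 1: mark as about to sleep    */
            add_wait_queue(wq, &wait);              /* 2: become visible to wake_up */

            if (!work_available())                  /* 3: re-check only after 1+2,  */
                    schedule();                     /*    so a wakeup racing with   */
                                                    /*    the check is not lost     */

            __set_current_state(TASK_RUNNING);
            remove_wait_queue(wq, &wait);
    }
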
@@ -579,13 +578,14 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
                return -EPERM;
 
        switch (msg_type) {
-       case AUDIT_GET:
        case AUDIT_LIST:
-       case AUDIT_LIST_RULES:
-       case AUDIT_SET:
        case AUDIT_ADD:
-       case AUDIT_ADD_RULE:
        case AUDIT_DEL:
+               return -EOPNOTSUPP;
+       case AUDIT_GET:
+       case AUDIT_SET:
+       case AUDIT_LIST_RULES:
+       case AUDIT_ADD_RULE:
        case AUDIT_DEL_RULE:
        case AUDIT_SIGNAL_INFO:
        case AUDIT_TTY_GET:
@@ -608,12 +608,10 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
        return err;
 }
 
-static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
-                                    kuid_t auid, u32 ses, u32 sid)
+static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
 {
        int rc = 0;
-       char *ctx = NULL;
-       u32 len;
+       uid_t uid = from_kuid(&init_user_ns, current_uid());
 
        if (!audit_enabled) {
                *ab = NULL;
@@ -623,33 +621,21 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
        if (unlikely(!*ab))
                return rc;
-       audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
-                        task_tgid_vnr(current),
-                        from_kuid(&init_user_ns, current_uid()),
-                        from_kuid(&init_user_ns, auid), ses);
-       if (sid) {
-               rc = security_secid_to_secctx(sid, &ctx, &len);
-               if (rc)
-                       audit_log_format(*ab, " ssid=%u", sid);
-               else {
-                       audit_log_format(*ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
+       audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid);
+       audit_log_session_info(*ab);
+       audit_log_task_context(*ab);
 
        return rc;
 }
 
 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-       u32                     seq, sid;
+       u32                     seq;
        void                    *data;
        struct audit_status     *status_get, status_set;
        int                     err;
        struct audit_buffer     *ab;
        u16                     msg_type = nlh->nlmsg_type;
-       kuid_t                  loginuid; /* loginuid of sender */
-       u32                     sessionid;
        struct audit_sig_info   *sig_data;
        char                    *ctx = NULL;
        u32                     len;
@@ -668,9 +654,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        return err;
                }
        }
-       loginuid = audit_get_loginuid(current);
-       sessionid = audit_get_sessionid(current);
-       security_task_getsecid(current, &sid);
        seq  = nlh->nlmsg_seq;
        data = nlmsg_data(nlh);
 
@@ -691,14 +674,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        return -EINVAL;
                status_get   = (struct audit_status *)data;
                if (status_get->mask & AUDIT_STATUS_ENABLED) {
-                       err = audit_set_enabled(status_get->enabled,
-                                               loginuid, sessionid, sid);
+                       err = audit_set_enabled(status_get->enabled);
                        if (err < 0)
                                return err;
                }
                if (status_get->mask & AUDIT_STATUS_FAILURE) {
-                       err = audit_set_failure(status_get->failure,
-                                               loginuid, sessionid, sid);
+                       err = audit_set_failure(status_get->failure);
                        if (err < 0)
                                return err;
                }
@@ -706,22 +687,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        int new_pid = status_get->pid;
 
                        if (audit_enabled != AUDIT_OFF)
-                               audit_log_config_change("audit_pid", new_pid,
-                                                       audit_pid, loginuid,
-                                                       sessionid, sid, 1);
-
+                               audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
                        audit_pid = new_pid;
                        audit_nlk_portid = NETLINK_CB(skb).portid;
                }
                if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
-                       err = audit_set_rate_limit(status_get->rate_limit,
-                                                  loginuid, sessionid, sid);
+                       err = audit_set_rate_limit(status_get->rate_limit);
                        if (err < 0)
                                return err;
                }
                if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
-                       err = audit_set_backlog_limit(status_get->backlog_limit,
-                                                     loginuid, sessionid, sid);
+                       err = audit_set_backlog_limit(status_get->backlog_limit);
                break;
        case AUDIT_USER:
        case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
@@ -729,25 +705,22 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (!audit_enabled && msg_type != AUDIT_USER_AVC)
                        return 0;
 
-               err = audit_filter_user();
+               err = audit_filter_user(msg_type);
                if (err == 1) {
                        err = 0;
                        if (msg_type == AUDIT_USER_TTY) {
-                               err = tty_audit_push_task(current, loginuid,
-                                                            sessionid);
+                               err = tty_audit_push_current();
                                if (err)
                                        break;
                        }
-                       audit_log_common_recv_msg(&ab, msg_type,
-                                                 loginuid, sessionid, sid);
-
+                       audit_log_common_recv_msg(&ab, msg_type);
                        if (msg_type != AUDIT_USER_TTY)
                                audit_log_format(ab, " msg='%.1024s'",
                                                 (char *)data);
                        else {
                                int size;
 
-                               audit_log_format(ab, " msg=");
+                               audit_log_format(ab, " data=");
                                size = nlmsg_len(nlh);
                                if (size > 0 &&
                                    ((unsigned char *)data)[size - 1] == '\0')
@@ -758,50 +731,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        audit_log_end(ab);
                }
                break;
-       case AUDIT_ADD:
-       case AUDIT_DEL:
-               if (nlmsg_len(nlh) < sizeof(struct audit_rule))
-                       return -EINVAL;
-               if (audit_enabled == AUDIT_LOCKED) {
-                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                                 loginuid, sessionid, sid);
-
-                       audit_log_format(ab, " audit_enabled=%d res=0",
-                                        audit_enabled);
-                       audit_log_end(ab);
-                       return -EPERM;
-               }
-               /* fallthrough */
-       case AUDIT_LIST:
-               err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
-                                          seq, data, nlmsg_len(nlh),
-                                          loginuid, sessionid, sid);
-               break;
        case AUDIT_ADD_RULE:
        case AUDIT_DEL_RULE:
                if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
                        return -EINVAL;
                if (audit_enabled == AUDIT_LOCKED) {
-                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                                 loginuid, sessionid, sid);
-
-                       audit_log_format(ab, " audit_enabled=%d res=0",
-                                        audit_enabled);
+                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
+                       audit_log_format(ab, " audit_enabled=%d res=0", audit_enabled);
                        audit_log_end(ab);
                        return -EPERM;
                }
                /* fallthrough */
        case AUDIT_LIST_RULES:
                err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
-                                          seq, data, nlmsg_len(nlh),
-                                          loginuid, sessionid, sid);
+                                          seq, data, nlmsg_len(nlh));
                break;
        case AUDIT_TRIM:
                audit_trim_trees();
-
-               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                         loginuid, sessionid, sid);
-
+               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
                audit_log_format(ab, " op=trim res=1");
                audit_log_end(ab);
                break;
@@ -831,8 +778,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                /* OK, here comes... */
                err = audit_tag_tree(old, new);
 
-               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                         loginuid, sessionid, sid);
+               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
 
                audit_log_format(ab, " op=make_equiv old=");
                audit_log_untrustedstring(ab, old);
@@ -871,27 +817,30 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                struct audit_tty_status s;
                struct task_struct *tsk = current;
 
-               spin_lock_irq(&tsk->sighand->siglock);
+               spin_lock(&tsk->sighand->siglock);
                s.enabled = tsk->signal->audit_tty != 0;
-               spin_unlock_irq(&tsk->sighand->siglock);
+               s.log_passwd = tsk->signal->audit_tty_log_passwd;
+               spin_unlock(&tsk->sighand->siglock);
 
                audit_send_reply(NETLINK_CB(skb).portid, seq,
                                 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
                break;
        }
        case AUDIT_TTY_SET: {
-               struct audit_tty_status *s;
+               struct audit_tty_status s;
                struct task_struct *tsk = current;
 
-               if (nlh->nlmsg_len < sizeof(struct audit_tty_status))
-                       return -EINVAL;
-               s = data;
-               if (s->enabled != 0 && s->enabled != 1)
+               memset(&s, 0, sizeof(s));
+               /* guard against past and future API changes */
+               memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len));
+               if ((s.enabled != 0 && s.enabled != 1) ||
+                   (s.log_passwd != 0 && s.log_passwd != 1))
                        return -EINVAL;
 
-               spin_lock_irq(&tsk->sighand->siglock);
-               tsk->signal->audit_tty = s->enabled != 0;
-               spin_unlock_irq(&tsk->sighand->siglock);
+               spin_lock(&tsk->sighand->siglock);
+               tsk->signal->audit_tty = s.enabled;
+               tsk->signal->audit_tty_log_passwd = s.log_passwd;
+               spin_unlock(&tsk->sighand->siglock);
                break;
        }
        default:
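
AUDIT_TTY_SET now zero-fills a local struct and copies at most sizeof(s) bytes from the netlink payload, so an older userspace that sends only the 'enabled' word still works and a newer one that sends extra trailing fields is truncated safely. A small user-space sketch of that guard (the struct and sizes are invented):

    #include <string.h>
    #include <stdio.h>

    struct settings_v2 {            /* current receiver-side layout */
            unsigned int enabled;
            unsigned int log_passwd;
    };

    /* Copy whatever the sender provided, never more than we understand,
     * and leave unknown or omitted fields zeroed. */
    static void read_settings(struct settings_v2 *out, const void *msg, size_t msg_len)
    {
            memset(out, 0, sizeof(*out));
            memcpy(out, msg, msg_len < sizeof(*out) ? msg_len : sizeof(*out));
    }

    int main(void)
    {
            unsigned int old_format = 1;    /* a v1 sender only knows 'enabled' */
            struct settings_v2 s;

            read_settings(&s, &old_format, sizeof(old_format));
            printf("enabled=%u log_passwd=%u\n", s.enabled, s.log_passwd);
            return 0;
    }
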
@@ -1434,6 +1383,14 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
        kfree(pathname);
 }
 
+void audit_log_session_info(struct audit_buffer *ab)
+{
+       u32 sessionid = audit_get_sessionid(current);
+       uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
+
+       audit_log_format(ab, " auid=%u ses=%u\n", auid, sessionid);
+}
+
 void audit_log_key(struct audit_buffer *ab, char *key)
 {
        audit_log_format(ab, " key=");
@@ -1443,6 +1400,224 @@ void audit_log_key(struct audit_buffer *ab, char *key)
                audit_log_format(ab, "(null)");
 }
 
+void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
+{
+       int i;
+
+       audit_log_format(ab, " %s=", prefix);
+       CAP_FOR_EACH_U32(i) {
+               audit_log_format(ab, "%08x",
+                                cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
+       }
+}
+
+void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
+{
+       kernel_cap_t *perm = &name->fcap.permitted;
+       kernel_cap_t *inh = &name->fcap.inheritable;
+       int log = 0;
+
+       if (!cap_isclear(*perm)) {
+               audit_log_cap(ab, "cap_fp", perm);
+               log = 1;
+       }
+       if (!cap_isclear(*inh)) {
+               audit_log_cap(ab, "cap_fi", inh);
+               log = 1;
+       }
+
+       if (log)
+               audit_log_format(ab, " cap_fe=%d cap_fver=%x",
+                                name->fcap.fE, name->fcap_ver);
+}
+
+static inline int audit_copy_fcaps(struct audit_names *name,
+                                  const struct dentry *dentry)
+{
+       struct cpu_vfs_cap_data caps;
+       int rc;
+
+       if (!dentry)
+               return 0;
+
+       rc = get_vfs_caps_from_disk(dentry, &caps);
+       if (rc)
+               return rc;
+
+       name->fcap.permitted = caps.permitted;
+       name->fcap.inheritable = caps.inheritable;
+       name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
+       name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >>
+                               VFS_CAP_REVISION_SHIFT;
+
+       return 0;
+}
+
+/* Copy inode data into an audit_names. */
+void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
+                     const struct inode *inode)
+{
+       name->ino   = inode->i_ino;
+       name->dev   = inode->i_sb->s_dev;
+       name->mode  = inode->i_mode;
+       name->uid   = inode->i_uid;
+       name->gid   = inode->i_gid;
+       name->rdev  = inode->i_rdev;
+       security_inode_getsecid(inode, &name->osid);
+       audit_copy_fcaps(name, dentry);
+}
+
+/**
+ * audit_log_name - produce AUDIT_PATH record from struct audit_names
+ * @context: audit_context for the task
+ * @n: audit_names structure with reportable details
+ * @path: optional path to report instead of audit_names->name
+ * @record_num: record number to report when handling a list of names
+ * @call_panic: optional pointer to int that will be updated if secid fails
+ */
+void audit_log_name(struct audit_context *context, struct audit_names *n,
+                   struct path *path, int record_num, int *call_panic)
+{
+       struct audit_buffer *ab;
+       ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
+       if (!ab)
+               return;
+
+       audit_log_format(ab, "item=%d", record_num);
+
+       if (path)
+               audit_log_d_path(ab, " name=", path);
+       else if (n->name) {
+               switch (n->name_len) {
+               case AUDIT_NAME_FULL:
+                       /* log the full path */
+                       audit_log_format(ab, " name=");
+                       audit_log_untrustedstring(ab, n->name->name);
+                       break;
+               case 0:
+                       /* name was specified as a relative path and the
+                        * directory component is the cwd */
+                       audit_log_d_path(ab, " name=", &context->pwd);
+                       break;
+               default:
+                       /* log the name's directory component */
+                       audit_log_format(ab, " name=");
+                       audit_log_n_untrustedstring(ab, n->name->name,
+                                                   n->name_len);
+               }
+       } else
+               audit_log_format(ab, " name=(null)");
+
+       if (n->ino != (unsigned long)-1) {
+               audit_log_format(ab, " inode=%lu"
+                                " dev=%02x:%02x mode=%#ho"
+                                " ouid=%u ogid=%u rdev=%02x:%02x",
+                                n->ino,
+                                MAJOR(n->dev),
+                                MINOR(n->dev),
+                                n->mode,
+                                from_kuid(&init_user_ns, n->uid),
+                                from_kgid(&init_user_ns, n->gid),
+                                MAJOR(n->rdev),
+                                MINOR(n->rdev));
+       }
+       if (n->osid != 0) {
+               char *ctx = NULL;
+               u32 len;
+               if (security_secid_to_secctx(
+                       n->osid, &ctx, &len)) {
+                       audit_log_format(ab, " osid=%u", n->osid);
+                       if (call_panic)
+                               *call_panic = 2;
+               } else {
+                       audit_log_format(ab, " obj=%s", ctx);
+                       security_release_secctx(ctx, len);
+               }
+       }
+
+       audit_log_fcaps(ab, n);
+       audit_log_end(ab);
+}
+
+int audit_log_task_context(struct audit_buffer *ab)
+{
+       char *ctx = NULL;
+       unsigned len;
+       int error;
+       u32 sid;
+
+       security_task_getsecid(current, &sid);
+       if (!sid)
+               return 0;
+
+       error = security_secid_to_secctx(sid, &ctx, &len);
+       if (error) {
+               if (error != -EINVAL)
+                       goto error_path;
+               return 0;
+       }
+
+       audit_log_format(ab, " subj=%s", ctx);
+       security_release_secctx(ctx, len);
+       return 0;
+
+error_path:
+       audit_panic("error in audit_log_task_context");
+       return error;
+}
+EXPORT_SYMBOL(audit_log_task_context);
+
+void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
+{
+       const struct cred *cred;
+       char name[sizeof(tsk->comm)];
+       struct mm_struct *mm = tsk->mm;
+       char *tty;
+
+       if (!ab)
+               return;
+
+       /* tsk == current */
+       cred = current_cred();
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
+               tty = tsk->signal->tty->name;
+       else
+               tty = "(none)";
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       audit_log_format(ab,
+                        " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
+                        " euid=%u suid=%u fsuid=%u"
+                        " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
+                        sys_getppid(),
+                        tsk->pid,
+                        from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
+                        from_kuid(&init_user_ns, cred->uid),
+                        from_kgid(&init_user_ns, cred->gid),
+                        from_kuid(&init_user_ns, cred->euid),
+                        from_kuid(&init_user_ns, cred->suid),
+                        from_kuid(&init_user_ns, cred->fsuid),
+                        from_kgid(&init_user_ns, cred->egid),
+                        from_kgid(&init_user_ns, cred->sgid),
+                        from_kgid(&init_user_ns, cred->fsgid),
+                        audit_get_sessionid(tsk), tty);
+
+       get_task_comm(name, tsk);
+       audit_log_format(ab, " comm=");
+       audit_log_untrustedstring(ab, name);
+
+       if (mm) {
+               down_read(&mm->mmap_sem);
+               if (mm->exe_file)
+                       audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
+               up_read(&mm->mmap_sem);
+       }
+       audit_log_task_context(ab);
+}
+EXPORT_SYMBOL(audit_log_task_info);
+
 /**
  * audit_log_link_denied - report a link restriction denial
  * @operation: specific link operation
@@ -1451,19 +1626,28 @@ void audit_log_key(struct audit_buffer *ab, char *key)
 void audit_log_link_denied(const char *operation, struct path *link)
 {
        struct audit_buffer *ab;
+       struct audit_names *name;
+
+       name = kzalloc(sizeof(*name), GFP_NOFS);
+       if (!name)
+               return;
 
+       /* Generate AUDIT_ANOM_LINK with subject, operation, outcome. */
        ab = audit_log_start(current->audit_context, GFP_KERNEL,
                             AUDIT_ANOM_LINK);
        if (!ab)
-               return;
-       audit_log_format(ab, "op=%s action=denied", operation);
-       audit_log_format(ab, " pid=%d comm=", current->pid);
-       audit_log_untrustedstring(ab, current->comm);
-       audit_log_d_path(ab, " path=", link);
-       audit_log_format(ab, " dev=");
-       audit_log_untrustedstring(ab, link->dentry->d_inode->i_sb->s_id);
-       audit_log_format(ab, " ino=%lu", link->dentry->d_inode->i_ino);
+               goto out;
+       audit_log_format(ab, "op=%s", operation);
+       audit_log_task_info(ab, current);
+       audit_log_format(ab, " res=0");
        audit_log_end(ab);
+
+       /* Generate AUDIT_PATH record with object. */
+       name->type = AUDIT_TYPE_NORMAL;
+       audit_copy_inode(name, link->dentry, link->dentry->d_inode);
+       audit_log_name(current->audit_context, name, link, 0, NULL);
+out:
+       kfree(name);
 }
 
 /**
index 11468d99dad00b9f168071701c35965cf49ae585..1c95131ef760c2f45d140efdccfd2d08636b76d7 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/audit.h>
 #include <linux/skbuff.h>
+#include <uapi/linux/mqueue.h>
 
 /* 0 = no checking
    1 = put_count checking
 */
 #define AUDIT_DEBUG 0
 
+/* AUDIT_NAMES is the number of slots we reserve in the audit_context
+ * for saving names from getname().  If we get more names we will allocate
+ * a name dynamically and also add those to the list anchored by names_list. */
+#define AUDIT_NAMES    5
+
 /* At task start time, the audit_state is set in the audit_context using
    a per-task filter.  At syscall entry, the audit_state is augmented by
    the syscall filter. */
@@ -59,8 +65,158 @@ struct audit_entry {
        struct audit_krule      rule;
 };
 
+struct audit_cap_data {
+       kernel_cap_t            permitted;
+       kernel_cap_t            inheritable;
+       union {
+               unsigned int    fE;             /* effective bit of file cap */
+               kernel_cap_t    effective;      /* effective set of process */
+       };
+};
+
+/* When fs/namei.c:getname() is called, we store the pointer in name and
+ * we don't let putname() free it (instead we free all of the saved
+ * pointers at syscall exit time).
+ *
+ * Further, in fs/namei.c:path_lookup() we store the inode and device.
+ */
+struct audit_names {
+       struct list_head        list;           /* audit_context->names_list */
+
+       struct filename         *name;
+       int                     name_len;       /* number of chars to log */
+       bool                    name_put;       /* call __putname()? */
+
+       unsigned long           ino;
+       dev_t                   dev;
+       umode_t                 mode;
+       kuid_t                  uid;
+       kgid_t                  gid;
+       dev_t                   rdev;
+       u32                     osid;
+       struct audit_cap_data   fcap;
+       unsigned int            fcap_ver;
+       unsigned char           type;           /* record type */
+       /*
+        * This was an allocated audit_names and not from the array of
+        * names allocated in the task audit context.  Thus this name
+        * should be freed on syscall exit.
+        */
+       bool                    should_free;
+};
+
+/* The per-task audit context. */
+struct audit_context {
+       int                 dummy;      /* must be the first element */
+       int                 in_syscall; /* 1 if task is in a syscall */
+       enum audit_state    state, current_state;
+       unsigned int        serial;     /* serial number for record */
+       int                 major;      /* syscall number */
+       struct timespec     ctime;      /* time of syscall entry */
+       unsigned long       argv[4];    /* syscall arguments */
+       long                return_code;/* syscall return code */
+       u64                 prio;
+       int                 return_valid; /* return code is valid */
+       /*
+        * The names_list is the list of all audit_names collected during this
+        * syscall.  The first AUDIT_NAMES entries in the names_list will
+        * actually be from the preallocated_names array for performance
+        * reasons.  Except during allocation they should never be referenced
+        * through the preallocated_names array and should only be found/used
+        * by running the names_list.
+        */
+       struct audit_names  preallocated_names[AUDIT_NAMES];
+       int                 name_count; /* total records in names_list */
+       struct list_head    names_list; /* struct audit_names->list anchor */
+       char                *filterkey; /* key for rule that triggered record */
+       struct path         pwd;
+       struct audit_aux_data *aux;
+       struct audit_aux_data *aux_pids;
+       struct sockaddr_storage *sockaddr;
+       size_t sockaddr_len;
+                               /* Save things to print about task_struct */
+       pid_t               pid, ppid;
+       kuid_t              uid, euid, suid, fsuid;
+       kgid_t              gid, egid, sgid, fsgid;
+       unsigned long       personality;
+       int                 arch;
+
+       pid_t               target_pid;
+       kuid_t              target_auid;
+       kuid_t              target_uid;
+       unsigned int        target_sessionid;
+       u32                 target_sid;
+       char                target_comm[TASK_COMM_LEN];
+
+       struct audit_tree_refs *trees, *first_trees;
+       struct list_head killed_trees;
+       int tree_count;
+
+       int type;
+       union {
+               struct {
+                       int nargs;
+                       long args[6];
+               } socketcall;
+               struct {
+                       kuid_t                  uid;
+                       kgid_t                  gid;
+                       umode_t                 mode;
+                       u32                     osid;
+                       int                     has_perm;
+                       uid_t                   perm_uid;
+                       gid_t                   perm_gid;
+                       umode_t                 perm_mode;
+                       unsigned long           qbytes;
+               } ipc;
+               struct {
+                       mqd_t                   mqdes;
+                       struct mq_attr          mqstat;
+               } mq_getsetattr;
+               struct {
+                       mqd_t                   mqdes;
+                       int                     sigev_signo;
+               } mq_notify;
+               struct {
+                       mqd_t                   mqdes;
+                       size_t                  msg_len;
+                       unsigned int            msg_prio;
+                       struct timespec         abs_timeout;
+               } mq_sendrecv;
+               struct {
+                       int                     oflag;
+                       umode_t                 mode;
+                       struct mq_attr          attr;
+               } mq_open;
+               struct {
+                       pid_t                   pid;
+                       struct audit_cap_data   cap;
+               } capset;
+               struct {
+                       int                     fd;
+                       int                     flags;
+               } mmap;
+       };
+       int fds[2];
+
+#if AUDIT_DEBUG
+       int                 put_count;
+       int                 ino_count;
+#endif
+};
+
 extern int audit_ever_enabled;
 
+extern void audit_copy_inode(struct audit_names *name,
+                            const struct dentry *dentry,
+                            const struct inode *inode);
+extern void audit_log_cap(struct audit_buffer *ab, char *prefix,
+                         kernel_cap_t *cap);
+extern void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name);
+extern void audit_log_name(struct audit_context *context,
+                          struct audit_names *n, struct path *path,
+                          int record_num, int *call_panic);
+
 extern int audit_pid;
 
 #define AUDIT_INODE_BUCKETS    32
index 267436826c3bc179678a57de72cf08220fa8faca..6bd4a90d1991cdf84e5d14fa187af4a3b3f390f9 100644 (file)
@@ -310,121 +310,83 @@ static u32 audit_to_op(u32 op)
        return n;
 }
 
-
-/* Translate struct audit_rule to kernel's rule respresentation.
- * Exists for backward compatibility with userspace. */
-static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
+/* check if an audit field is valid */
+static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
 {
-       struct audit_entry *entry;
-       int err = 0;
-       int i;
-
-       entry = audit_to_entry_common(rule);
-       if (IS_ERR(entry))
-               goto exit_nofree;
-
-       for (i = 0; i < rule->field_count; i++) {
-               struct audit_field *f = &entry->rule.fields[i];
-               u32 n;
-
-               n = rule->fields[i] & (AUDIT_NEGATE|AUDIT_OPERATORS);
-
-               /* Support for legacy operators where
-                * AUDIT_NEGATE bit signifies != and otherwise assumes == */
-               if (n & AUDIT_NEGATE)
-                       f->op = Audit_not_equal;
-               else if (!n)
-                       f->op = Audit_equal;
-               else
-                       f->op = audit_to_op(n);
-
-               entry->rule.vers_ops = (n & AUDIT_OPERATORS) ? 2 : 1;
-
-               f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS);
-               f->val = rule->values[i];
-               f->uid = INVALID_UID;
-               f->gid = INVALID_GID;
-
-               err = -EINVAL;
-               if (f->op == Audit_bad)
-                       goto exit_free;
-
-               switch(f->type) {
-               default:
-                       goto exit_free;
-               case AUDIT_UID:
-               case AUDIT_EUID:
-               case AUDIT_SUID:
-               case AUDIT_FSUID:
-               case AUDIT_LOGINUID:
-                       /* bit ops not implemented for uid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
-                       f->uid = make_kuid(current_user_ns(), f->val);
-                       if (!uid_valid(f->uid))
-                               goto exit_free;
-                       break;
-               case AUDIT_GID:
-               case AUDIT_EGID:
-               case AUDIT_SGID:
-               case AUDIT_FSGID:
-                       /* bit ops not implemented for gid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
-                       f->gid = make_kgid(current_user_ns(), f->val);
-                       if (!gid_valid(f->gid))
-                               goto exit_free;
-                       break;
-               case AUDIT_PID:
-               case AUDIT_PERS:
-               case AUDIT_MSGTYPE:
-               case AUDIT_PPID:
-               case AUDIT_DEVMAJOR:
-               case AUDIT_DEVMINOR:
-               case AUDIT_EXIT:
-               case AUDIT_SUCCESS:
-                       /* bit ops are only useful on syscall args */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-                       break;
-               case AUDIT_ARG0:
-               case AUDIT_ARG1:
-               case AUDIT_ARG2:
-               case AUDIT_ARG3:
-                       break;
-               /* arch is only allowed to be = or != */
-               case AUDIT_ARCH:
-                       if (f->op != Audit_not_equal && f->op != Audit_equal)
-                               goto exit_free;
-                       entry->rule.arch_f = f;
-                       break;
-               case AUDIT_PERM:
-                       if (f->val & ~15)
-                               goto exit_free;
-                       break;
-               case AUDIT_FILETYPE:
-                       if (f->val & ~S_IFMT)
-                               goto exit_free;
-                       break;
-               case AUDIT_INODE:
-                       err = audit_to_inode(&entry->rule, f);
-                       if (err)
-                               goto exit_free;
-                       break;
-               }
-       }
-
-       if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal)
-               entry->rule.inode_f = NULL;
-
-exit_nofree:
-       return entry;
+       switch(f->type) {
+       case AUDIT_MSGTYPE:
+               if (entry->rule.listnr != AUDIT_FILTER_TYPE &&
+                   entry->rule.listnr != AUDIT_FILTER_USER)
+                       return -EINVAL;
+               break;
+       };
 
-exit_free:
-       audit_free_rule(entry);
-       return ERR_PTR(err);
+       switch(f->type) {
+       default:
+               return -EINVAL;
+       case AUDIT_UID:
+       case AUDIT_EUID:
+       case AUDIT_SUID:
+       case AUDIT_FSUID:
+       case AUDIT_LOGINUID:
+       case AUDIT_OBJ_UID:
+       case AUDIT_GID:
+       case AUDIT_EGID:
+       case AUDIT_SGID:
+       case AUDIT_FSGID:
+       case AUDIT_OBJ_GID:
+       case AUDIT_PID:
+       case AUDIT_PERS:
+       case AUDIT_MSGTYPE:
+       case AUDIT_PPID:
+       case AUDIT_DEVMAJOR:
+       case AUDIT_DEVMINOR:
+       case AUDIT_EXIT:
+       case AUDIT_SUCCESS:
+               /* bit ops are only useful on syscall args */
+               if (f->op == Audit_bitmask || f->op == Audit_bittest)
+                       return -EINVAL;
+               break;
+       case AUDIT_ARG0:
+       case AUDIT_ARG1:
+       case AUDIT_ARG2:
+       case AUDIT_ARG3:
+       case AUDIT_SUBJ_USER:
+       case AUDIT_SUBJ_ROLE:
+       case AUDIT_SUBJ_TYPE:
+       case AUDIT_SUBJ_SEN:
+       case AUDIT_SUBJ_CLR:
+       case AUDIT_OBJ_USER:
+       case AUDIT_OBJ_ROLE:
+       case AUDIT_OBJ_TYPE:
+       case AUDIT_OBJ_LEV_LOW:
+       case AUDIT_OBJ_LEV_HIGH:
+       case AUDIT_WATCH:
+       case AUDIT_DIR:
+       case AUDIT_FILTERKEY:
+               break;
+       case AUDIT_LOGINUID_SET:
+               if ((f->val != 0) && (f->val != 1))
+                       return -EINVAL;
+       /* FALL THROUGH */
+       case AUDIT_ARCH:
+               if (f->op != Audit_not_equal && f->op != Audit_equal)
+                       return -EINVAL;
+               break;
+       case AUDIT_PERM:
+               if (f->val & ~15)
+                       return -EINVAL;
+               break;
+       case AUDIT_FILETYPE:
+               if (f->val & ~S_IFMT)
+                       return -EINVAL;
+               break;
+       case AUDIT_FIELD_COMPARE:
+               if (f->val > AUDIT_MAX_FIELD_COMPARE)
+                       return -EINVAL;
+               break;
+       };
+       return 0;
 }
 
 /* Translate struct audit_rule_data to kernel's rule representation. */
@@ -459,17 +421,25 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                f->gid = INVALID_GID;
                f->lsm_str = NULL;
                f->lsm_rule = NULL;
-               switch(f->type) {
+
+               /* Support legacy tests for a valid loginuid */
+               if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
+                       f->type = AUDIT_LOGINUID_SET;
+                       f->val = 0;
+               }
+
+               err = audit_field_valid(entry, f);
+               if (err)
+                       goto exit_free;
+
+               err = -EINVAL;
+               switch (f->type) {
+               case AUDIT_LOGINUID:
                case AUDIT_UID:
                case AUDIT_EUID:
                case AUDIT_SUID:
                case AUDIT_FSUID:
-               case AUDIT_LOGINUID:
                case AUDIT_OBJ_UID:
-                       /* bit ops not implemented for uid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
                        f->uid = make_kuid(current_user_ns(), f->val);
                        if (!uid_valid(f->uid))
                                goto exit_free;
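
The legacy-compatibility branch above treats a rule that tests AUDIT_LOGINUID against 4294967295 (the unsigned form of -1, meaning "no loginuid ever assigned") as the new boolean AUDIT_LOGINUID_SET field with value 0. A tiny stand-alone sketch of that equivalence (names and values invented for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned int loginuid = 4294967295u;    /* -1: never set by pam_loginuid */

            /* old rule "loginuid == -1"  <=>  new rule "loginuid_set == 0" */
            int loginuid_set = (loginuid != 4294967295u);

            printf("loginuid_set=%d\n", loginuid_set);
            return 0;
    }
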
@@ -479,27 +449,10 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                case AUDIT_SGID:
                case AUDIT_FSGID:
                case AUDIT_OBJ_GID:
-                       /* bit ops not implemented for gid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
                        f->gid = make_kgid(current_user_ns(), f->val);
                        if (!gid_valid(f->gid))
                                goto exit_free;
                        break;
-               case AUDIT_PID:
-               case AUDIT_PERS:
-               case AUDIT_MSGTYPE:
-               case AUDIT_PPID:
-               case AUDIT_DEVMAJOR:
-               case AUDIT_DEVMINOR:
-               case AUDIT_EXIT:
-               case AUDIT_SUCCESS:
-               case AUDIT_ARG0:
-               case AUDIT_ARG1:
-               case AUDIT_ARG2:
-               case AUDIT_ARG3:
-                       break;
                case AUDIT_ARCH:
                        entry->rule.arch_f = f;
                        break;
@@ -570,20 +523,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                        entry->rule.buflen += f->val;
                        entry->rule.filterkey = str;
                        break;
-               case AUDIT_PERM:
-                       if (f->val & ~15)
-                               goto exit_free;
-                       break;
-               case AUDIT_FILETYPE:
-                       if (f->val & ~S_IFMT)
-                               goto exit_free;
-                       break;
-               case AUDIT_FIELD_COMPARE:
-                       if (f->val > AUDIT_MAX_FIELD_COMPARE)
-                               goto exit_free;
-                       break;
-               default:
-                       goto exit_free;
                }
        }
 
@@ -613,36 +552,6 @@ static inline size_t audit_pack_string(void **bufp, const char *str)
        return len;
 }
 
-/* Translate kernel rule respresentation to struct audit_rule.
- * Exists for backward compatibility with userspace. */
-static struct audit_rule *audit_krule_to_rule(struct audit_krule *krule)
-{
-       struct audit_rule *rule;
-       int i;
-
-       rule = kzalloc(sizeof(*rule), GFP_KERNEL);
-       if (unlikely(!rule))
-               return NULL;
-
-       rule->flags = krule->flags | krule->listnr;
-       rule->action = krule->action;
-       rule->field_count = krule->field_count;
-       for (i = 0; i < rule->field_count; i++) {
-               rule->values[i] = krule->fields[i].val;
-               rule->fields[i] = krule->fields[i].type;
-
-               if (krule->vers_ops == 1) {
-                       if (krule->fields[i].op == Audit_not_equal)
-                               rule->fields[i] |= AUDIT_NEGATE;
-               } else {
-                       rule->fields[i] |= audit_ops[krule->fields[i].op];
-               }
-       }
-       for (i = 0; i < AUDIT_BITMASK_SIZE; i++) rule->mask[i] = krule->mask[i];
-
-       return rule;
-}
-
 /* Translate kernel rule representation to struct audit_rule_data. */
 static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
 {
@@ -1055,35 +964,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
        return ret;
 }
 
-/* List rules using struct audit_rule.  Exists for backward
- * compatibility with userspace. */
-static void audit_list(int pid, int seq, struct sk_buff_head *q)
-{
-       struct sk_buff *skb;
-       struct audit_krule *r;
-       int i;
-
-       /* This is a blocking read, so use audit_filter_mutex instead of rcu
-        * iterator to sync with list writers. */
-       for (i=0; i<AUDIT_NR_FILTERS; i++) {
-               list_for_each_entry(r, &audit_rules_list[i], list) {
-                       struct audit_rule *rule;
-
-                       rule = audit_krule_to_rule(r);
-                       if (unlikely(!rule))
-                               break;
-                       skb = audit_make_reply(pid, seq, AUDIT_LIST, 0, 1,
-                                        rule, sizeof(*rule));
-                       if (skb)
-                               skb_queue_tail(q, skb);
-                       kfree(rule);
-               }
-       }
-       skb = audit_make_reply(pid, seq, AUDIT_LIST, 1, 1, NULL, 0);
-       if (skb)
-               skb_queue_tail(q, skb);
-}
-
 /* List rules using struct audit_rule_data. */
 static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
 {
@@ -1113,11 +993,11 @@ static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
 }
 
 /* Log rule additions and removals */
-static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
-                                 char *action, struct audit_krule *rule,
-                                 int res)
+static void audit_log_rule_change(char *action, struct audit_krule *rule, int res)
 {
        struct audit_buffer *ab;
+       uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current));
+       u32 sessionid = audit_get_sessionid(current);
 
        if (!audit_enabled)
                return;
@@ -1125,18 +1005,8 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (!ab)
                return;
-       audit_log_format(ab, "auid=%u ses=%u",
-                        from_kuid(&init_user_ns, loginuid), sessionid);
-       if (sid) {
-               char *ctx = NULL;
-               u32 len;
-               if (security_secid_to_secctx(sid, &ctx, &len))
-                       audit_log_format(ab, " ssid=%u", sid);
-               else {
-                       audit_log_format(ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
+       audit_log_format(ab, "auid=%u ses=%u", loginuid, sessionid);
+       audit_log_task_context(ab);
        audit_log_format(ab, " op=");
        audit_log_string(ab, action);
        audit_log_key(ab, rule->filterkey);
@@ -1151,12 +1021,8 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
  * @seq: netlink audit message sequence (serial) number
  * @data: payload data
  * @datasz: size of payload data
- * @loginuid: loginuid of sender
- * @sessionid: sessionid for netlink audit message
- * @sid: SE Linux Security ID of sender
  */
-int audit_receive_filter(int type, int pid, int seq, void *data,
-                        size_t datasz, kuid_t loginuid, u32 sessionid, u32 sid)
+int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz)
 {
        struct task_struct *tsk;
        struct audit_netlink_list *dest;
@@ -1164,7 +1030,6 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
        struct audit_entry *entry;
 
        switch (type) {
-       case AUDIT_LIST:
        case AUDIT_LIST_RULES:
                /* We can't just spew out the rules here because we might fill
                 * the available socket buffer space and deadlock waiting for
@@ -1179,10 +1044,7 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
                skb_queue_head_init(&dest->q);
 
                mutex_lock(&audit_filter_mutex);
-               if (type == AUDIT_LIST)
-                       audit_list(pid, seq, &dest->q);
-               else
-                       audit_list_rules(pid, seq, &dest->q);
+               audit_list_rules(pid, seq, &dest->q);
                mutex_unlock(&audit_filter_mutex);
 
                tsk = kthread_run(audit_send_list, dest, "audit_send_list");
@@ -1192,35 +1054,23 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
                        err = PTR_ERR(tsk);
                }
                break;
-       case AUDIT_ADD:
        case AUDIT_ADD_RULE:
-               if (type == AUDIT_ADD)
-                       entry = audit_rule_to_entry(data);
-               else
-                       entry = audit_data_to_entry(data, datasz);
+               entry = audit_data_to_entry(data, datasz);
                if (IS_ERR(entry))
                        return PTR_ERR(entry);
 
                err = audit_add_rule(entry);
-               audit_log_rule_change(loginuid, sessionid, sid, "add rule",
-                                     &entry->rule, !err);
-
+               audit_log_rule_change("add rule", &entry->rule, !err);
                if (err)
                        audit_free_rule(entry);
                break;
-       case AUDIT_DEL:
        case AUDIT_DEL_RULE:
-               if (type == AUDIT_DEL)
-                       entry = audit_rule_to_entry(data);
-               else
-                       entry = audit_data_to_entry(data, datasz);
+               entry = audit_data_to_entry(data, datasz);
                if (IS_ERR(entry))
                        return PTR_ERR(entry);
 
                err = audit_del_rule(entry);
-               audit_log_rule_change(loginuid, sessionid, sid, "remove rule",
-                                     &entry->rule, !err);
-
+               audit_log_rule_change("remove rule", &entry->rule, !err);
                audit_free_rule(entry);
                break;
        default:
@@ -1358,7 +1208,7 @@ int audit_compare_dname_path(const char *dname, const char *path, int parentlen)
        return strncmp(p, dname, dlen);
 }
 
-static int audit_filter_user_rules(struct audit_krule *rule,
+static int audit_filter_user_rules(struct audit_krule *rule, int type,
                                   enum audit_state *state)
 {
        int i;
@@ -1382,6 +1232,13 @@ static int audit_filter_user_rules(struct audit_krule *rule,
                        result = audit_uid_comparator(audit_get_loginuid(current),
                                                  f->op, f->uid);
                        break;
+               case AUDIT_LOGINUID_SET:
+                       result = audit_comparator(audit_loginuid_set(current),
+                                                 f->op, f->val);
+                       break;
+               case AUDIT_MSGTYPE:
+                       result = audit_comparator(type, f->op, f->val);
+                       break;
                case AUDIT_SUBJ_USER:
                case AUDIT_SUBJ_ROLE:
                case AUDIT_SUBJ_TYPE:
@@ -1408,7 +1265,7 @@ static int audit_filter_user_rules(struct audit_krule *rule,
        return 1;
 }
 
-int audit_filter_user(void)
+int audit_filter_user(int type)
 {
        enum audit_state state = AUDIT_DISABLED;
        struct audit_entry *e;
@@ -1416,7 +1273,7 @@ int audit_filter_user(void)
 
        rcu_read_lock();
        list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) {
-               if (audit_filter_user_rules(&e->rule, &state)) {
+               if (audit_filter_user_rules(&e->rule, type, &state)) {
                        if (state == AUDIT_DISABLED)
                                ret = 0;
                        break;
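
The audit_data_to_entry() hunk above maps the legacy userspace idiom of filtering on AUDIT_LOGINUID with the value 4294967295 (i.e. (uid_t)-1, "loginuid never set") onto the new AUDIT_LOGINUID_SET field with val 0. A standalone userspace sketch of that translation, using illustrative field IDs rather than the real <linux/audit.h> values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field IDs only -- not the real <linux/audit.h> values. */
#define DEMO_AUDIT_LOGINUID      1
#define DEMO_AUDIT_LOGINUID_SET  2

struct field {
	uint32_t type;
	uint32_t val;
};

/* Rewrite the legacy "loginuid == -1" test as "loginuid was never set". */
static void translate_legacy_loginuid(struct field *f)
{
	if (f->type == DEMO_AUDIT_LOGINUID && f->val == (uint32_t)-1) {
		f->type = DEMO_AUDIT_LOGINUID_SET;
		f->val = 0;		/* 0 == unset, 1 == set */
	}
}

int main(void)
{
	struct field f = { DEMO_AUDIT_LOGINUID, 4294967295u };

	translate_legacy_loginuid(&f);
	printf("type=%u val=%u\n", f.type, f.val);
	return 0;
}
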
index c68229411a7c20afbd6903f7272d23afbc0e9ac0..3c8a601324a280fe9224c872f24e2995243da830 100644 (file)
 #define AUDITSC_SUCCESS 1
 #define AUDITSC_FAILURE 2
 
-/* AUDIT_NAMES is the number of slots we reserve in the audit_context
- * for saving names from getname().  If we get more names we will allocate
- * a name dynamically and also add those to the list anchored by names_list. */
-#define AUDIT_NAMES    5
-
 /* no execve audit message should be longer than this (userspace limits) */
 #define MAX_EXECVE_AUDIT_LEN 7500
 
@@ -90,44 +85,6 @@ int audit_n_rules;
 /* determines whether we collect data for signals sent */
 int audit_signals;
 
-struct audit_cap_data {
-       kernel_cap_t            permitted;
-       kernel_cap_t            inheritable;
-       union {
-               unsigned int    fE;             /* effective bit of a file capability */
-               kernel_cap_t    effective;      /* effective set of a process */
-       };
-};
-
-/* When fs/namei.c:getname() is called, we store the pointer in name and
- * we don't let putname() free it (instead we free all of the saved
- * pointers at syscall exit time).
- *
- * Further, in fs/namei.c:path_lookup() we store the inode and device.
- */
-struct audit_names {
-       struct list_head        list;           /* audit_context->names_list */
-       struct filename *name;
-       unsigned long           ino;
-       dev_t                   dev;
-       umode_t                 mode;
-       kuid_t                  uid;
-       kgid_t                  gid;
-       dev_t                   rdev;
-       u32                     osid;
-       struct audit_cap_data    fcap;
-       unsigned int            fcap_ver;
-       int                     name_len;       /* number of name's characters to log */
-       unsigned char           type;           /* record type */
-       bool                    name_put;       /* call __putname() for this name */
-       /*
-        * This was an allocated audit_names and not from the array of
-        * names allocated in the task audit context.  Thus this name
-        * should be freed on syscall exit
-        */
-       bool                    should_free;
-};
-
 struct audit_aux_data {
        struct audit_aux_data   *next;
        int                     type;
@@ -175,106 +132,6 @@ struct audit_tree_refs {
        struct audit_chunk *c[31];
 };
 
-/* The per-task audit context. */
-struct audit_context {
-       int                 dummy;      /* must be the first element */
-       int                 in_syscall; /* 1 if task is in a syscall */
-       enum audit_state    state, current_state;
-       unsigned int        serial;     /* serial number for record */
-       int                 major;      /* syscall number */
-       struct timespec     ctime;      /* time of syscall entry */
-       unsigned long       argv[4];    /* syscall arguments */
-       long                return_code;/* syscall return code */
-       u64                 prio;
-       int                 return_valid; /* return code is valid */
-       /*
-        * The names_list is the list of all audit_names collected during this
-        * syscall.  The first AUDIT_NAMES entries in the names_list will
-        * actually be from the preallocated_names array for performance
-        * reasons.  Except during allocation they should never be referenced
-        * through the preallocated_names array and should only be found/used
-        * by running the names_list.
-        */
-       struct audit_names  preallocated_names[AUDIT_NAMES];
-       int                 name_count; /* total records in names_list */
-       struct list_head    names_list; /* anchor for struct audit_names->list */
-       char *              filterkey;  /* key for rule that triggered record */
-       struct path         pwd;
-       struct audit_aux_data *aux;
-       struct audit_aux_data *aux_pids;
-       struct sockaddr_storage *sockaddr;
-       size_t sockaddr_len;
-                               /* Save things to print about task_struct */
-       pid_t               pid, ppid;
-       kuid_t              uid, euid, suid, fsuid;
-       kgid_t              gid, egid, sgid, fsgid;
-       unsigned long       personality;
-       int                 arch;
-
-       pid_t               target_pid;
-       kuid_t              target_auid;
-       kuid_t              target_uid;
-       unsigned int        target_sessionid;
-       u32                 target_sid;
-       char                target_comm[TASK_COMM_LEN];
-
-       struct audit_tree_refs *trees, *first_trees;
-       struct list_head killed_trees;
-       int tree_count;
-
-       int type;
-       union {
-               struct {
-                       int nargs;
-                       long args[6];
-               } socketcall;
-               struct {
-                       kuid_t                  uid;
-                       kgid_t                  gid;
-                       umode_t                 mode;
-                       u32                     osid;
-                       int                     has_perm;
-                       uid_t                   perm_uid;
-                       gid_t                   perm_gid;
-                       umode_t                 perm_mode;
-                       unsigned long           qbytes;
-               } ipc;
-               struct {
-                       mqd_t                   mqdes;
-                       struct mq_attr          mqstat;
-               } mq_getsetattr;
-               struct {
-                       mqd_t                   mqdes;
-                       int                     sigev_signo;
-               } mq_notify;
-               struct {
-                       mqd_t                   mqdes;
-                       size_t                  msg_len;
-                       unsigned int            msg_prio;
-                       struct timespec         abs_timeout;
-               } mq_sendrecv;
-               struct {
-                       int                     oflag;
-                       umode_t                 mode;
-                       struct mq_attr          attr;
-               } mq_open;
-               struct {
-                       pid_t                   pid;
-                       struct audit_cap_data   cap;
-               } capset;
-               struct {
-                       int                     fd;
-                       int                     flags;
-               } mmap;
-       };
-       int fds[2];
-
-#if AUDIT_DEBUG
-       int                 put_count;
-       int                 ino_count;
-#endif
-};
-
 static inline int open_arg(int flags, int mask)
 {
        int n = ACC_MODE(flags);
@@ -633,9 +490,23 @@ static int audit_filter_rules(struct task_struct *tsk,
                        break;
                case AUDIT_GID:
                        result = audit_gid_comparator(cred->gid, f->op, f->gid);
+                       if (f->op == Audit_equal) {
+                               if (!result)
+                                       result = in_group_p(f->gid);
+                       } else if (f->op == Audit_not_equal) {
+                               if (result)
+                                       result = !in_group_p(f->gid);
+                       }
                        break;
                case AUDIT_EGID:
                        result = audit_gid_comparator(cred->egid, f->op, f->gid);
+                       if (f->op == Audit_equal) {
+                               if (!result)
+                                       result = in_egroup_p(f->gid);
+                       } else if (f->op == Audit_not_equal) {
+                               if (result)
+                                       result = !in_egroup_p(f->gid);
+                       }
                        break;
                case AUDIT_SGID:
                        result = audit_gid_comparator(cred->sgid, f->op, f->gid);
@@ -742,6 +613,9 @@ static int audit_filter_rules(struct task_struct *tsk,
                        if (ctx)
                                result = audit_uid_comparator(tsk->loginuid, f->op, f->uid);
                        break;
+               case AUDIT_LOGINUID_SET:
+                       result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
+                       break;
                case AUDIT_SUBJ_USER:
                case AUDIT_SUBJ_ROLE:
                case AUDIT_SUBJ_TYPE:
@@ -987,6 +861,8 @@ static inline void audit_free_names(struct audit_context *context)
 
 #if AUDIT_DEBUG == 2
        if (context->put_count + context->ino_count != context->name_count) {
+               int i = 0;
+
                printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d"
                       " name_count=%d put_count=%d"
                       " ino_count=%d [NOT freeing]\n",
@@ -995,7 +871,7 @@ static inline void audit_free_names(struct audit_context *context)
                       context->name_count, context->put_count,
                       context->ino_count);
                list_for_each_entry(n, &context->names_list, list) {
-                       printk(KERN_ERR "names[%d] = %p = %s\n", i,
+                       printk(KERN_ERR "names[%d] = %p = %s\n", i++,
                               n->name, n->name->name ?: "(null)");
                }
                dump_stack();
@@ -1010,7 +886,7 @@ static inline void audit_free_names(struct audit_context *context)
        list_for_each_entry_safe(n, next, &context->names_list, list) {
                list_del(&n->list);
                if (n->name && n->name_put)
-                       __putname(n->name);
+                       final_putname(n->name);
                if (n->should_free)
                        kfree(n);
        }
@@ -1093,88 +969,6 @@ static inline void audit_free_context(struct audit_context *context)
        kfree(context);
 }
 
-void audit_log_task_context(struct audit_buffer *ab)
-{
-       char *ctx = NULL;
-       unsigned len;
-       int error;
-       u32 sid;
-
-       security_task_getsecid(current, &sid);
-       if (!sid)
-               return;
-
-       error = security_secid_to_secctx(sid, &ctx, &len);
-       if (error) {
-               if (error != -EINVAL)
-                       goto error_path;
-               return;
-       }
-
-       audit_log_format(ab, " subj=%s", ctx);
-       security_release_secctx(ctx, len);
-       return;
-
-error_path:
-       audit_panic("error in audit_log_task_context");
-       return;
-}
-
-EXPORT_SYMBOL(audit_log_task_context);
-
-void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
-{
-       const struct cred *cred;
-       char name[sizeof(tsk->comm)];
-       struct mm_struct *mm = tsk->mm;
-       char *tty;
-
-       if (!ab)
-               return;
-
-       /* tsk == current */
-       cred = current_cred();
-
-       spin_lock_irq(&tsk->sighand->siglock);
-       if (tsk->signal && tsk->signal->tty)
-               tty = tsk->signal->tty->name;
-       else
-               tty = "(none)";
-       spin_unlock_irq(&tsk->sighand->siglock);
-
-
-       audit_log_format(ab,
-                        " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
-                        " euid=%u suid=%u fsuid=%u"
-                        " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
-                        sys_getppid(),
-                        tsk->pid,
-                        from_kuid(&init_user_ns, tsk->loginuid),
-                        from_kuid(&init_user_ns, cred->uid),
-                        from_kgid(&init_user_ns, cred->gid),
-                        from_kuid(&init_user_ns, cred->euid),
-                        from_kuid(&init_user_ns, cred->suid),
-                        from_kuid(&init_user_ns, cred->fsuid),
-                        from_kgid(&init_user_ns, cred->egid),
-                        from_kgid(&init_user_ns, cred->sgid),
-                        from_kgid(&init_user_ns, cred->fsgid),
-                        tsk->sessionid, tty);
-
-       get_task_comm(name, tsk);
-       audit_log_format(ab, " comm=");
-       audit_log_untrustedstring(ab, name);
-
-       if (mm) {
-               down_read(&mm->mmap_sem);
-               if (mm->exe_file)
-                       audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
-               up_read(&mm->mmap_sem);
-       }
-       audit_log_task_context(ab);
-}
-
-EXPORT_SYMBOL(audit_log_task_info);
-
 static int audit_log_pid_context(struct audit_context *context, pid_t pid,
                                 kuid_t auid, kuid_t uid, unsigned int sessionid,
                                 u32 sid, char *comm)
@@ -1191,12 +985,14 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
        audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid,
                         from_kuid(&init_user_ns, auid),
                         from_kuid(&init_user_ns, uid), sessionid);
-       if (security_secid_to_secctx(sid, &ctx, &len)) {
-               audit_log_format(ab, " obj=(none)");
-               rc = 1;
-       } else {
-               audit_log_format(ab, " obj=%s", ctx);
-               security_release_secctx(ctx, len);
+       if (sid) {
+               if (security_secid_to_secctx(sid, &ctx, &len)) {
+                       audit_log_format(ab, " obj=(none)");
+                       rc = 1;
+               } else {
+                       audit_log_format(ab, " obj=%s", ctx);
+                       security_release_secctx(ctx, len);
+               }
        }
        audit_log_format(ab, " ocomm=");
        audit_log_untrustedstring(ab, comm);
@@ -1390,35 +1186,6 @@ static void audit_log_execve_info(struct audit_context *context,
        kfree(buf);
 }
 
-static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
-{
-       int i;
-
-       audit_log_format(ab, " %s=", prefix);
-       CAP_FOR_EACH_U32(i) {
-               audit_log_format(ab, "%08x", cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
-       }
-}
-
-static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
-{
-       kernel_cap_t *perm = &name->fcap.permitted;
-       kernel_cap_t *inh = &name->fcap.inheritable;
-       int log = 0;
-
-       if (!cap_isclear(*perm)) {
-               audit_log_cap(ab, "cap_fp", perm);
-               log = 1;
-       }
-       if (!cap_isclear(*inh)) {
-               audit_log_cap(ab, "cap_fi", inh);
-               log = 1;
-       }
-
-       if (log)
-               audit_log_format(ab, " cap_fe=%d cap_fver=%x", name->fcap.fE, name->fcap_ver);
-}
-
 static void show_special(struct audit_context *context, int *call_panic)
 {
        struct audit_buffer *ab;
@@ -1516,68 +1283,6 @@ static void show_special(struct audit_context *context, int *call_panic)
        audit_log_end(ab);
 }
 
-static void audit_log_name(struct audit_context *context, struct audit_names *n,
-                          int record_num, int *call_panic)
-{
-       struct audit_buffer *ab;
-       ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
-       if (!ab)
-               return; /* audit_panic has been called */
-
-       audit_log_format(ab, "item=%d", record_num);
-
-       if (n->name) {
-               switch (n->name_len) {
-               case AUDIT_NAME_FULL:
-                       /* log the full path */
-                       audit_log_format(ab, " name=");
-                       audit_log_untrustedstring(ab, n->name->name);
-                       break;
-               case 0:
-                       /* name was specified as a relative path and the
-                        * directory component is the cwd */
-                       audit_log_d_path(ab, " name=", &context->pwd);
-                       break;
-               default:
-                       /* log the name's directory component */
-                       audit_log_format(ab, " name=");
-                       audit_log_n_untrustedstring(ab, n->name->name,
-                                                   n->name_len);
-               }
-       } else
-               audit_log_format(ab, " name=(null)");
-
-       if (n->ino != (unsigned long)-1) {
-               audit_log_format(ab, " inode=%lu"
-                                " dev=%02x:%02x mode=%#ho"
-                                " ouid=%u ogid=%u rdev=%02x:%02x",
-                                n->ino,
-                                MAJOR(n->dev),
-                                MINOR(n->dev),
-                                n->mode,
-                                from_kuid(&init_user_ns, n->uid),
-                                from_kgid(&init_user_ns, n->gid),
-                                MAJOR(n->rdev),
-                                MINOR(n->rdev));
-       }
-       if (n->osid != 0) {
-               char *ctx = NULL;
-               u32 len;
-               if (security_secid_to_secctx(
-                       n->osid, &ctx, &len)) {
-                       audit_log_format(ab, " osid=%u", n->osid);
-                       *call_panic = 2;
-               } else {
-                       audit_log_format(ab, " obj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
-
-       audit_log_fcaps(ab, n);
-
-       audit_log_end(ab);
-}
-
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
        int i, call_panic = 0;
@@ -1695,7 +1400,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 
        i = 0;
        list_for_each_entry(n, &context->names_list, list)
-               audit_log_name(context, n, i++, &call_panic);
+               audit_log_name(context, n, NULL, i++, &call_panic);
 
        /* Send end of event record to help user space know we are finished */
        ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
@@ -2030,18 +1735,18 @@ void audit_putname(struct filename *name)
        BUG_ON(!context);
        if (!context->in_syscall) {
 #if AUDIT_DEBUG == 2
-               printk(KERN_ERR "%s:%d(:%d): __putname(%p)\n",
+               printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
                       __FILE__, __LINE__, context->serial, name);
                if (context->name_count) {
                        struct audit_names *n;
-                       int i;
+                       int i = 0;
 
                        list_for_each_entry(n, &context->names_list, list)
-                               printk(KERN_ERR "name[%d] = %p = %s\n", i,
+                               printk(KERN_ERR "name[%d] = %p = %s\n", i++,
                                       n->name, n->name->name ?: "(null)");
                        }
 #endif
-               __putname(name);
+               final_putname(name);
        }
 #if AUDIT_DEBUG
        else {
@@ -2060,41 +1765,6 @@ void audit_putname(struct filename *name)
 #endif
 }
 
-static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry)
-{
-       struct cpu_vfs_cap_data caps;
-       int rc;
-
-       if (!dentry)
-               return 0;
-
-       rc = get_vfs_caps_from_disk(dentry, &caps);
-       if (rc)
-               return rc;
-
-       name->fcap.permitted = caps.permitted;
-       name->fcap.inheritable = caps.inheritable;
-       name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
-       name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
-
-       return 0;
-}
-
-
-/* Copy inode data into an audit_names. */
-static void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
-                            const struct inode *inode)
-{
-       name->ino   = inode->i_ino;
-       name->dev   = inode->i_sb->s_dev;
-       name->mode  = inode->i_mode;
-       name->uid   = inode->i_uid;
-       name->gid   = inode->i_gid;
-       name->rdev  = inode->i_rdev;
-       security_inode_getsecid(inode, &name->osid);
-       audit_copy_fcaps(name, dentry);
-}
-
 /**
  * __audit_inode - store the inode and device from a lookup
  * @name: name being audited
@@ -2303,7 +1973,7 @@ int audit_set_loginuid(kuid_t loginuid)
        unsigned int sessionid;
 
 #ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
-       if (uid_valid(task->loginuid))
+       if (audit_loginuid_set(task))
                return -EPERM;
 #else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
        if (!capable(CAP_AUDIT_CONTROL))
@@ -2471,17 +2141,20 @@ int __audit_bprm(struct linux_binprm *bprm)
 
 /**
  * audit_socketcall - record audit data for sys_socketcall
- * @nargs: number of args
+ * @nargs: number of args, which should not be more than AUDITSC_ARGS.
  * @args: args array
  *
  */
-void __audit_socketcall(int nargs, unsigned long *args)
+int __audit_socketcall(int nargs, unsigned long *args)
 {
        struct audit_context *context = current->audit_context;
 
+       if (nargs <= 0 || nargs > AUDITSC_ARGS || !args)
+               return -EINVAL;
        context->type = AUDIT_SOCKETCALL;
        context->socketcall.nargs = nargs;
        memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long));
+       return 0;
 }
 
 /**
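
The __audit_socketcall() change above bounds the caller-supplied argument count before the memcpy() into the fixed-size args array and lets callers see -EINVAL. A minimal userspace sketch of the same validate-before-copy pattern (MAX_ARGS and struct call_record are illustrative stand-ins, not kernel definitions):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_ARGS 6	/* illustrative stand-in for AUDITSC_ARGS */

struct call_record {
	int nargs;
	unsigned long args[MAX_ARGS];
};

/* Reject out-of-range counts before touching the fixed-size buffer. */
static int record_socketcall(struct call_record *rec, int nargs,
			     const unsigned long *args)
{
	if (nargs <= 0 || nargs > MAX_ARGS || !args)
		return -EINVAL;

	rec->nargs = nargs;
	memcpy(rec->args, args, nargs * sizeof(*args));
	return 0;
}

int main(void)
{
	unsigned long args[] = { 1, 2, 3 };
	struct call_record rec;

	printf("ok=%d too_many=%d\n",
	       record_socketcall(&rec, 3, args),
	       record_socketcall(&rec, 7, args));
	return 0;
}
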
index 2a9926275f806f41e7c15b6eed584acaf8796bc0..a7c9e6ddb9797a886e96bbff7c75fb8ed9670c68 100644 (file)
@@ -1686,11 +1686,14 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                 */
                cgroup_drop_root(opts.new_root);
 
-               if (((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) &&
-                   root->flags != opts.flags) {
-                       pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
-                       ret = -EINVAL;
-                       goto drop_new_super;
+               if (root->flags != opts.flags) {
+                       if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
+                               pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
+                               ret = -EINVAL;
+                               goto drop_new_super;
+                       } else {
+                               pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
+                       }
                }
 
                /* no subsys rebinding, so refcounts don't change */
@@ -2699,13 +2702,14 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
                goto out;
        }
 
+       cfe->type = (void *)cft;
+       cfe->dentry = dentry;
+       dentry->d_fsdata = cfe;
+       simple_xattrs_init(&cfe->xattrs);
+
        mode = cgroup_file_mode(cft);
        error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
        if (!error) {
-               cfe->type = (void *)cft;
-               cfe->dentry = dentry;
-               dentry->d_fsdata = cfe;
-               simple_xattrs_init(&cfe->xattrs);
                list_add_tail(&cfe->node, &parent->files);
                cfe = NULL;
        }
@@ -2953,11 +2957,8 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
        WARN_ON_ONCE(!rcu_read_lock_held());
 
        /* if first iteration, pretend we just visited @cgroup */
-       if (!pos) {
-               if (list_empty(&cgroup->children))
-                       return NULL;
+       if (!pos)
                pos = cgroup;
-       }
 
        /* visit the first child if exists */
        next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
@@ -2965,14 +2966,14 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
                return next;
 
        /* no child, visit my or the closest ancestor's next sibling */
-       do {
+       while (pos != cgroup) {
                next = list_entry_rcu(pos->sibling.next, struct cgroup,
                                      sibling);
                if (&next->sibling != &pos->parent->children)
                        return next;
 
                pos = pos->parent;
-       } while (pos != cgroup);
+       }
 
        return NULL;
 }
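
The cgroup_next_descendant_pre() simplification above keeps the same iterative pre-order walk: visit the first child if there is one, otherwise the next sibling of the closest ancestor that still has one, stopping once the walk climbs back to the subtree root. A small standalone sketch of that stepping logic over a plain parent/child/sibling tree (all names illustrative):

#include <stddef.h>
#include <stdio.h>

struct node {
	const char *name;
	struct node *parent, *first_child, *next_sibling;
};

/* Return the next node of a pre-order walk below @root, or NULL when done. */
static struct node *next_descendant_pre(struct node *pos, struct node *root)
{
	if (!pos)
		pos = root;			/* pretend we just visited the root */

	if (pos->first_child)			/* visit the first child if it exists */
		return pos->first_child;

	while (pos != root) {			/* else closest ancestor's next sibling */
		if (pos->next_sibling)
			return pos->next_sibling;
		pos = pos->parent;
	}
	return NULL;				/* whole subtree visited */
}

int main(void)
{
	struct node a = { .name = "a" }, b = { .name = "b" };
	struct node c = { .name = "c" }, d = { .name = "d" };

	a.first_child = &b;
	b.parent = &a; b.first_child = &d; b.next_sibling = &c;
	c.parent = &a;
	d.parent = &b;

	for (struct node *p = NULL; (p = next_descendant_pre(p, &a)); )
		printf("%s ", p->name);		/* prints: b d c */
	printf("\n");
	return 0;
}
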
index 8b86c0c68edf4458e894433a45ac8d5b80f673a0..d5585f5e038ee1080f17777792a6927d5897e5ea 100644 (file)
@@ -40,11 +40,13 @@ __setup("hlt", cpu_idle_nopoll_setup);
 
 static inline int cpu_idle_poll(void)
 {
+       rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+       rcu_idle_exit();
        return 1;
 }
 
index 6b41c1899a8b00acc0ca48ae30b0e8dfbdd2ad9d..9dc297faf7c01b68cbb1a7a24444b99c300cb8a3 100644 (file)
@@ -4394,6 +4394,64 @@ perf_event_read_event(struct perf_event *event,
        perf_output_end(&handle);
 }
 
+typedef int  (perf_event_aux_match_cb)(struct perf_event *event, void *data);
+typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
+
+static void
+perf_event_aux_ctx(struct perf_event_context *ctx,
+                  perf_event_aux_match_cb match,
+                  perf_event_aux_output_cb output,
+                  void *data)
+{
+       struct perf_event *event;
+
+       list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+               if (event->state < PERF_EVENT_STATE_INACTIVE)
+                       continue;
+               if (!event_filter_match(event))
+                       continue;
+               if (match(event, data))
+                       output(event, data);
+       }
+}
+
+static void
+perf_event_aux(perf_event_aux_match_cb match,
+              perf_event_aux_output_cb output,
+              void *data,
+              struct perf_event_context *task_ctx)
+{
+       struct perf_cpu_context *cpuctx;
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int ctxn;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->unique_pmu != pmu)
+                       goto next;
+               perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
+               if (task_ctx)
+                       goto next;
+               ctxn = pmu->task_ctx_nr;
+               if (ctxn < 0)
+                       goto next;
+               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+               if (ctx)
+                       perf_event_aux_ctx(ctx, match, output, data);
+next:
+               put_cpu_ptr(pmu->pmu_cpu_context);
+       }
+
+       if (task_ctx) {
+               preempt_disable();
+               perf_event_aux_ctx(task_ctx, match, output, data);
+               preempt_enable();
+       }
+       rcu_read_unlock();
+}
+
 /*
  * task tracking -- fork/exit
  *
@@ -4416,8 +4474,9 @@ struct perf_task_event {
 };
 
 static void perf_event_task_output(struct perf_event *event,
-                                    struct perf_task_event *task_event)
+                                  void *data)
 {
+       struct perf_task_event *task_event = data;
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        struct task_struct *task = task_event->task;
@@ -4445,62 +4504,11 @@ static void perf_event_task_output(struct perf_event *event,
        task_event->event_id.header.size = size;
 }
 
-static int perf_event_task_match(struct perf_event *event)
-{
-       if (event->state < PERF_EVENT_STATE_INACTIVE)
-               return 0;
-
-       if (!event_filter_match(event))
-               return 0;
-
-       if (event->attr.comm || event->attr.mmap ||
-           event->attr.mmap_data || event->attr.task)
-               return 1;
-
-       return 0;
-}
-
-static void perf_event_task_ctx(struct perf_event_context *ctx,
-                                 struct perf_task_event *task_event)
+static int perf_event_task_match(struct perf_event *event,
+                                void *data __maybe_unused)
 {
-       struct perf_event *event;
-
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-               if (perf_event_task_match(event))
-                       perf_event_task_output(event, task_event);
-       }
-}
-
-static void perf_event_task_event(struct perf_task_event *task_event)
-{
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
-       struct pmu *pmu;
-       int ctxn;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->unique_pmu != pmu)
-                       goto next;
-               perf_event_task_ctx(&cpuctx->ctx, task_event);
-
-               ctx = task_event->task_ctx;
-               if (!ctx) {
-                       ctxn = pmu->task_ctx_nr;
-                       if (ctxn < 0)
-                               goto next;
-                       ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-                       if (ctx)
-                               perf_event_task_ctx(ctx, task_event);
-               }
-next:
-               put_cpu_ptr(pmu->pmu_cpu_context);
-       }
-       if (task_event->task_ctx)
-               perf_event_task_ctx(task_event->task_ctx, task_event);
-
-       rcu_read_unlock();
+       return event->attr.comm || event->attr.mmap ||
+              event->attr.mmap_data || event->attr.task;
 }
 
 static void perf_event_task(struct task_struct *task,
@@ -4531,7 +4539,10 @@ static void perf_event_task(struct task_struct *task,
                },
        };
 
-       perf_event_task_event(&task_event);
+       perf_event_aux(perf_event_task_match,
+                      perf_event_task_output,
+                      &task_event,
+                      task_ctx);
 }
 
 void perf_event_fork(struct task_struct *task)
@@ -4557,8 +4568,9 @@ struct perf_comm_event {
 };
 
 static void perf_event_comm_output(struct perf_event *event,
-                                    struct perf_comm_event *comm_event)
+                                  void *data)
 {
+       struct perf_comm_event *comm_event = data;
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        int size = comm_event->event_id.header.size;
@@ -4585,39 +4597,16 @@ static void perf_event_comm_output(struct perf_event *event,
        comm_event->event_id.header.size = size;
 }
 
-static int perf_event_comm_match(struct perf_event *event)
-{
-       if (event->state < PERF_EVENT_STATE_INACTIVE)
-               return 0;
-
-       if (!event_filter_match(event))
-               return 0;
-
-       if (event->attr.comm)
-               return 1;
-
-       return 0;
-}
-
-static void perf_event_comm_ctx(struct perf_event_context *ctx,
-                                 struct perf_comm_event *comm_event)
+static int perf_event_comm_match(struct perf_event *event,
+                                void *data __maybe_unused)
 {
-       struct perf_event *event;
-
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-               if (perf_event_comm_match(event))
-                       perf_event_comm_output(event, comm_event);
-       }
+       return event->attr.comm;
 }
 
 static void perf_event_comm_event(struct perf_comm_event *comm_event)
 {
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
        char comm[TASK_COMM_LEN];
        unsigned int size;
-       struct pmu *pmu;
-       int ctxn;
 
        memset(comm, 0, sizeof(comm));
        strlcpy(comm, comm_event->task->comm, sizeof(comm));
@@ -4627,24 +4616,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
        comm_event->comm_size = size;
 
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
-       rcu_read_lock();
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->unique_pmu != pmu)
-                       goto next;
-               perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
-               ctxn = pmu->task_ctx_nr;
-               if (ctxn < 0)
-                       goto next;
-
-               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-               if (ctx)
-                       perf_event_comm_ctx(ctx, comm_event);
-next:
-               put_cpu_ptr(pmu->pmu_cpu_context);
-       }
-       rcu_read_unlock();
+       perf_event_aux(perf_event_comm_match,
+                      perf_event_comm_output,
+                      comm_event,
+                      NULL);
 }
 
 void perf_event_comm(struct task_struct *task)
@@ -4706,8 +4682,9 @@ struct perf_mmap_event {
 };
 
 static void perf_event_mmap_output(struct perf_event *event,
-                                    struct perf_mmap_event *mmap_event)
+                                  void *data)
 {
+       struct perf_mmap_event *mmap_event = data;
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        int size = mmap_event->event_id.header.size;
@@ -4734,46 +4711,24 @@ static void perf_event_mmap_output(struct perf_event *event,
 }
 
 static int perf_event_mmap_match(struct perf_event *event,
-                                  struct perf_mmap_event *mmap_event,
-                                  int executable)
-{
-       if (event->state < PERF_EVENT_STATE_INACTIVE)
-               return 0;
-
-       if (!event_filter_match(event))
-               return 0;
-
-       if ((!executable && event->attr.mmap_data) ||
-           (executable && event->attr.mmap))
-               return 1;
-
-       return 0;
-}
-
-static void perf_event_mmap_ctx(struct perf_event_context *ctx,
-                                 struct perf_mmap_event *mmap_event,
-                                 int executable)
+                                void *data)
 {
-       struct perf_event *event;
+       struct perf_mmap_event *mmap_event = data;
+       struct vm_area_struct *vma = mmap_event->vma;
+       int executable = vma->vm_flags & VM_EXEC;
 
-       list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-               if (perf_event_mmap_match(event, mmap_event, executable))
-                       perf_event_mmap_output(event, mmap_event);
-       }
+       return (!executable && event->attr.mmap_data) ||
+              (executable && event->attr.mmap);
 }
 
 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 {
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
        struct vm_area_struct *vma = mmap_event->vma;
        struct file *file = vma->vm_file;
        unsigned int size;
        char tmp[16];
        char *buf = NULL;
        const char *name;
-       struct pmu *pmu;
-       int ctxn;
 
        memset(tmp, 0, sizeof(tmp));
 
@@ -4829,27 +4784,10 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->unique_pmu != pmu)
-                       goto next;
-               perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
-                                       vma->vm_flags & VM_EXEC);
-
-               ctxn = pmu->task_ctx_nr;
-               if (ctxn < 0)
-                       goto next;
-
-               ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-               if (ctx) {
-                       perf_event_mmap_ctx(ctx, mmap_event,
-                                       vma->vm_flags & VM_EXEC);
-               }
-next:
-               put_cpu_ptr(pmu->pmu_cpu_context);
-       }
-       rcu_read_unlock();
+       perf_event_aux(perf_event_mmap_match,
+                      perf_event_mmap_output,
+                      mmap_event,
+                      NULL);
 
        kfree(buf);
 }
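
The perf_event_aux() refactor above collapses the three near-identical task/comm/mmap iteration loops into one walker parameterized by a (match, output) callback pair and an opaque data pointer. A toy standalone version of that callback-pair pattern (everything here is illustrative, not perf API):

#include <stdio.h>

typedef int  (match_cb)(int event, void *data);
typedef void (output_cb)(int event, void *data);

/* One generic walker instead of one hand-rolled loop per event type. */
static void for_each_matching(const int *events, int n,
			      match_cb *match, output_cb *output, void *data)
{
	for (int i = 0; i < n; i++)
		if (match(events[i], data))
			output(events[i], data);
}

static int is_even(int event, void *data)
{
	(void)data;
	return !(event & 1);
}

static void print_event(int event, void *data)
{
	printf("%s%d\n", (const char *)data, event);
}

int main(void)
{
	int events[] = { 1, 2, 3, 4, 5 };

	for_each_matching(events, 5, is_even, print_event, "matched: ");
	return 0;
}
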
index 1296e72e4161be305e37606f6024db1a84ff73a3..8241906c4b61a0887304f6524516563707d1a876 100644 (file)
@@ -569,6 +569,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
        int retval = 0;
 
        helper_lock();
+       if (!sub_info->path) {
+               retval = -EINVAL;
+               goto out;
+       }
+
        if (sub_info->path[0] == '\0')
                goto out;
 
index b049939177f6d3a4983341b42eb4719c27b4af62..cab4bce49c23dbe3779d02db8259dda28b7b4258 100644 (file)
@@ -2431,10 +2431,10 @@ static void kmemleak_load_module(const struct module *mod,
        kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 
        for (i = 1; i < info->hdr->e_shnum; i++) {
-               const char *name = info->secstrings + info->sechdrs[i].sh_name;
-               if (!(info->sechdrs[i].sh_flags & SHF_ALLOC))
-                       continue;
-               if (!strstarts(name, ".data") && !strstarts(name, ".bss"))
+               /* Scan all writable sections that are not executable */
+               if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
+                   !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
+                   (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
                        continue;
 
                kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
@@ -2769,24 +2769,11 @@ static void find_module_sections(struct module *mod, struct load_info *info)
        mod->trace_events = section_objs(info, "_ftrace_events",
                                         sizeof(*mod->trace_events),
                                         &mod->num_trace_events);
-       /*
-        * This section contains pointers to allocated objects in the trace
-        * code and not scanning it leads to false positives.
-        */
-       kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
-                          mod->num_trace_events, GFP_KERNEL);
 #endif
 #ifdef CONFIG_TRACING
        mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
                                         sizeof(*mod->trace_bprintk_fmt_start),
                                         &mod->num_trace_bprintk_fmt);
-       /*
-        * This section contains pointers to allocated objects in the trace
-        * code and not scanning it leads to false positives.
-        */
-       kmemleak_scan_area(mod->trace_bprintk_fmt_start,
-                          sizeof(*mod->trace_bprintk_fmt_start) *
-                          mod->num_trace_bprintk_fmt, GFP_KERNEL);
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
        /* sechdrs[0].sh_size is always zero */
index ed35345be536eefb68f1cd585b7c0213f24419b4..53b958fcd639eb2d93d9bed73cffbb9250df7c06 100644 (file)
@@ -613,10 +613,13 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
                       sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1),
                       GFP_KERNEL);
        if (!new) {
-               kfree(mk->mp);
+               kfree(attrs);
                err = -ENOMEM;
                goto fail;
        }
+       /* Despite looking like the typical realloc() bug, this is safe.
+        * We *want* the old 'attrs' to be freed either way, and we'll store
+        * the new one in the success case. */
        attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL);
        if (!attrs) {
                err = -ENOMEM;
index 071b0ab455cb39b81d78b362a86ff6ee86263a55..eb911dbce2679d29fea7226084be73d34a7336dd 100644 (file)
@@ -48,9 +48,11 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
                final_start = min(range[i].start, start);
                final_end = max(range[i].end, end);
 
-               range[i].start = final_start;
-               range[i].end =  final_end;
-               return nr_range;
+               /* clear it and add it back for further merge */
+               range[i].start = 0;
+               range[i].end =  0;
+               return add_range_with_merge(range, az, nr_range,
+                       final_start, final_end);
        }
 
        /* Need to add it: */
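
The add_range_with_merge() fix above clears the widened slot and re-adds the merged range recursively, so a new range that bridges two existing entries collapses them into one instead of leaving a stale overlap behind. A simplified userspace sketch with inclusive bounds (the real kernel helper differs in details):

#include <stdio.h>

struct range {
	unsigned long start, end;	/* end == 0 marks an empty slot */
};

/*
 * Add [start, end] (inclusive here, for simplicity), merging with any
 * overlapping entry by clearing it and re-adding the widened range.
 */
static int add_range_merged(struct range *r, int az, int nr,
			    unsigned long start, unsigned long end)
{
	for (int i = 0; i < nr; i++) {
		if (!r[i].end || end < r[i].start || start > r[i].end)
			continue;

		unsigned long s = r[i].start < start ? r[i].start : start;
		unsigned long e = r[i].end > end ? r[i].end : end;

		/* clear the slot and re-add so further overlaps also merge */
		r[i].start = r[i].end = 0;
		return add_range_merged(r, az, nr, s, e);
	}
	if (nr >= az)
		return nr;		/* no room left, drop the range */
	r[nr].start = start;
	r[nr].end = end;
	return nr + 1;
}

int main(void)
{
	struct range r[8] = { { 1, 3 }, { 7, 9 } };
	int nr = add_range_merged(r, 8, 2, 2, 8);	/* bridges both entries */

	for (int i = 0; i < nr; i++)
		if (r[i].end)
			printf("[%lu, %lu]\n", r[i].start, r[i].end);
	return 0;
}
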
index 170814dc418f63a4753b1cfdae39d4d2dbff78cf..3db5a375d8dd52a93f64b48a5d0d7cad5a59559a 100644 (file)
@@ -88,7 +88,7 @@ static void __init rcu_bootup_announce_oddness(void)
 #ifdef CONFIG_RCU_NOCB_CPU
 #ifndef CONFIG_RCU_NOCB_CPU_NONE
        if (!have_rcu_nocb_mask) {
-               alloc_bootmem_cpumask_var(&rcu_nocb_mask);
+               zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
                have_rcu_nocb_mask = true;
        }
 #ifdef CONFIG_RCU_NOCB_CPU_ZERO
@@ -1667,7 +1667,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
        rdtp->last_accelerate = jiffies;
 
        /* Request timer delay depending on laziness, and round. */
-       if (rdtp->all_lazy) {
+       if (!rdtp->all_lazy) {
                *dj = round_up(rcu_idle_gp_delay + jiffies,
                               rcu_idle_gp_delay) - jiffies;
        } else {
index bfd6787b355a13813cb0b19c018d47a46f2bba8a..7078052284fd9eda7bae8a86805054328af7bb4b 100644 (file)
@@ -200,6 +200,7 @@ cond_syscall(sys_perf_event_open);
 /* fanotify! */
 cond_syscall(sys_fanotify_init);
 cond_syscall(sys_fanotify_mark);
+cond_syscall(compat_sys_fanotify_mark);
 
 /* open by handle */
 cond_syscall(sys_name_to_handle_at);
index ebf72358e86aec33c270edd7e8167789fea59ff9..aea4a9ea6fc845b884f84a367589a5945703e761 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/netdevice.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 
 #ifdef CONFIG_SYSCTL_SYSCALL
 
@@ -1447,7 +1448,6 @@ SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
 
 
 #ifdef CONFIG_COMPAT
-#include <asm/compat.h>
 
 struct compat_sysctl_args {
        compat_uptr_t   name;
@@ -1459,7 +1459,7 @@ struct compat_sysctl_args {
        compat_ulong_t  __unused[4];
 };
 
-asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args)
+COMPAT_SYSCALL_DEFINE1(sysctl, struct compat_sysctl_args __user *, args)
 {
        struct compat_sysctl_args tmp;
        compat_size_t __user *compat_oldlenp;
index e4c07b0692bbba4a79676ffceed77324fd6087bf..70f27e89012b29d18b4aeb9c6e5c367d1b3955a5 100644 (file)
@@ -12,11 +12,6 @@ config CLOCKSOURCE_WATCHDOG
 config ARCH_CLOCKSOURCE_DATA
        bool
 
-# Platforms has a persistent clock
-config ALWAYS_USE_PERSISTENT_CLOCK
-       bool
-       default n
-
 # Timekeeping vsyscall support
 config GENERIC_TIME_VSYSCALL
        bool
index 206bbfb34e091d21f5928a846f4e1a53ab984663..24938d57766948646e567ba4814129d3d74b7112 100644 (file)
@@ -786,11 +786,11 @@ bool tick_broadcast_oneshot_available(void)
 
 void __init tick_broadcast_init(void)
 {
-       alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
-       alloc_cpumask_var(&tmpmask, GFP_NOWAIT);
+       zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
+       zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_TICK_ONESHOT
-       alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
-       alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
-       alloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
+       zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
+       zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
+       zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
 #endif
 }
index bc67d4245e1d130b523fd36482d50ebe4cbab763..f4208138fbf4cd1a72791f99c31adcbea9180e3e 100644 (file)
@@ -717,6 +717,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
        if (unlikely(!cpu_online(cpu))) {
                if (cpu == tick_do_timer_cpu)
                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+               return false;
        }
 
        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -1168,7 +1169,7 @@ void tick_cancel_sched_timer(int cpu)
                hrtimer_cancel(&ts->sched_timer);
 # endif
 
-       ts->nohz_mode = NOHZ_MODE_INACTIVE;
+       memset(ts, 0, sizeof(*ts));
 }
 #endif
 
index a860bba34412c0d8047ff4d81c54fb500fa5de9f..15ffdb3f1948b9468c2c04527beb8190b0d79d45 100644 (file)
@@ -1539,12 +1539,12 @@ static int __cpuinit init_timers_cpu(int cpu)
                        boot_done = 1;
                        base = &boot_tvec_bases;
                }
+               spin_lock_init(&base->lock);
                tvec_base_done[cpu] = 1;
        } else {
                base = per_cpu(tvec_bases, cpu);
        }
 
-       spin_lock_init(&base->lock);
 
        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
index 5e9efd4b83a47fda4e70825078baabbc090a9a72..015f85aaca08f5f5d6eb55af1f1aab46670bb03b 100644 (file)
@@ -71,6 +71,7 @@ config TRACE_CLOCK
 config RING_BUFFER
        bool
        select TRACE_CLOCK
+       select IRQ_WORK
 
 config FTRACE_NMI_ENTER
        bool
@@ -107,7 +108,6 @@ config TRACING
        select BINARY_PRINTF
        select EVENT_TRACING
        select TRACE_CLOCK
-       select IRQ_WORK
 
 config GENERIC_TRACER
        bool
index 8a5c017bb50c141bfca4d8206ac2a1805d224185..b549b0f5b9771624159d8dbbf1b6a103127a842f 100644 (file)
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define INIT_REGEX_LOCK(opsname)       \
+       .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
+#else
+#define INIT_REGEX_LOCK(opsname)
+#endif
+
 static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
@@ -131,6 +138,16 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
        while (likely(op = rcu_dereference_raw((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))
 
+static inline void ftrace_ops_init(struct ftrace_ops *ops)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+       if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+               mutex_init(&ops->regex_lock);
+               ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+       }
+#endif
+}
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
@@ -907,7 +924,8 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
-       .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1103,11 +1121,10 @@ static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(global_ops)
 };
 
-static DEFINE_MUTEX(ftrace_regex_lock);
-
 struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
@@ -1247,6 +1264,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
+       ftrace_ops_init(ops);
        free_ftrace_hash(ops->filter_hash);
        free_ftrace_hash(ops->notrace_hash);
 }
@@ -2441,7 +2459,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
 
                    ((iter->flags & FTRACE_ITER_ENABLED) &&
-                    !(rec->flags & ~FTRACE_FL_MASK))) {
+                    !(rec->flags & FTRACE_FL_ENABLED))) {
 
                        rec = NULL;
                        goto retry;
@@ -2624,6 +2642,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        struct ftrace_hash *hash;
        int ret = 0;
 
+       ftrace_ops_init(ops);
+
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
@@ -2636,28 +2656,26 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                return -ENOMEM;
        }
 
+       iter->ops = ops;
+       iter->flags = flag;
+
+       mutex_lock(&ops->regex_lock);
+
        if (flag & FTRACE_ITER_NOTRACE)
                hash = ops->notrace_hash;
        else
                hash = ops->filter_hash;
 
-       iter->ops = ops;
-       iter->flags = flag;
-
        if (file->f_mode & FMODE_WRITE) {
-               mutex_lock(&ftrace_lock);
                iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
-               mutex_unlock(&ftrace_lock);
-
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
                        kfree(iter);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto out_unlock;
                }
        }
 
-       mutex_lock(&ftrace_regex_lock);
-
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_filter_reset(iter->hash);
@@ -2677,7 +2695,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                }
        } else
                file->private_data = iter;
-       mutex_unlock(&ftrace_regex_lock);
+
+ out_unlock:
+       mutex_unlock(&ops->regex_lock);
 
        return ret;
 }
@@ -2910,6 +2930,8 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
        .func           = function_trace_probe_call,
+       .flags          = FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -2919,8 +2941,12 @@ static void __enable_ftrace_function_probe(void)
        int ret;
        int i;
 
-       if (ftrace_probe_registered)
+       if (ftrace_probe_registered) {
+               /* still need to update the function call sites */
+               if (ftrace_enabled)
+                       ftrace_run_update_code(FTRACE_UPDATE_CALLS);
                return;
+       }
 
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
@@ -2990,19 +3016,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        if (WARN_ON(not))
                return -EINVAL;
 
-       mutex_lock(&ftrace_lock);
+       mutex_lock(&trace_probe_ops.regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash) {
                count = -ENOMEM;
-               goto out_unlock;
+               goto out;
        }
 
        if (unlikely(ftrace_disabled)) {
                count = -ENODEV;
-               goto out_unlock;
+               goto out;
        }
 
+       mutex_lock(&ftrace_lock);
+
        do_for_each_ftrace_rec(pg, rec) {
 
                if (!ftrace_match_record(rec, NULL, search, len, type))
@@ -3056,6 +3084,8 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
  out_unlock:
        mutex_unlock(&ftrace_lock);
+ out:
+       mutex_unlock(&trace_probe_ops.regex_lock);
        free_ftrace_hash(hash);
 
        return count;
@@ -3095,7 +3125,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                        return;
        }
 
-       mutex_lock(&ftrace_lock);
+       mutex_lock(&trace_probe_ops.regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash)
@@ -3133,6 +3163,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                        list_add(&entry->free_list, &free_list);
                }
        }
+       mutex_lock(&ftrace_lock);
        __disable_ftrace_function_probe();
        /*
         * Remove after the disable is called. Otherwise, if the last
@@ -3144,9 +3175,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                list_del(&entry->free_list);
                ftrace_free_entry(entry);
        }
+       mutex_unlock(&ftrace_lock);
                
  out_unlock:
-       mutex_unlock(&ftrace_lock);
+       mutex_unlock(&trace_probe_ops.regex_lock);
        free_ftrace_hash(hash);
 }
 
@@ -3256,18 +3288,17 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
        if (!cnt)
                return 0;
 
-       mutex_lock(&ftrace_regex_lock);
-
-       ret = -ENODEV;
-       if (unlikely(ftrace_disabled))
-               goto out_unlock;
-
        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;
 
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       /* iter->hash is a local copy, so we don't need regex_lock */
+
        parser = &iter->parser;
        read = trace_get_user(parser, ubuf, cnt, ppos);
 
@@ -3276,14 +3307,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                ret = ftrace_process_regex(iter->hash, parser->buffer,
                                           parser->idx, enable);
                trace_parser_clear(parser);
-               if (ret)
-                       goto out_unlock;
+               if (ret < 0)
+                       goto out;
        }
 
        ret = read;
-out_unlock:
-       mutex_unlock(&ftrace_regex_lock);
-
+ out:
        return ret;
 }
 
@@ -3335,16 +3364,19 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       mutex_lock(&ops->regex_lock);
+
        if (enable)
                orig_hash = &ops->filter_hash;
        else
                orig_hash = &ops->notrace_hash;
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
-       if (!hash)
-               return -ENOMEM;
+       if (!hash) {
+               ret = -ENOMEM;
+               goto out_regex_unlock;
+       }
 
-       mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(hash);
        if (buf && !ftrace_match_records(hash, buf, len)) {
@@ -3366,7 +3398,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-       mutex_unlock(&ftrace_regex_lock);
+       mutex_unlock(&ops->regex_lock);
 
        free_ftrace_hash(hash);
        return ret;
@@ -3392,6 +3424,7 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_addr(ops, ip, remove, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
@@ -3416,6 +3449,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3434,6 +3468,7 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
@@ -3524,6 +3559,8 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
 {
        char *func;
 
+       ftrace_ops_init(ops);
+
        while (buf) {
                func = strsep(&buf, ",");
                ftrace_set_regex(ops, func, strlen(func), 0, enable);
@@ -3551,10 +3588,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
        int filter_hash;
        int ret;
 
-       mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;
-
                seq_release(inode, file);
        } else
                iter = file->private_data;
@@ -3567,6 +3602,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        trace_parser_put(parser);
 
+       mutex_lock(&iter->ops->regex_lock);
+
        if (file->f_mode & FMODE_WRITE) {
                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
@@ -3584,10 +3621,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
                mutex_unlock(&ftrace_lock);
        }
+
+       mutex_unlock(&iter->ops->regex_lock);
        free_ftrace_hash(iter->hash);
        kfree(iter);
 
-       mutex_unlock(&ftrace_regex_lock);
        return 0;
 }
 
@@ -4126,7 +4164,8 @@ void __init ftrace_init(void)
 
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4180,8 +4219,9 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 }
 
 static struct ftrace_ops control_ops = {
-       .func = ftrace_ops_control_func,
-       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+       .func   = ftrace_ops_control_func,
+       .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(control_ops)
 };
 
 static inline void
@@ -4539,6 +4579,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
 {
        int ret = -1;
 
+       ftrace_ops_init(ops);
+
        mutex_lock(&ftrace_lock);
 
        ret = __register_ftrace_function(ops);
index b59aea2c48c287f5de894efcba7d53c02fd6f279..e444ff88f0a425a00a4921291b46678b6a7efe21 100644 (file)
@@ -620,6 +620,9 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
        if (cpu == RING_BUFFER_ALL_CPUS)
                work = &buffer->irq_work;
        else {
+               if (!cpumask_test_cpu(cpu, buffer->cpumask))
+                       return -EINVAL;
+
                cpu_buffer = buffer->buffers[cpu];
                work = &cpu_buffer->irq_work;
        }
index ae6fa2d1cdf7dd75dd7e9aa4cfb66bcb32192315..4d79485b3237597a3657b3f2b98b4fca1f40d3a6 100644 (file)
@@ -6216,10 +6216,15 @@ __init static int tracer_alloc_buffers(void)
 
        trace_init_cmdlines();
 
-       register_tracer(&nop_trace);
-
+       /*
+        * register_tracer() might reference current_trace, so it
+        * needs to be set before we register anything. This is
+        * just a bootstrap of current_trace anyway.
+        */
        global_trace.current_trace = &nop_trace;
 
+       register_tracer(&nop_trace);
+
        /* All seems OK, enable tracing */
        tracing_disabled = 0;
 
index 53582e982e51673e7294080097e9acc7b2bdf8f2..27963e2bf4bfe141d0884bbe9f05b0d74a7fcc47 100644 (file)
@@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
        switch (enable) {
        case 0:
                /*
-                * When soft_disable is set and enable is cleared, we want
+                * When soft_disable is set and enable is cleared, the sm_ref
+                * reference counter is decremented. If it reaches 0, we want
                 * to clear the SOFT_DISABLED flag but leave the event in the
                 * state that it was. That is, if the event was enabled and
                 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
                 */
                if (soft_disable) {
+                       if (atomic_dec_return(&file->sm_ref) > 0)
+                               break;
                        disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
                        clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
                } else
@@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                 */
                if (!soft_disable)
                        clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-               else
+               else {
+                       if (atomic_inc_return(&file->sm_ref) > 1)
+                               break;
                        set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+               }
 
                if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
 
@@ -623,6 +629,8 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
        if (file->flags & FTRACE_EVENT_FL_ENABLED) {
                if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
                        buf = "0*\n";
+               else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+                       buf = "1*\n";
                else
                        buf = "1\n";
        } else
@@ -1521,6 +1529,24 @@ __register_event(struct ftrace_event_call *call, struct module *mod)
        return 0;
 }
 
+static struct ftrace_event_file *
+trace_create_new_event(struct ftrace_event_call *call,
+                      struct trace_array *tr)
+{
+       struct ftrace_event_file *file;
+
+       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       if (!file)
+               return NULL;
+
+       file->event_call = call;
+       file->tr = tr;
+       atomic_set(&file->sm_ref, 0);
+       list_add(&file->list, &tr->events);
+
+       return file;
+}
+
 /* Add an event to a trace directory */
 static int
 __trace_add_new_event(struct ftrace_event_call *call,
@@ -1532,14 +1558,10 @@ __trace_add_new_event(struct ftrace_event_call *call,
 {
        struct ftrace_event_file *file;
 
-       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;
 
-       file->event_call = call;
-       file->tr = tr;
-       list_add(&file->list, &tr->events);
-
        return event_create_dir(tr->event_dir, file, id, enable, filter, format);
 }
 
@@ -1554,14 +1576,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 {
        struct ftrace_event_file *file;
 
-       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;
 
-       file->event_call = call;
-       file->tr = tr;
-       list_add(&file->list, &tr->events);
-
        return 0;
 }
 
@@ -2054,15 +2072,27 @@ event_enable_func(struct ftrace_hash *hash,
  out_reg:
        /* Don't let event modules unload while probe registered */
        ret = try_module_get(file->event_call->mod);
-       if (!ret)
+       if (!ret) {
+               ret = -EBUSY;
                goto out_free;
+       }
 
        ret = __ftrace_event_enable_disable(file, 1, 1);
        if (ret < 0)
                goto out_put;
        ret = register_ftrace_function_probe(glob, ops, data);
-       if (!ret)
+       /*
+        * The above returns on success the # of functions enabled,
+        * but if it didn't find any functions it returns zero.
+        * Consider no functions a failure too.
+        */
+       if (!ret) {
+               ret = -ENOENT;
+               goto out_disable;
+       } else if (ret < 0)
                goto out_disable;
+       /* Just return zero, not the number of enabled functions */
+       ret = 0;
  out:
        mutex_unlock(&event_mutex);
        return ret;
index a6361178de5ae4e17d0501db852a06ae80b06cf4..e1b653f7e1ca101f0861351d610b5c8297a4b3ca 100644 (file)
@@ -750,7 +750,11 @@ static int filter_set_pred(struct event_filter *filter,
 
 static void __free_preds(struct event_filter *filter)
 {
+       int i;
+
        if (filter->preds) {
+               for (i = 0; i < filter->n_preds; i++)
+                       kfree(filter->preds[i].ops);
                kfree(filter->preds);
                filter->preds = NULL;
        }
index 1865d5f765387f55600d063a633e6a7e79bcae3c..9f46e98ba8f22a712962ccc68e17b34f166439ba 100644 (file)
@@ -27,7 +27,6 @@
 /**
  * Kprobe event core functions
  */
-
 struct trace_probe {
        struct list_head        list;
        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
@@ -36,6 +35,7 @@ struct trace_probe {
        const char              *symbol;        /* symbol name */
        struct ftrace_event_class       class;
        struct ftrace_event_call        call;
+       struct ftrace_event_file * __rcu *files;
        ssize_t                 size;           /* trace entry size */
        unsigned int            nr_args;
        struct probe_arg        args[];
@@ -46,7 +46,7 @@ struct trace_probe {
        (sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int trace_probe_is_return(struct trace_probe *tp)
+static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
 {
        return tp->rp.handler != NULL;
 }
@@ -183,12 +183,63 @@ static struct trace_probe *find_trace_probe(const char *event,
        return NULL;
 }
 
-/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static int enable_trace_probe(struct trace_probe *tp, int flag)
+static int trace_probe_nr_files(struct trace_probe *tp)
+{
+       struct ftrace_event_file **file;
+       int ret = 0;
+
+       /*
+        * Since all tp->files updater is protected by probe_enable_lock,
+        * we don't need to lock an rcu_read_lock.
+        */
+       file = rcu_dereference_raw(tp->files);
+       if (file)
+               while (*(file++))
+                       ret++;
+
+       return ret;
+}
+
+static DEFINE_MUTEX(probe_enable_lock);
+
+/*
+ * Enable trace_probe
+ * if the file is NULL, enable "perf" handler, or enable "trace" handler.
+ */
+static int
+enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
        int ret = 0;
 
-       tp->flags |= flag;
+       mutex_lock(&probe_enable_lock);
+
+       if (file) {
+               struct ftrace_event_file **new, **old;
+               int n = trace_probe_nr_files(tp);
+
+               old = rcu_dereference_raw(tp->files);
+               /* 1 is for new one and 1 is for stopper */
+               new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
+                             GFP_KERNEL);
+               if (!new) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
+               memcpy(new, old, n * sizeof(struct ftrace_event_file *));
+               new[n] = file;
+               /* The last one keeps a NULL */
+
+               rcu_assign_pointer(tp->files, new);
+               tp->flags |= TP_FLAG_TRACE;
+
+               if (old) {
+                       /* Make sure the probe is done with old files */
+                       synchronize_sched();
+                       kfree(old);
+               }
+       } else
+               tp->flags |= TP_FLAG_PROFILE;
+
        if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
            !trace_probe_has_gone(tp)) {
                if (trace_probe_is_return(tp))
@@ -197,19 +248,90 @@ static int enable_trace_probe(struct trace_probe *tp, int flag)
                        ret = enable_kprobe(&tp->rp.kp);
        }
 
+ out_unlock:
+       mutex_unlock(&probe_enable_lock);
+
        return ret;
 }
 
-/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static void disable_trace_probe(struct trace_probe *tp, int flag)
+static int
+trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
+{
+       struct ftrace_event_file **files;
+       int i;
+
+       /*
+        * Since all tp->files updater is protected by probe_enable_lock,
+        * we don't need to lock an rcu_read_lock.
+        */
+       files = rcu_dereference_raw(tp->files);
+       if (files) {
+               for (i = 0; files[i]; i++)
+                       if (files[i] == file)
+                               return i;
+       }
+
+       return -1;
+}
+
+/*
+ * Disable trace_probe
+ * if the file is NULL, disable "perf" handler, or disable "trace" handler.
+ */
+static int
+disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-       tp->flags &= ~flag;
+       int ret = 0;
+
+       mutex_lock(&probe_enable_lock);
+
+       if (file) {
+               struct ftrace_event_file **new, **old;
+               int n = trace_probe_nr_files(tp);
+               int i, j;
+
+               old = rcu_dereference_raw(tp->files);
+               if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+
+               if (n == 1) {   /* Remove the last file */
+                       tp->flags &= ~TP_FLAG_TRACE;
+                       new = NULL;
+               } else {
+                       new = kzalloc(n * sizeof(struct ftrace_event_file *),
+                                     GFP_KERNEL);
+                       if (!new) {
+                               ret = -ENOMEM;
+                               goto out_unlock;
+                       }
+
+                       /* This copy & check loop copies the NULL stopper too */
+                       for (i = 0, j = 0; j < n && i < n + 1; i++)
+                               if (old[i] != file)
+                                       new[j++] = old[i];
+               }
+
+               rcu_assign_pointer(tp->files, new);
+
+               /* Make sure the probe is done with old files */
+               synchronize_sched();
+               kfree(old);
+       } else
+               tp->flags &= ~TP_FLAG_PROFILE;
+
        if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
                if (trace_probe_is_return(tp))
                        disable_kretprobe(&tp->rp);
                else
                        disable_kprobe(&tp->rp.kp);
        }
+
+ out_unlock:
+       mutex_unlock(&probe_enable_lock);
+
+       return ret;
 }
 
 /* Internal register function - just handle k*probes and flags */
@@ -723,9 +845,10 @@ static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
 }
 
 /* Kprobe handler */
-static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void
+__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
+                   struct ftrace_event_file *ftrace_file)
 {
-       struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct kprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
@@ -733,7 +856,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;
 
-       tp->nhit++;
+       WARN_ON(call != ftrace_file->event_call);
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+               return;
 
        local_save_flags(irq_flags);
        pc = preempt_count();
@@ -741,13 +867,14 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
        dsize = __get_data_size(tp, regs);
        size = sizeof(*entry) + tp->size + dsize;
 
-       event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-                                                 size, irq_flags, pc);
+       event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+                                               call->event.type,
+                                               size, irq_flags, pc);
        if (!event)
                return;
 
        entry = ring_buffer_event_data(event);
-       entry->ip = (unsigned long)kp->addr;
+       entry->ip = (unsigned long)tp->rp.kp.addr;
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
        if (!filter_current_check_discard(buffer, call, entry, event))
@@ -755,11 +882,31 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
                                                irq_flags, pc, regs);
 }
 
+static __kprobes void
+kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
+{
+       /*
+        * Note: preempt is already disabled around the kprobe handler.
+        * However, we still need an smp_read_barrier_depends() corresponding
+        * to smp_wmb() in rcu_assign_pointer() to access the pointer.
+        */
+       struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+       if (unlikely(!file))
+               return;
+
+       while (*file) {
+               __kprobe_trace_func(tp, regs, *file);
+               file++;
+       }
+}
+
 /* Kretprobe handler */
-static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
-                                         struct pt_regs *regs)
+static __kprobes void
+__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                      struct pt_regs *regs,
+                      struct ftrace_event_file *ftrace_file)
 {
-       struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct kretprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
@@ -767,14 +914,20 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;
 
+       WARN_ON(call != ftrace_file->event_call);
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+               return;
+
        local_save_flags(irq_flags);
        pc = preempt_count();
 
        dsize = __get_data_size(tp, regs);
        size = sizeof(*entry) + tp->size + dsize;
 
-       event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-                                                 size, irq_flags, pc);
+       event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+                                               call->event.type,
+                                               size, irq_flags, pc);
        if (!event)
                return;
 
@@ -788,8 +941,28 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
                                                irq_flags, pc, regs);
 }
 
+static __kprobes void
+kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                    struct pt_regs *regs)
+{
+       /*
+        * Note: preempt is already disabled around the kprobe handler.
+        * However, we still need an smp_read_barrier_depends() corresponding
+        * to smp_wmb() in rcu_assign_pointer() to access the pointer.
+        */
+       struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
+
+       if (unlikely(!file))
+               return;
+
+       while (*file) {
+               __kretprobe_trace_func(tp, ri, regs, *file);
+               file++;
+       }
+}
+
 /* Event entry printers */
-enum print_line_t
+static enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
 {
@@ -825,7 +998,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
        return TRACE_TYPE_PARTIAL_LINE;
 }
 
-enum print_line_t
+static enum print_line_t
 print_kretprobe_event(struct trace_iterator *iter, int flags,
                      struct trace_event *event)
 {
@@ -975,10 +1148,9 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_perf_func(struct kprobe *kp,
-                                        struct pt_regs *regs)
+static __kprobes void
+kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-       struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct ftrace_event_call *call = &tp->call;
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
@@ -997,7 +1169,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
        if (!entry)
                return;
 
-       entry->ip = (unsigned long)kp->addr;
+       entry->ip = (unsigned long)tp->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
@@ -1007,10 +1179,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
-                                           struct pt_regs *regs)
+static __kprobes void
+kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                   struct pt_regs *regs)
 {
-       struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct ftrace_event_call *call = &tp->call;
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
@@ -1044,20 +1216,19 @@ int kprobe_register(struct ftrace_event_call *event,
                    enum trace_reg type, void *data)
 {
        struct trace_probe *tp = (struct trace_probe *)event->data;
+       struct ftrace_event_file *file = data;
 
        switch (type) {
        case TRACE_REG_REGISTER:
-               return enable_trace_probe(tp, TP_FLAG_TRACE);
+               return enable_trace_probe(tp, file);
        case TRACE_REG_UNREGISTER:
-               disable_trace_probe(tp, TP_FLAG_TRACE);
-               return 0;
+               return disable_trace_probe(tp, file);
 
 #ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
-               return enable_trace_probe(tp, TP_FLAG_PROFILE);
+               return enable_trace_probe(tp, NULL);
        case TRACE_REG_PERF_UNREGISTER:
-               disable_trace_probe(tp, TP_FLAG_PROFILE);
-               return 0;
+               return disable_trace_probe(tp, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
@@ -1073,11 +1244,13 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 
+       tp->nhit++;
+
        if (tp->flags & TP_FLAG_TRACE)
-               kprobe_trace_func(kp, regs);
+               kprobe_trace_func(tp, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kprobe_perf_func(kp, regs);
+               kprobe_perf_func(tp, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1087,11 +1260,13 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 
+       tp->nhit++;
+
        if (tp->flags & TP_FLAG_TRACE)
-               kretprobe_trace_func(ri, regs);
+               kretprobe_trace_func(tp, ri, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kretprobe_perf_func(ri, regs);
+               kretprobe_perf_func(tp, ri, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1189,11 +1364,24 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
        return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
+static struct ftrace_event_file *
+find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
+{
+       struct ftrace_event_file *file;
+
+       list_for_each_entry(file, &tr->events, list)
+               if (file->event_call == &tp->call)
+                       return file;
+
+       return NULL;
+}
+
 static __init int kprobe_trace_self_tests_init(void)
 {
        int ret, warn = 0;
        int (*target)(int, int, int, int, int, int);
        struct trace_probe *tp;
+       struct ftrace_event_file *file;
 
        target = kprobe_trace_selftest_target;
 
@@ -1203,31 +1391,43 @@ static __init int kprobe_trace_self_tests_init(void)
                                  "$stack $stack0 +0($stack)",
                                  create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on probing function entry.\n");
+               pr_warn("error on probing function entry.\n");
                warn++;
        } else {
                /* Enable trace point */
                tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tp == NULL)) {
-                       pr_warning("error on getting new probe.\n");
+                       pr_warn("error on getting new probe.\n");
                        warn++;
-               } else
-                       enable_trace_probe(tp, TP_FLAG_TRACE);
+               } else {
+                       file = find_trace_probe_file(tp, top_trace_array());
+                       if (WARN_ON_ONCE(file == NULL)) {
+                               pr_warn("error on getting probe file.\n");
+                               warn++;
+                       } else
+                               enable_trace_probe(tp, file);
+               }
        }
 
        ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
                                  "$retval", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on probing function return.\n");
+               pr_warn("error on probing function return.\n");
                warn++;
        } else {
                /* Enable trace point */
                tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tp == NULL)) {
-                       pr_warning("error on getting new probe.\n");
+                       pr_warn("error on getting 2nd new probe.\n");
                        warn++;
-               } else
-                       enable_trace_probe(tp, TP_FLAG_TRACE);
+               } else {
+                       file = find_trace_probe_file(tp, top_trace_array());
+                       if (WARN_ON_ONCE(file == NULL)) {
+                               pr_warn("error on getting probe file.\n");
+                               warn++;
+                       } else
+                               enable_trace_probe(tp, file);
+               }
        }
 
        if (warn)
@@ -1238,27 +1438,39 @@ static __init int kprobe_trace_self_tests_init(void)
        /* Disable trace points before removing it */
        tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tp == NULL)) {
-               pr_warning("error on getting test probe.\n");
+               pr_warn("error on getting test probe.\n");
                warn++;
-       } else
-               disable_trace_probe(tp, TP_FLAG_TRACE);
+       } else {
+               file = find_trace_probe_file(tp, top_trace_array());
+               if (WARN_ON_ONCE(file == NULL)) {
+                       pr_warn("error on getting probe file.\n");
+                       warn++;
+               } else
+                       disable_trace_probe(tp, file);
+       }
 
        tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tp == NULL)) {
-               pr_warning("error on getting 2nd test probe.\n");
+               pr_warn("error on getting 2nd test probe.\n");
                warn++;
-       } else
-               disable_trace_probe(tp, TP_FLAG_TRACE);
+       } else {
+               file = find_trace_probe_file(tp, top_trace_array());
+               if (WARN_ON_ONCE(file == NULL)) {
+                       pr_warn("error on getting probe file.\n");
+                       warn++;
+               } else
+                       disable_trace_probe(tp, file);
+       }
 
        ret = traceprobe_command("-:testprobe", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on deleting a probe.\n");
+               pr_warn("error on deleting a probe.\n");
                warn++;
        }
 
        ret = traceprobe_command("-:testprobe2", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on deleting a probe.\n");
+               pr_warn("error on deleting a probe.\n");
                warn++;
        }
 
index 4aa9f5bc6b2dfdd9f152f5d20bf06c15f070aa21..ee8e29a2320c7c76d67df8f4816af52cd9da5f68 100644 (file)
@@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 
 struct workqueue_struct *system_wq __read_mostly;
-EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL(system_wq);
 struct workqueue_struct *system_highpri_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_highpri_wq);
 struct workqueue_struct *system_long_wq __read_mostly;
@@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
        local_irq_restore(flags);
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_work_on);
+EXPORT_SYMBOL(queue_work_on);
 
 void delayed_work_timer_fn(unsigned long __data)
 {
@@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
        local_irq_restore(flags);
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+EXPORT_SYMBOL(queue_delayed_work_on);
 
 /**
  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
@@ -2059,6 +2059,7 @@ static bool manage_workers(struct worker *worker)
        if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
                spin_unlock_irq(&pool->lock);
                mutex_lock(&pool->manager_mutex);
+               spin_lock_irq(&pool->lock);
                ret = true;
        }
 
@@ -4311,6 +4312,12 @@ bool current_is_workqueue_rescuer(void)
  * no synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
  *
+ * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
+ * Note that both per-cpu and unbound workqueues may be associated with
+ * multiple pool_workqueues which have separate congested states.  A
+ * workqueue being congested on one CPU doesn't mean the workqueue is also
+ * contested on other CPUs / NUMA nodes.
+ *
  * RETURNS:
  * %true if congested, %false otherwise.
  */
@@ -4321,6 +4328,9 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 
        rcu_read_lock_sched();
 
+       if (cpu == WORK_CPU_UNBOUND)
+               cpu = smp_processor_id();
+
        if (!(wq->flags & WQ_UNBOUND))
                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
        else
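
The kernel-doc added above stresses that the result is advisory only and that WORK_CPU_UNBOUND now means "test the local CPU". A minimal caller-side sketch of that usage; my_wq, my_work and handle_inline() are illustrative names, not part of this commit:

	#include <linux/workqueue.h>

	/* Hypothetical sketch: use the congestion test as a hint only. */
	static void submit_or_fallback(struct workqueue_struct *my_wq,
				       struct work_struct *my_work)
	{
		/* WORK_CPU_UNBOUND is resolved to the local CPU */
		if (!workqueue_congested(WORK_CPU_UNBOUND, my_wq))
			queue_work(my_wq, my_work);
		else
			handle_inline();	/* advisory: result may already be stale */
	}
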
@@ -4895,7 +4905,8 @@ static void __init wq_numa_init(void)
        BUG_ON(!tbl);
 
        for_each_node(node)
-               BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node));
+               BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+                               node_online(node) ? node : NUMA_NO_NODE));
 
        for_each_possible_cpu(cpu) {
                node = cpu_to_node(cpu);
index e9c52e1b853a658c0b99580e092023d7f53a341e..c55a037a354eb9ffbd8f492f8e07c92e3aa0b941 100644 (file)
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-        gcd.o lcm.o list_sort.o uuid.o flex_array.o \
+        gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o \
         bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
diff --git a/lib/iovec.c b/lib/iovec.c
new file mode 100644 (file)
index 0000000..454baa8
--- /dev/null
+++ b/lib/iovec.c
@@ -0,0 +1,53 @@
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/uio.h>
+
+/*
+ *     Copy iovec to kernel. Returns -EFAULT on error.
+ *
+ *     Note: this modifies the original iovec.
+ */
+
+int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
+{
+       while (len > 0) {
+               if (iov->iov_len) {
+                       int copy = min_t(unsigned int, len, iov->iov_len);
+                       if (copy_from_user(kdata, iov->iov_base, copy))
+                               return -EFAULT;
+                       len -= copy;
+                       kdata += copy;
+                       iov->iov_base += copy;
+                       iov->iov_len -= copy;
+               }
+               iov++;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(memcpy_fromiovec);
+
+/*
+ *     Copy kernel to iovec. Returns -EFAULT on error.
+ *
+ *     Note: this modifies the original iovec.
+ */
+
+int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
+{
+       while (len > 0) {
+               if (iov->iov_len) {
+                       int copy = min_t(unsigned int, iov->iov_len, len);
+                       if (copy_to_user(iov->iov_base, kdata, copy))
+                               return -EFAULT;
+                       kdata += copy;
+                       len -= copy;
+                       iov->iov_len -= copy;
+                       iov->iov_base += copy;
+               }
+               iov++;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(memcpy_toiovec);
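
As the header comments note, both helpers consume the iovec as they copy (iov_base and iov_len are advanced), so a caller that still needs the original offsets must work on a copy. A minimal usage sketch under that assumption; struct my_hdr, iov and nr_segs are hypothetical names, not part of this commit:

	#include <linux/uio.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	struct my_hdr { u32 magic; u32 len; };	/* hypothetical header */

	static int peek_header(struct my_hdr *hdr, const struct iovec *iov,
			       unsigned long nr_segs)
	{
		struct iovec iov_copy[UIO_FASTIOV];

		if (nr_segs > UIO_FASTIOV)
			return -EINVAL;
		/* copy first: memcpy_fromiovec() modifies the iovec it is given */
		memcpy(iov_copy, iov, nr_segs * sizeof(*iov));
		return memcpy_fromiovec((unsigned char *)hdr, iov_copy, sizeof(*hdr));
	}
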
index 0874e41609a6fbf4d786f3b49aa76f85130893f7..358a368a2947057ef9d9309c9dd4fc893523d63d 100644 (file)
@@ -193,10 +193,10 @@ static void klist_release(struct kref *kref)
                if (waiter->node != n)
                        continue;
 
+               list_del(&waiter->list);
                waiter->woken = 1;
                mb();
                wake_up_process(waiter->process);
-               list_del(&waiter->list);
        }
        spin_unlock(&klist_remove_lock);
        knode_set_klist(n, NULL);
index 095ab157a5215a800d01585bbb50b0d6b6a88a17..d411355f238e2088247f0cbf51631c77ae78b10e 100644 (file)
@@ -318,7 +318,8 @@ extern UDItype __udiv_qrnnd();
             "rM" ((USItype)(bh)), \
             "rM" ((USItype)(al)), \
             "rM" ((USItype)(bl)))
-#if defined(_PA_RISC1_1)
+#if 0 && defined(_PA_RISC1_1)
+/* xmpyu uses floating point register which is not allowed in Linux kernel. */
 #define umul_ppmm(wh, wl, u, v) \
 do { \
        union {UDItype __ll; \
@@ -337,7 +338,7 @@ do { \
 #define UMUL_TIME 40
 #define UDIV_TIME 80
 #endif
-#ifndef LONGLONG_STANDALONE
+#if 0 /* #ifndef LONGLONG_STANDALONE */
 #define udiv_qrnnd(q, r, n1, n0, d) \
 do { USItype __r; \
        (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
index 03a89a2f464bef283770e84ad7186a5cc0915924..362c329b83fe7441b4d2119c1e164a54c58fc860 100644 (file)
@@ -2325,7 +2325,12 @@ static void collapse_huge_page(struct mm_struct *mm,
                pte_unmap(pte);
                spin_lock(&mm->page_table_lock);
                BUG_ON(!pmd_none(*pmd));
-               set_pmd_at(mm, address, pmd, _pmd);
+               /*
+                * We can only use set_pmd_at when establishing
+                * hugepmds and never for establishing regular pmds that
+                * points to regular pagetables. Use pmd_populate for that
+                */
+               pmd_populate(mm, pmd, pmd_pgtable(_pmd));
                spin_unlock(&mm->page_table_lock);
                anon_vma_unlock_write(vma->anon_vma);
                goto out;
index cb1c9dedf9b65c08a4a6d9d6f81ee01f2cb44c36..010d6c14129ae320ab9655a573d49af33e034659 100644 (file)
@@ -4108,8 +4108,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
        if (mem_cgroup_disabled())
                return NULL;
 
-       VM_BUG_ON(PageSwapCache(page));
-
        if (PageTransHuge(page)) {
                nr_pages <<= compound_order(page);
                VM_BUG_ON(!PageTransHuge(page));
@@ -4205,6 +4203,18 @@ void mem_cgroup_uncharge_page(struct page *page)
        if (page_mapped(page))
                return;
        VM_BUG_ON(page->mapping && !PageAnon(page));
+       /*
+        * If the page is in swap cache, uncharge should be deferred
+        * to the swap path, which also properly accounts swap usage
+        * and handles memcg lifetime.
+        *
+        * Note that this check is not stable and reclaim may add the
+        * page to swap cache at any time after this.  However, if the
+        * page is not in swap cache by the time page->mapcount hits
+        * 0, there won't be any page table references to the swap
+        * slot, and reclaim will free it and not actually write the
+        * page to disk.
+        */
        if (PageSwapCache(page))
                return;
        __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
index 6dc1882fbd725c61badb4fd072418c7330001ce1..61a262b08e53efa2f69c1387e488bea6f87bb0f9 100644 (file)
@@ -220,7 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
        tlb->start      = -1UL;
        tlb->end        = 0;
        tlb->need_flush = 0;
-       tlb->fast_mode  = (num_possible_cpus() == 1);
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
        tlb->local.max  = ARRAY_SIZE(tlb->__pages);
@@ -244,9 +243,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_table_flush(tlb);
 #endif
 
-       if (tlb_fast_mode(tlb))
-               return;
-
        for (batch = &tlb->local; batch; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
@@ -288,11 +284,6 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 
        VM_BUG_ON(!tlb->need_flush);
 
-       if (tlb_fast_mode(tlb)) {
-               free_page_and_swap_cache(page);
-               return 1; /* avoid calling tlb_flush_mmu() */
-       }
-
        batch = tlb->active;
        batch->pages[batch->nr++] = page;
        if (batch->nr == batch->max) {
index a221fac1f47d39aef56278758765c023a7981796..1ad92b46753edfe8d9f54fae81c5f3aa15b34110 100644 (file)
@@ -720,9 +720,12 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
        start = phys_start_pfn << PAGE_SHIFT;
        size = nr_pages * PAGE_SIZE;
        ret = release_mem_region_adjustable(&iomem_resource, start, size);
-       if (ret)
-               pr_warn("Unable to release resource <%016llx-%016llx> (%d)\n",
-                               start, start + size - 1, ret);
+       if (ret) {
+               resource_size_t endres = start + size - 1;
+
+               pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
+                               &start, &endres, ret);
+       }
 
        sections_to_remove = nr_pages / PAGES_PER_SECTION;
        for (i = 0; i < sections_to_remove; i++) {
index 27ed22579fd97a21171b952deb78316438e69297..b1f57501de9c8f4f0705b30fef748c4c3f145e86 100644 (file)
@@ -165,7 +165,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
                pte = arch_make_huge_pte(pte, vma, new, 0);
        }
 #endif
-       flush_cache_page(vma, addr, pte_pfn(pte));
+       flush_dcache_page(new);
        set_pte_at(mm, addr, ptep, pte);
 
        if (PageHuge(new)) {
index be04122fb277acd6a43a2f020e184ed2065258df..6725ff183374280ac9a5dcc786cd42f91d3cf669 100644 (file)
@@ -40,48 +40,44 @@ void __mmu_notifier_release(struct mm_struct *mm)
        int id;
 
        /*
-        * srcu_read_lock() here will block synchronize_srcu() in
-        * mmu_notifier_unregister() until all registered
-        * ->release() callouts this function makes have
-        * returned.
+        * SRCU here will block mmu_notifier_unregister until
+        * ->release returns.
         */
        id = srcu_read_lock(&srcu);
+       hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
+               /*
+                * If ->release runs before mmu_notifier_unregister it must be
+                * handled, as it's the only way for the driver to flush all
+                * existing sptes and stop the driver from establishing any more
+                * sptes before all the pages in the mm are freed.
+                */
+               if (mn->ops->release)
+                       mn->ops->release(mn, mm);
+       srcu_read_unlock(&srcu, id);
+
        spin_lock(&mm->mmu_notifier_mm->lock);
        while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
                mn = hlist_entry(mm->mmu_notifier_mm->list.first,
                                 struct mmu_notifier,
                                 hlist);
-
                /*
-                * Unlink.  This will prevent mmu_notifier_unregister()
-                * from also making the ->release() callout.
+                * We arrived before mmu_notifier_unregister so
+                * mmu_notifier_unregister will do nothing other than to wait
+                * for ->release to finish and for mmu_notifier_unregister to
+                * return.
                 */
                hlist_del_init_rcu(&mn->hlist);
-               spin_unlock(&mm->mmu_notifier_mm->lock);
-
-               /*
-                * Clear sptes. (see 'release' description in mmu_notifier.h)
-                */
-               if (mn->ops->release)
-                       mn->ops->release(mn, mm);
-
-               spin_lock(&mm->mmu_notifier_mm->lock);
        }
        spin_unlock(&mm->mmu_notifier_mm->lock);
 
        /*
-        * All callouts to ->release() which we have done are complete.
-        * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
-        */
-       srcu_read_unlock(&srcu, id);
-
-       /*
-        * mmu_notifier_unregister() may have unlinked a notifier and may
-        * still be calling out to it.  Additionally, other notifiers
-        * may have been active via vmtruncate() et. al. Block here
-        * to ensure that all notifier callouts for this mm have been
-        * completed and the sptes are really cleaned up before returning
-        * to exit_mmap().
+        * synchronize_srcu here prevents mmu_notifier_release from returning to
+        * exit_mmap (which would proceed with freeing all pages in the mm)
+        * until the ->release method returns, if it was invoked by
+        * mmu_notifier_unregister.
+        *
+        * The mmu_notifier_mm can't go away from under us because one mm_count
+        * is held by exit_mmap.
         */
        synchronize_srcu(&srcu);
 }
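
The comments above rely on the basic SRCU guarantee: synchronize_srcu() returns only after every read-side section that began before the call has finished. A stripped-down sketch of that pattern in isolation; my_srcu is a hypothetical domain, not the one used by mmu_notifier:

	#include <linux/srcu.h>

	DEFINE_STATIC_SRCU(my_srcu);

	static void reader_side(void)
	{
		int idx;

		idx = srcu_read_lock(&my_srcu);
		/* ... invoke notifier ->release() style callouts here ... */
		srcu_read_unlock(&my_srcu, idx);
	}

	static void writer_side(void)
	{
		/* blocks until all read-side sections that started earlier exit */
		synchronize_srcu(&my_srcu);
	}
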
@@ -292,31 +288,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
 {
        BUG_ON(atomic_read(&mm->mm_count) <= 0);
 
-       spin_lock(&mm->mmu_notifier_mm->lock);
        if (!hlist_unhashed(&mn->hlist)) {
+               /*
+                * SRCU here will force exit_mmap to wait for ->release to
+                * finish before freeing the pages.
+                */
                int id;
 
+               id = srcu_read_lock(&srcu);
                /*
-                * Ensure we synchronize up with __mmu_notifier_release().
+                * exit_mmap will block in mmu_notifier_release to guarantee
+                * that ->release is called before freeing the pages.
                 */
-               id = srcu_read_lock(&srcu);
-
-               hlist_del_rcu(&mn->hlist);
-               spin_unlock(&mm->mmu_notifier_mm->lock);
-
                if (mn->ops->release)
                        mn->ops->release(mn, mm);
+               srcu_read_unlock(&srcu, id);
 
+               spin_lock(&mm->mmu_notifier_mm->lock);
                /*
-                * Allow __mmu_notifier_release() to complete.
+                * Can not use list_del_rcu() since __mmu_notifier_release
+                * can delete it before we hold the lock.
                 */
-               srcu_read_unlock(&srcu, id);
-       } else
+               hlist_del_init_rcu(&mn->hlist);
                spin_unlock(&mm->mmu_notifier_mm->lock);
+       }
 
        /*
-        * Wait for any running method to finish, including ->release() if it
-        * was run by __mmu_notifier_release() instead of us.
+        * Wait for any running method to finish, of course including
+        * ->release if it was run by mmu_notifier_relase instead of us.
         */
        synchronize_srcu(&srcu);
 
index 98cbdf6e553217a87b62daac80d26c026ecf60e3..378a15bcd64940cd4bab29335604f757543839be 100644 (file)
@@ -5158,7 +5158,7 @@ unsigned long free_reserved_area(unsigned long start, unsigned long end,
        for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
                if (poison)
                        memset((void *)pos, poison, PAGE_SIZE);
-               free_reserved_page(virt_to_page(pos));
+               free_reserved_page(virt_to_page((void *)pos));
        }
 
        if (pages && s)
index 35aa294656cd812d2773fede0d51179a8b7ee09a..5da2cbcfdbb56b0e9f4fe27d6e04137e59dfce3b 100644 (file)
@@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
        return 0;
 }
 
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
-       struct vm_area_struct *vma;
-
-       /* We don't need vma lookup at all. */
-       if (!walk->hugetlb_entry)
-               return NULL;
-
-       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
-       vma = find_vma(walk->mm, addr);
-       if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
-               return vma;
-
-       return NULL;
-}
-
 #else /* CONFIG_HUGETLB_PAGE */
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
-       return NULL;
-}
-
 static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
@@ -198,30 +177,53 @@ int walk_page_range(unsigned long addr, unsigned long end,
        if (!walk->mm)
                return -EINVAL;
 
+       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+
        pgd = pgd_offset(walk->mm, addr);
        do {
-               struct vm_area_struct *vma;
+               struct vm_area_struct *vma = NULL;
 
                next = pgd_addr_end(addr, end);
 
                /*
-                * handle hugetlb vma individually because pagetable walk for
-                * the hugetlb page is dependent on the architecture and
-                * we can't handled it in the same manner as non-huge pages.
+                * This function was not intended to be vma based.
+                * But there are vma special cases to be handled:
+                * - hugetlb vma's
+                * - VM_PFNMAP vma's
                 */
-               vma = hugetlb_vma(addr, walk);
+               vma = find_vma(walk->mm, addr);
                if (vma) {
-                       if (vma->vm_end < next)
+                       /*
+                        * There are no page structures backing a VM_PFNMAP
+                        * range, so do not allow split_huge_page_pmd().
+                        */
+                       if ((vma->vm_start <= addr) &&
+                           (vma->vm_flags & VM_PFNMAP)) {
                                next = vma->vm_end;
+                               pgd = pgd_offset(walk->mm, next);
+                               continue;
+                       }
                        /*
-                        * Hugepage is very tightly coupled with vma, so
-                        * walk through hugetlb entries within a given vma.
+                        * Handle hugetlb vma individually because pagetable
+                        * walk for the hugetlb page is dependent on the
+                        * architecture and we can't handle it in the same
+                        * manner as non-huge pages.
                         */
-                       err = walk_hugetlb_range(vma, addr, next, walk);
-                       if (err)
-                               break;
-                       pgd = pgd_offset(walk->mm, next);
-                       continue;
+                       if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
+                           is_vm_hugetlb_page(vma)) {
+                               if (vma->vm_end < next)
+                                       next = vma->vm_end;
+                               /*
+                                * Hugepage is very tightly coupled with vma,
+                                * so walk through hugetlb entries within a
+                                * given vma.
+                                */
+                               err = walk_hugetlb_range(vma, addr, next, walk);
+                               if (err)
+                                       break;
+                               pgd = pgd_offset(walk->mm, next);
+                               continue;
+                       }
                }
 
                if (pgd_none_or_clear_bad(pgd)) {
index e085bcc754f602e4401e51c8c29442a1bd143de8..1eb05d80b07bea736e85a389be0d98c8bfcb3d9c 100644 (file)
@@ -871,10 +871,10 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
         */
        del_timer_sync(&app->join_timer);
 
-       spin_lock(&app->lock);
+       spin_lock_bh(&app->lock);
        mrp_mad_event(app, MRP_EVENT_TX);
        mrp_pdu_queue(app);
-       spin_unlock(&app->lock);
+       spin_unlock_bh(&app->lock);
 
        mrp_queue_xmit(app);
 
index 8e15d966d9b0a9f30e1010c242c440e206f73ea2..239992021b1d635382dfcec141723696d7cbf564 100644 (file)
@@ -837,6 +837,19 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 
        dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
        if (dat_entry) {
+               /* If the ARP request is destined for a local client the local
+                * client will answer itself. DAT would only generate a
+                * duplicate packet.
+                *
+                * Moreover, if the soft-interface is enslaved into a bridge, an
+                * additional DAT answer may trigger kernel warnings about
+                * a packet coming from the wrong port.
+                */
+               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) {
+                       ret = true;
+                       goto out;
+               }
+
                skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
                                     bat_priv->soft_iface, ip_dst, hw_src,
                                     dat_entry->mac_addr, hw_src);
index 3e30a0f1b908b2872bcb6a13100306e0deda0c85..51aafd669cbbd6a7714f9ab075f647313c8234d5 100644 (file)
@@ -163,16 +163,25 @@ void batadv_mesh_free(struct net_device *soft_iface)
        batadv_vis_quit(bat_priv);
 
        batadv_gw_node_purge(bat_priv);
-       batadv_originator_free(bat_priv);
        batadv_nc_free(bat_priv);
+       batadv_dat_free(bat_priv);
+       batadv_bla_free(bat_priv);
 
+       /* Free the TT and the originator tables only after having terminated
+        * all the other depending components which may use these structures for
+        * their purposes.
+        */
        batadv_tt_free(bat_priv);
 
-       batadv_bla_free(bat_priv);
-
-       batadv_dat_free(bat_priv);
+       /* Since the originator table clean up routine is accessing the TT
+        * tables as well, it has to be invoked after the TT tables have been
+        * freed and marked as empty. This ensures that no cleanup RCU callbacks
+        * accessing the TT data are scheduled for later execution.
+        */
+       batadv_originator_free(bat_priv);
 
        free_percpu(bat_priv->bat_counters);
+       bat_priv->bat_counters = NULL;
 
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
 }
@@ -475,7 +484,7 @@ static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
        char *algo_name = (char *)val;
        size_t name_len = strlen(algo_name);
 
-       if (algo_name[name_len - 1] == '\n')
+       if (name_len > 0 && algo_name[name_len - 1] == '\n')
                algo_name[name_len - 1] = '\0';
 
        bat_algo_ops = batadv_algo_get(algo_name);
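
The one-line change above hardens batadv_param_set_ra() against an empty routing-algorithm string: with name_len == 0, the old code indexed algo_name[name_len - 1], i.e. one byte before the buffer. A stand-alone C illustration of the guarded trailing-newline strip (the helper name is made up for the example):

#include <stdio.h>
#include <string.h>

/* Strip one trailing '\n', but only if the string is non-empty. */
static void strip_trailing_newline(char *s)
{
    size_t len = strlen(s);

    if (len > 0 && s[len - 1] == '\n')
        s[len - 1] = '\0';
}

int main(void)
{
    char name[]  = "BATMAN_IV\n";
    char empty[] = "";                  /* the case the fix protects against */

    strip_trailing_newline(name);
    strip_trailing_newline(empty);      /* safe: no s[-1] access */
    printf("'%s' '%s'\n", name, empty);
    return 0;
}
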
index f7c54305a9188f83dbd02af6eae287247854babf..e84629ece9b7cfb0a06a0baf856474173f02a144 100644 (file)
@@ -1514,6 +1514,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
        struct ethhdr *ethhdr, ethhdr_tmp;
        uint8_t *orig_dest, ttl, ttvn;
        unsigned int coding_len;
+       int err;
 
        /* Save headers temporarily */
        memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
@@ -1568,8 +1569,11 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
                         coding_len);
 
        /* Resize decoded skb if decoded with larger packet */
-       if (nc_packet->skb->len > coding_len + h_size)
-               pskb_trim_rcsum(skb, coding_len + h_size);
+       if (nc_packet->skb->len > coding_len + h_size) {
+               err = pskb_trim_rcsum(skb, coding_len + h_size);
+               if (err)
+                       return NULL;
+       }
 
        /* Create decoded unicast packet */
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
index 2f3452546636ce71747ef9a8b1a949caae8d8f04..fad1a2093e15fee8889bb190fac96868143bd0f5 100644 (file)
@@ -156,12 +156,28 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
        kfree(orig_node);
 }
 
+/**
+ * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
+ * schedule an rcu callback for freeing it
+ * @orig_node: the orig node to free
+ */
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
 {
        if (atomic_dec_and_test(&orig_node->refcount))
                call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
 }
 
+/**
+ * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
+ * possibly free it (without rcu callback)
+ * @orig_node: the orig node to free
+ */
+void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
+{
+       if (atomic_dec_and_test(&orig_node->refcount))
+               batadv_orig_node_free_rcu(&orig_node->rcu);
+}
+
 void batadv_originator_free(struct batadv_priv *bat_priv)
 {
        struct batadv_hashtable *hash = bat_priv->orig_hash;
index 7df48fa7669dd0ac173a51bafe3d6538fb049e4b..734e5a3d8a5b29fdf389b04db14e3a37504cf3cb 100644 (file)
@@ -26,6 +26,7 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
+void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
 struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
                                              const uint8_t *addr);
 struct batadv_neigh_node *
index 6f20d339e33adb3bab929d9bc406f3c8cebd959a..819dfb006cdfafbcd3c63d12c5f7e76c0588ed43 100644 (file)
@@ -505,6 +505,7 @@ static int batadv_softif_init_late(struct net_device *dev)
        batadv_debugfs_del_meshif(dev);
 free_bat_counters:
        free_percpu(bat_priv->bat_counters);
+       bat_priv->bat_counters = NULL;
 
        return ret;
 }
index 5e89deeb9542979c72ba635fb1c18d301a9c1621..9e87485758455743984bf51bf783c7bf61cee348 100644 (file)
@@ -144,7 +144,12 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
        struct batadv_tt_orig_list_entry *orig_entry;
 
        orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
-       batadv_orig_node_free_ref(orig_entry->orig_node);
+
+       /* We are in an rcu callback here, therefore we cannot use
+        * batadv_orig_node_free_ref() and its call_rcu():
+        * An rcu_barrier() wouldn't wait for that to finish
+        */
+       batadv_orig_node_free_ref_now(orig_entry->orig_node);
        kfree(orig_entry);
 }
 
index 9878eb8204c524334be49d0445cae9c29927ca0d..19c37a4929bcd7619d43573285d5d33dfa3b865f 100644 (file)
@@ -72,13 +72,12 @@ print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
 }
 
 static void
-ebt_log_packet(u_int8_t pf, unsigned int hooknum,
-   const struct sk_buff *skb, const struct net_device *in,
-   const struct net_device *out, const struct nf_loginfo *loginfo,
-   const char *prefix)
+ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
+              const struct sk_buff *skb, const struct net_device *in,
+              const struct net_device *out, const struct nf_loginfo *loginfo,
+              const char *prefix)
 {
        unsigned int bitmask;
-       struct net *net = dev_net(in ? in : out);
 
        /* FIXME: Disabled from containers until syslog ns is supported */
        if (!net_eq(net, &init_net))
@@ -191,7 +190,7 @@ ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
                nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb,
                              par->in, par->out, &li, "%s", info->prefix);
        else
-               ebt_log_packet(NFPROTO_BRIDGE, par->hooknum, skb, par->in,
+               ebt_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb, par->in,
                               par->out, &li, info->prefix);
        return EBT_CONTINUE;
 }
index fc1905c514178a1ae439060588e06a7c5d26e446..df0364aa12d518c74b769b04d11c55182323773c 100644 (file)
@@ -131,14 +131,16 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
        return skb;
 }
 
-static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
-   const struct net_device *in, const struct net_device *out,
-   const struct ebt_ulog_info *uloginfo, const char *prefix)
+static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
+                           const struct sk_buff *skb,
+                           const struct net_device *in,
+                           const struct net_device *out,
+                           const struct ebt_ulog_info *uloginfo,
+                           const char *prefix)
 {
        ebt_ulog_packet_msg_t *pm;
        size_t size, copy_len;
        struct nlmsghdr *nlh;
-       struct net *net = dev_net(in ? in : out);
        struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
        unsigned int group = uloginfo->nlgroup;
        ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group];
@@ -233,7 +235,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
 }
 
 /* this function is registered with the netfilter core */
-static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
+static void ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
    const struct sk_buff *skb, const struct net_device *in,
    const struct net_device *out, const struct nf_loginfo *li,
    const char *prefix)
@@ -252,13 +254,15 @@ static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
                strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
        }
 
-       ebt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
+       ebt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
 }
 
 static unsigned int
 ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       ebt_ulog_packet(par->hooknum, skb, par->in, par->out,
+       struct net *net = dev_net(par->in ? par->in : par->out);
+
+       ebt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
                        par->targinfo, NULL);
        return EBT_CONTINUE;
 }
index a3395fdfbd4f6345cdf3bd3a12ebdc1d7f26ecbb..d5953b87918c072daaa1427187246f9f9cfcad3e 100644 (file)
@@ -1204,6 +1204,7 @@ void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
        mutex_lock(&osdc->request_mutex);
        if (req->r_linger) {
                __unregister_linger_request(osdc, req);
+               req->r_linger = 0;
                ceph_osdc_put_request(req);
        }
        mutex_unlock(&osdc->request_mutex);
@@ -2120,7 +2121,9 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        __register_request(osdc, req);
-       WARN_ON(req->r_sent);
+       req->r_sent = 0;
+       req->r_got_reply = 0;
+       req->r_completed = 0;
        rc = __map_request(osdc, req, 0);
        if (rc < 0) {
                if (nofail) {
index 79ae884850015a99a1e43c4b2fc228670c3779b9..f0a1ba6c8086acc65a87e519fca48853a3bd091e 100644 (file)
@@ -734,19 +734,25 @@ static unsigned char nas[21] = {
 
 asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
 {
-       return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+       return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
 asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
                                    unsigned int vlen, unsigned int flags)
 {
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
        return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
                              flags | MSG_CMSG_COMPAT);
 }
 
 asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
 {
-       return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+       return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
 asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
@@ -768,6 +774,9 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
        int datagrams;
        struct timespec ktspec;
 
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+
        if (COMPAT_USE_64BIT_TIME)
                return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
                                      flags | MSG_CMSG_COMPAT,
index c013f38482a1ef6572a9de27a2aa39946971b40b..6cda4e2c2132e345f82e3cd0feaeaaa16fbd75e6 100644 (file)
@@ -39,6 +39,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
        ha->refcount = 1;
        ha->global_use = global;
        ha->synced = sync;
+       ha->sync_cnt = 0;
        list_add_tail_rcu(&ha->list, &list->list);
        list->count++;
 
@@ -66,7 +67,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
                        }
                        if (sync) {
                                if (ha->synced)
-                                       return 0;
+                                       return -EEXIST;
                                else
                                        ha->synced = true;
                        }
@@ -139,10 +140,13 @@ static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
 
        err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
                               false, true);
-       if (err)
+       if (err && err != -EEXIST)
                return err;
-       ha->sync_cnt++;
-       ha->refcount++;
+
+       if (!err) {
+               ha->sync_cnt++;
+               ha->refcount++;
+       }
 
        return 0;
 }
@@ -159,7 +163,8 @@ static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
        if (err)
                return;
        ha->sync_cnt--;
-       __hw_addr_del_entry(from_list, ha, false, true);
+       /* address on from list is not marked synced */
+       __hw_addr_del_entry(from_list, ha, false, false);
 }
 
 static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
@@ -796,7 +801,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
                return -EINVAL;
 
        netif_addr_lock_nested(to);
-       err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
+       err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
        netif_addr_unlock(to);
index dad2a178f9f8a477488f091962c5e771d1d117b3..6438f29ff26650b240be40d7d80953dc28f13cb0 100644 (file)
@@ -778,7 +778,7 @@ int sk_detach_filter(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(sk_detach_filter);
 
-static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
+void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
 {
        static const u16 decodes[] = {
                [BPF_S_ALU_ADD_K]       = BPF_ALU|BPF_ADD|BPF_K,
index 7e7aeb01de45cf3236469d7f62b51254b73abac4..de178e462682af6c97dbfc4326df85942ec25174 100644 (file)
@@ -73,31 +73,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
        return err;
 }
 
-/*
- *     Copy kernel to iovec. Returns -EFAULT on error.
- *
- *     Note: this modifies the original iovec.
- */
-
-int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
-{
-       while (len > 0) {
-               if (iov->iov_len) {
-                       int copy = min_t(unsigned int, iov->iov_len, len);
-                       if (copy_to_user(iov->iov_base, kdata, copy))
-                               return -EFAULT;
-                       kdata += copy;
-                       len -= copy;
-                       iov->iov_len -= copy;
-                       iov->iov_base += copy;
-               }
-               iov++;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovec);
-
 /*
  *     Copy kernel to iovec. Returns -EFAULT on error.
  */
@@ -124,31 +99,6 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
 }
 EXPORT_SYMBOL(memcpy_toiovecend);
 
-/*
- *     Copy iovec to kernel. Returns -EFAULT on error.
- *
- *     Note: this modifies the original iovec.
- */
-
-int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
-{
-       while (len > 0) {
-               if (iov->iov_len) {
-                       int copy = min_t(unsigned int, len, iov->iov_len);
-                       if (copy_from_user(kdata, iov->iov_base, copy))
-                               return -EFAULT;
-                       len -= copy;
-                       kdata += copy;
-                       iov->iov_base += copy;
-                       iov->iov_len -= copy;
-               }
-               iov++;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovec);
-
 /*
  *     Copy iovec from kernel. Returns -EFAULT on error.
  */
index af9185d0be6a9e3b3b81c10d061b82992c40fc1d..cfd777bd6bd0cea8023cfe32e431488727982225 100644 (file)
@@ -195,7 +195,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
-       skb->data = NULL;
+       skb->head = NULL;
        skb->truesize = sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
 
@@ -611,7 +611,7 @@ static void skb_release_head_state(struct sk_buff *skb)
 static void skb_release_all(struct sk_buff *skb)
 {
        skb_release_head_state(skb);
-       if (likely(skb->data))
+       if (likely(skb->head))
                skb_release_data(skb);
 }
 
index d4f4cea726e7bfdb0d9cc47a779d5252738bf37c..88868a9d21da54761a09b2ecaf0b88766190152f 100644 (file)
@@ -210,7 +210,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
-  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
+  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -226,7 +226,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
-  "slock-AF_NFC"   , "slock-AF_MAX"
+  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -242,7 +242,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
-  "clock-AF_NFC"   , "clock-AF_MAX"
+  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
 };
 
 /*
@@ -1217,18 +1217,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
 #endif
 }
 
-/*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
- * un-modified. Special care is taken when initializing object to zero.
- */
-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
-{
-       if (offsetof(struct sock, sk_node.next) != 0)
-               memset(sk, 0, offsetof(struct sock, sk_node.next));
-       memset(&sk->sk_node.pprev, 0,
-              size - offsetof(struct sock, sk_node.pprev));
-}
-
 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
 {
        unsigned long nulls1, nulls2;
index d5bef0b0f63968bdfc906921bcc99522154750cc..a0e9cf6379de3eac8ac1182c3be73e8d140b4a1b 100644 (file)
@@ -73,8 +73,13 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
                goto out;
        }
 
-       if (filter)
-               memcpy(nla_data(attr), filter->insns, len);
+       if (filter) {
+               struct sock_filter *fb = (struct sock_filter *)nla_data(attr);
+               int i;
+
+               for (i = 0; i < filter->len; i++, fb++)
+                       sk_decode_filter(&filter->insns[i], fb);
+       }
 
 out:
        rcu_read_unlock();
index c625e4dad4b05066d92fe02d1778b555234d0f2a..2a83591492dd6f3e8b7470c3e9b897dad7625a49 100644 (file)
@@ -235,7 +235,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
           */
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn;
-       const struct iphdr *iph = (const struct iphdr *)skb->data;
+       const struct iphdr *iph;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct ip_tunnel *t;
@@ -281,6 +281,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
        else
                itn = net_generic(net, ipgre_net_id);
 
+       iph = (const struct iphdr *)skb->data;
        t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags,
                             iph->daddr, iph->saddr, tpi.key);
 
index 147abf5275aa6604df58c3b022eaeceaee10df4f..4bcabf3ab4cad3bdc43f5b9ed33eba9c1357557d 100644 (file)
@@ -84,7 +84,7 @@ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
 EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
 /* Generate a checksum for an outgoing IP datagram. */
-__inline__ void ip_send_check(struct iphdr *iph)
+void ip_send_check(struct iphdr *iph)
 {
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
index e4147ec1665a7af9c9661a12712babdf4bfd29ea..be2f8da0ae8ebc4b94a257dd50914db14212a451 100644 (file)
@@ -503,6 +503,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
 
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
        dst = tnl_params->daddr;
        if (dst == 0) {
                /* NBMA tunnel */
@@ -658,7 +659,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
-       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
        /* Push down and install the IP header. */
        skb_push(skb, sizeof(struct iphdr));
index f8a222cb64481c95e2f3b53207e3c6a3d372f4d2..ff4b781b1056e59937d44e9d751b6acf299d7a60 100644 (file)
@@ -162,7 +162,8 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
        return skb;
 }
 
-static void ipt_ulog_packet(unsigned int hooknum,
+static void ipt_ulog_packet(struct net *net,
+                           unsigned int hooknum,
                            const struct sk_buff *skb,
                            const struct net_device *in,
                            const struct net_device *out,
@@ -174,7 +175,6 @@ static void ipt_ulog_packet(unsigned int hooknum,
        size_t size, copy_len;
        struct nlmsghdr *nlh;
        struct timeval tv;
-       struct net *net = dev_net(in ? in : out);
        struct ulog_net *ulog = ulog_pernet(net);
 
        /* ffs == find first bit set, necessary because userspace
@@ -231,8 +231,10 @@ static void ipt_ulog_packet(unsigned int hooknum,
        put_unaligned(tv.tv_usec, &pm->timestamp_usec);
        put_unaligned(skb->mark, &pm->mark);
        pm->hook = hooknum;
-       if (prefix != NULL)
-               strncpy(pm->prefix, prefix, sizeof(pm->prefix));
+       if (prefix != NULL) {
+               strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
+               pm->prefix[sizeof(pm->prefix) - 1] = '\0';
+       }
        else if (loginfo->prefix[0] != '\0')
                strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
        else
@@ -291,12 +293,15 @@ static void ipt_ulog_packet(unsigned int hooknum,
 static unsigned int
 ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       ipt_ulog_packet(par->hooknum, skb, par->in, par->out,
+       struct net *net = dev_net(par->in ? par->in : par->out);
+
+       ipt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
                        par->targinfo, NULL);
        return XT_CONTINUE;
 }
 
-static void ipt_logfn(u_int8_t pf,
+static void ipt_logfn(struct net *net,
+                     u_int8_t pf,
                      unsigned int hooknum,
                      const struct sk_buff *skb,
                      const struct net_device *in,
@@ -318,7 +323,7 @@ static void ipt_logfn(u_int8_t pf,
                strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
        }
 
-       ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
+       ipt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
 }
 
 static int ulog_tg_check(const struct xt_tgchk_param *par)
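
The prefix-copy hunk above works around the classic strncpy() pitfall: strncpy() does not append a '\0' when the source is at least as long as the size passed in, so the copy is limited to sizeof(pm->prefix) - 1 and the terminator is written explicitly. A short user-space demonstration of the same pattern (the buffer size is picked arbitrarily for the demo):

#include <stdio.h>
#include <string.h>

#define PREFIX_LEN 8

static void copy_prefix(char dst[PREFIX_LEN], const char *src)
{
    /* Copy at most PREFIX_LEN - 1 bytes and terminate explicitly;
     * strncpy() alone would leave dst unterminated when src is long. */
    strncpy(dst, src, PREFIX_LEN - 1);
    dst[PREFIX_LEN - 1] = '\0';
}

int main(void)
{
    char prefix[PREFIX_LEN];

    copy_prefix(prefix, "a-rather-long-log-prefix");
    printf("%s\n", prefix);             /* truncated but NUL-terminated */
    return 0;
}
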
index 550781a17b34f75f94b919a39120eb119db106a1..d35bbf0cf4045d04f67d30e31f15c5271058dd5a 100644 (file)
@@ -737,10 +737,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
 {
        struct rtable *rt;
        struct flowi4 fl4;
+       const struct iphdr *iph = (const struct iphdr *) skb->data;
+       int oif = skb->dev->ifindex;
+       u8 tos = RT_TOS(iph->tos);
+       u8 prot = iph->protocol;
+       u32 mark = skb->mark;
 
        rt = (struct rtable *) dst;
 
-       ip_rt_build_flow_key(&fl4, sk, skb);
+       __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
        __ip_do_redirect(rt, skb, &fl4, true);
 }
 
index dcb116dde2168759025d315f4e2d3b77994a276a..ab450c099aa49a3d4b68ca531e7f5426a8eabaf1 100644 (file)
@@ -2887,6 +2887,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
+       bool ooo_okay, copy_destructor;
 
        if (!pskb_may_pull(skb, sizeof(*th)))
                goto out;
@@ -2927,10 +2928,18 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
                goto out;
        }
 
+       copy_destructor = gso_skb->destructor == tcp_wfree;
+       ooo_okay = gso_skb->ooo_okay;
+       /* All segments but the first should have ooo_okay cleared */
+       skb->ooo_okay = 0;
+
        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;
 
+       /* Only first segment might have ooo_okay set */
+       segs->ooo_okay = ooo_okay;
+
        delta = htonl(oldlen + (thlen + mss));
 
        skb = segs;
@@ -2950,6 +2959,17 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
                                                    thlen, skb->csum));
 
                seq += mss;
+               if (copy_destructor) {
+                       skb->destructor = gso_skb->destructor;
+                       skb->sk = gso_skb->sk;
+                       /* {tcp|sock}_wfree() use exact truesize accounting :
+                        * sum(skb->truesize) MUST be exactly gso_skb->truesize
+                        * So we account mss bytes of 'true size' for each segment.
+                        * The last segment will contain the remaining.
+                        */
+                       skb->truesize = mss;
+                       gso_skb->truesize -= mss;
+               }
                skb = skb->next;
                th = tcp_hdr(skb);
 
@@ -2962,7 +2982,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
         * is freed at TX completion, and not right now when gso_skb
         * is freed by GSO engine
         */
-       if (gso_skb->destructor == tcp_wfree) {
+       if (copy_destructor) {
                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                swap(gso_skb->truesize, skb->truesize);
@@ -3269,8 +3289,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 
        for (i = 0; i < shi->nr_frags; ++i) {
                const struct skb_frag_struct *f = &shi->frags[i];
-               struct page *page = skb_frag_page(f);
-               sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+               unsigned int offset = f->page_offset;
+               struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
+
+               sg_set_page(&sg, page, skb_frag_size(f),
+                           offset_in_page(offset));
                if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
                        return 1;
        }
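
The last tcp.c hunk above fixes MD5 hashing of skb fragments whose page_offset exceeds PAGE_SIZE: the page pointer is advanced by offset >> PAGE_SHIFT whole pages and only the in-page remainder is handed to sg_set_page(). The arithmetic, as a plain user-space calculation assuming the common 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    unsigned long frag_offset = 9000;   /* example offset spanning pages */

    unsigned long page_index  = frag_offset >> PAGE_SHIFT;      /* 2   */
    unsigned long in_page_off = frag_offset & (PAGE_SIZE - 1);  /* 808 */

    /* The old code passed the full 9000 as the offset within page 0;
     * the fix hashes starting at page 2, offset 808 instead. */
    printf("page %lu, offset %lu\n", page_index, in_page_off);
    return 0;
}
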
index 08bbe609652891f593ccc2f48ec9975bbe2f335d..9c6225780bd5aafdb175a762d45c704e82416582 100644 (file)
@@ -2743,8 +2743,8 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-                                 int prior_sacked, bool is_dupack,
-                                 int flag)
+                                 int prior_sacked, int prior_packets,
+                                 bool is_dupack, int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -2804,7 +2804,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                                tcp_add_reno_sack(sk);
                } else
                        do_lost = tcp_try_undo_partial(sk, pkts_acked);
-               newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
+               newly_acked_sacked = prior_packets - tp->packets_out +
+                                    tp->sacked_out - prior_sacked;
                break;
        case TCP_CA_Loss:
                tcp_process_loss(sk, flag, is_dupack);
@@ -2818,7 +2819,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        if (is_dupack)
                                tcp_add_reno_sack(sk);
                }
-               newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
+               newly_acked_sacked = prior_packets - tp->packets_out +
+                                    tp->sacked_out - prior_sacked;
 
                if (icsk->icsk_ca_state <= TCP_CA_Disorder)
                        tcp_try_undo_dsack(sk);
@@ -3330,9 +3332,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        bool is_dupack = false;
        u32 prior_in_flight;
        u32 prior_fackets;
-       int prior_packets;
+       int prior_packets = tp->packets_out;
        int prior_sacked = tp->sacked_out;
        int pkts_acked = 0;
+       int previous_packets_out = 0;
 
        /* If the ack is older than previous acks
         * then we can probably ignore it.
@@ -3403,14 +3406,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        sk->sk_err_soft = 0;
        icsk->icsk_probes_out = 0;
        tp->rcv_tstamp = tcp_time_stamp;
-       prior_packets = tp->packets_out;
        if (!prior_packets)
                goto no_queue;
 
        /* See if we can take anything off of the retransmit queue. */
+       previous_packets_out = tp->packets_out;
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
-       pkts_acked = prior_packets - tp->packets_out;
+       pkts_acked = previous_packets_out - tp->packets_out;
 
        if (tcp_ack_is_dubious(sk, flag)) {
                /* Advance CWND, if state allows this. */
@@ -3418,7 +3421,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                        tcp_cong_avoid(sk, ack, prior_in_flight);
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
                tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
-                                     is_dupack, flag);
+                                     prior_packets, is_dupack, flag);
        } else {
                if (flag & FLAG_DATA_ACKED)
                        tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3441,7 +3444,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        /* If data was DSACKed, see if we can undo a cwnd reduction. */
        if (flag & FLAG_DSACKING_ACK)
                tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
-                                     is_dupack, flag);
+                                     prior_packets, is_dupack, flag);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
@@ -3464,7 +3467,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        if (TCP_SKB_CB(skb)->sacked) {
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
                tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
-                                     is_dupack, flag);
+                                     prior_packets, is_dupack, flag);
        }
 
        SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
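
In the tcp_input.c hunks above, newly_acked_sacked is derived from a packets_out snapshot taken before tcp_clean_rtx_queue() (prior_packets) instead of from pkts_acked, so it counts both cumulatively ACKed and newly SACKed segments even though packets_out changes in between. With made-up numbers the calculation looks like this:

#include <stdio.h>

int main(void)
{
    /* Snapshots taken before the ACK is processed. */
    int prior_packets = 10;     /* tp->packets_out before tcp_clean_rtx_queue() */
    int prior_sacked  = 2;      /* tp->sacked_out before the ACK                */

    /* State after the retransmit queue has been cleaned. */
    int packets_out = 6;        /* four segments were cumulatively ACKed */
    int sacked_out  = 3;        /* one more segment is now SACKed        */

    int newly_acked_sacked = prior_packets - packets_out +
                             sacked_out - prior_sacked;

    printf("newly_acked_sacked = %d\n", newly_acked_sacked);   /* prints 5 */
    return 0;
}
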
index 536d40929ba6f809d5c79968c48053202dd3748f..ec335fabd5cc12daba8addf5de86adc90abbd561 100644 (file)
@@ -874,11 +874,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                                                           &md5);
        tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
 
-       if (tcp_packets_in_flight(tp) == 0) {
+       if (tcp_packets_in_flight(tp) == 0)
                tcp_ca_event(sk, CA_EVENT_TX_START);
-               skb->ooo_okay = 1;
-       } else
-               skb->ooo_okay = 0;
+
+       /* if no packet is in qdisc/device queue, then allow XPS to select
+        * another queue.
+        */
+       skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
 
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
index d1ab6ab29a55c11e0570bb9854c524d9f21369fc..1bbf744c2cc35e7953b9580db08a357f4e829a04 100644 (file)
@@ -1487,7 +1487,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
 }
 
 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
-                 struct net_device *dev, int strict)
+                 const struct net_device *dev, int strict)
 {
        struct inet6_ifaddr *ifp;
        unsigned int hash = inet6_addr_hash(addr);
@@ -2658,8 +2658,10 @@ static void init_loopback(struct net_device *dev)
                        sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
 
                        /* Failure cases are ignored */
-                       if (!IS_ERR(sp_rt))
+                       if (!IS_ERR(sp_rt)) {
+                               sp_ifa->rt = sp_rt;
                                ip6_ins_rt(sp_rt);
+                       }
                }
                read_unlock_bh(&idev->lock);
        }
index d3ddd840035450570d634fb22002496a447d5d34..ecd60733e5e24afdb28a52c95686fec28e2e4d73 100644 (file)
@@ -1081,6 +1081,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
                }
                if (t == NULL)
                        t = netdev_priv(dev);
+               memset(&p, 0, sizeof(p));
                ip6gre_tnl_parm_to_user(&p, &t->parms);
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                        err = -EFAULT;
@@ -1128,6 +1129,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
                if (t) {
                        err = 0;
 
+                       memset(&p, 0, sizeof(p));
                        ip6gre_tnl_parm_to_user(&p, &t->parms);
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                                err = -EFAULT;
index d2eedf192330caf5a963c27ddaceffae5e4886b8..dae1949019d7b8dc77d14c39478571e86ad89f34 100644 (file)
@@ -1147,7 +1147,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                        if (WARN_ON(np->cork.opt))
                                return -EINVAL;
 
-                       np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
+                       np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
                        if (unlikely(np->cork.opt == NULL))
                                return -ENOBUFS;
 
index 72836f40b73075430f4ef76e0fb7d20c32b7401c..95f3f1da0d7f2ff20c3afa3eeda315dd9e2e6b5f 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 #include <linux/export.h>
+#include <net/addrconf.h>
 #include <net/dst.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
@@ -186,6 +187,10 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
        return csum;
 };
 
+static const struct nf_ipv6_ops ipv6ops = {
+       .chk_addr       = ipv6_chk_addr,
+};
+
 static const struct nf_afinfo nf_ip6_afinfo = {
        .family                 = AF_INET6,
        .checksum               = nf_ip6_checksum,
@@ -198,6 +203,7 @@ static const struct nf_afinfo nf_ip6_afinfo = {
 
 int __init ipv6_netfilter_init(void)
 {
+       RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
        return nf_register_afinfo(&nf_ip6_afinfo);
 }
 
@@ -206,5 +212,6 @@ int __init ipv6_netfilter_init(void)
  */
 void ipv6_netfilter_fini(void)
 {
+       RCU_INIT_POINTER(nf_ipv6_ops, NULL);
        nf_unregister_afinfo(&nf_ip6_afinfo);
 }
index f3c1ff4357ff1ac91f5e69c704e03b1e83c0192d..51c3285b5d9b582cb1d6101d3fafc70c781f5a74 100644 (file)
@@ -90,7 +90,7 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
        SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
        SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
        SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
-       SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
+       /* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */
        SNMP_MIB_SENTINEL
 };
 
index 71167069b394e741101d33486cb8bd1e2680c68f..0a17ed9eaf390c4b84a6264c03697f60af9281a1 100644 (file)
@@ -1890,6 +1890,17 @@ void tcp6_proc_exit(struct net *net)
 }
 #endif
 
+static void tcp_v6_clear_sk(struct sock *sk, int size)
+{
+       struct inet_sock *inet = inet_sk(sk);
+
+       /* we do not want to clear pinet6 field, because of RCU lookups */
+       sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+       size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+       memset(&inet->pinet6 + 1, 0, size);
+}
+
 struct proto tcpv6_prot = {
        .name                   = "TCPv6",
        .owner                  = THIS_MODULE,
@@ -1933,6 +1944,7 @@ struct proto tcpv6_prot = {
 #ifdef CONFIG_MEMCG_KMEM
        .proto_cgroup           = tcp_proto_cgroup,
 #endif
+       .clear_sk               = tcp_v6_clear_sk,
 };
 
 static const struct inet6_protocol tcpv6_protocol = {
index d4defdd449372604c21f2d3c353c20339e920d5c..42923b14dfa618ce83ba969c126611fe2ad4a828 100644 (file)
@@ -1432,6 +1432,17 @@ void udp6_proc_exit(struct net *net) {
 }
 #endif /* CONFIG_PROC_FS */
 
+void udp_v6_clear_sk(struct sock *sk, int size)
+{
+       struct inet_sock *inet = inet_sk(sk);
+
+       /* we do not want to clear pinet6 field, because of RCU lookups */
+       sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
+
+       size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
+       memset(&inet->pinet6 + 1, 0, size);
+}
+
 /* ------------------------------------------------------------------------ */
 
 struct proto udpv6_prot = {
@@ -1462,7 +1473,7 @@ struct proto udpv6_prot = {
        .compat_setsockopt = compat_udpv6_setsockopt,
        .compat_getsockopt = compat_udpv6_getsockopt,
 #endif
-       .clear_sk          = sk_prot_clear_portaddr_nulls,
+       .clear_sk          = udp_v6_clear_sk,
 };
 
 static struct inet_protosw udpv6_protosw = {
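
udp_v6_clear_sk() above zeroes the socket in two ranges so that the pinet6 pointer survives, since concurrent RCU lookups may still dereference it. The same "clear everything except one member" pattern in stand-alone C, with a hypothetical structure standing in for struct inet_sock:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct fake_sock {
    int   a;
    int   b;
    void *pinet6;   /* must survive the clear, like inet_sk(sk)->pinet6 */
    int   c;
    int   d;
};

static void clear_sk_keep_pinet6(struct fake_sock *sk)
{
    size_t size = sizeof(*sk);

    /* Zero everything up to pinet6 ... */
    memset(sk, 0, offsetof(struct fake_sock, pinet6));

    /* ... then everything after it, leaving pinet6 untouched. */
    size -= offsetof(struct fake_sock, pinet6) + sizeof(sk->pinet6);
    memset(&sk->pinet6 + 1, 0, size);
}

int main(void)
{
    int dummy = 0;
    struct fake_sock sk = { 1, 2, &dummy, 3, 4 };

    clear_sk_keep_pinet6(&sk);
    printf("a=%d pinet6=%p d=%d\n", sk.a, sk.pinet6, sk.d);
    return 0;
}
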
index d7571046bfc440386a52b71bdb2c5295e4360001..4691ed50a9282a108e43ce8e7de56d79c1635c4a 100644 (file)
@@ -31,6 +31,8 @@ extern int    udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 extern int     udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
 extern void    udpv6_destroy_sock(struct sock *sk);
 
+extern void udp_v6_clear_sk(struct sock *sk, int size);
+
 #ifdef CONFIG_PROC_FS
 extern int     udp6_seq_show(struct seq_file *seq, void *v);
 #endif
index 3bb3a891a42416b23ddb278d3fd7c051d25cfcf7..d3cfaf9c7a0858d8bb1e9fd60c412c3b03cbf3e3 100644 (file)
@@ -46,11 +46,12 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
        unsigned int mss;
        unsigned int unfrag_ip6hlen, unfrag_len;
        struct frag_hdr *fptr;
-       u8 *mac_start, *prevhdr;
+       u8 *packet_start, *prevhdr;
        u8 nexthdr;
        u8 frag_hdr_sz = sizeof(struct frag_hdr);
        int offset;
        __wsum csum;
+       int tnl_hlen;
 
        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
@@ -83,9 +84,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Check if there is enough headroom to insert fragment header. */
-       if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
-           pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
-               goto out;
+       tnl_hlen = skb_tnl_header_len(skb);
+       if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
+               if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+                       goto out;
+       }
 
        /* Find the unfragmentable header and shift it left by frag_hdr_sz
         * bytes to insert fragment header.
@@ -93,11 +96,12 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
        unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
        nexthdr = *prevhdr;
        *prevhdr = NEXTHDR_FRAGMENT;
-       unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
-                    unfrag_ip6hlen;
-       mac_start = skb_mac_header(skb);
-       memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
+       unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+                    unfrag_ip6hlen + tnl_hlen;
+       packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
+       memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
 
+       SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
        skb->mac_header -= frag_hdr_sz;
        skb->network_header -= frag_hdr_sz;
 
index 1d08e21d9f6993e92a4a16b589784873aaf63fd8..dfcc4be46898281f09038bbd7638edfefa04ea3e 100644 (file)
@@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
        .compat_setsockopt = compat_udpv6_setsockopt,
        .compat_getsockopt = compat_udpv6_getsockopt,
 #endif
-       .clear_sk          = sk_prot_clear_portaddr_nulls,
+       .clear_sk          = udp_v6_clear_sk,
 };
 
 static struct inet_protosw udplite6_protosw = {
index 4ef7bdb65440ca965561d4ca6fdd10b4d97f97d6..23ed03d786c8376cc59f9fa2cf577ee01a4c2c2d 100644 (file)
@@ -103,8 +103,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
        dev_hold(dev);
 
        xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
-       if (!xdst->u.rt6.rt6i_idev)
+       if (!xdst->u.rt6.rt6i_idev) {
+               dev_put(dev);
                return -ENODEV;
+       }
 
        rt6_transfer_peer(&xdst->u.rt6, rt);
 
index 8c004161a843a2e33a963ddd254c4a0e5a129969..9ea0c933b9ff8803c071367bfef0df210090a90f 100644 (file)
@@ -544,7 +544,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
                /*
                 *  We now have some discovery info to deliver!
                 */
-               discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC);
+               discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC);
                if (!discovery) {
                        IRDA_WARNING("%s: unable to malloc!\n", __func__);
                        return;
index 5b1e5af257137e4c6a03a2c575f1adb5a949e25e..c5fbd7589681cf984220325fff463696163340d0 100644 (file)
@@ -2366,6 +2366,8 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
 
 out:
        xfrm_pol_put(xp);
+       if (err == 0)
+               xfrm_garbage_collect(net);
        return err;
 }
 
@@ -2615,6 +2617,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
 
 out:
        xfrm_pol_put(xp);
+       if (delete && err == 0)
+               xfrm_garbage_collect(net);
        return err;
 }
 
index 07c865a31a3d2adba8933c576f4228b0e8be6e55..857ca9f351771f403625d8762e86866ed8bf4b4c 100644 (file)
@@ -30,6 +30,8 @@ static DEFINE_MUTEX(afinfo_mutex);
 
 const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
 EXPORT_SYMBOL(nf_afinfo);
+const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ipv6_ops);
 
 int nf_register_afinfo(const struct nf_afinfo *afinfo)
 {
index 085b5880ab0de4aa90819d9729a13ff5b635cc15..05565d2b3a61b530acad48cfeda90b2af4b3adda 100644 (file)
@@ -1001,6 +1001,32 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
        return th->rst;
 }
 
+static inline bool is_new_conn(const struct sk_buff *skb,
+                              struct ip_vs_iphdr *iph)
+{
+       switch (iph->protocol) {
+       case IPPROTO_TCP: {
+               struct tcphdr _tcph, *th;
+
+               th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
+               if (th == NULL)
+                       return false;
+               return th->syn;
+       }
+       case IPPROTO_SCTP: {
+               sctp_chunkhdr_t *sch, schunk;
+
+               sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
+                                        sizeof(schunk), &schunk);
+               if (sch == NULL)
+                       return false;
+               return sch->type == SCTP_CID_INIT;
+       }
+       default:
+               return false;
+       }
+}
+
 /* Handle response packets: rewrite addresses and send away...
  */
 static unsigned int
@@ -1612,6 +1638,15 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
         * Check if the packet belongs to an existing connection entry
         */
        cp = pp->conn_in_get(af, skb, &iph, 0);
+
+       if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp && cp->dest &&
+           unlikely(!atomic_read(&cp->dest->weight)) && !iph.fragoffs &&
+           is_new_conn(skb, &iph)) {
+               ip_vs_conn_expire_now(cp);
+               __ip_vs_conn_put(cp);
+               cp = NULL;
+       }
+
        if (unlikely(!cp) && !iph.fragoffs) {
                /* No (second) fragments need to enter here, as nf_defrag_ipv6
                 * replayed fragment zero will already have created the cp
index 5b142fb164801bee9ab126d00fa3e485cdf96c60..9e6c2a075a4c9954c841556fb69f9e2523ddd894 100644 (file)
@@ -2542,6 +2542,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
                struct ip_vs_dest *dest;
                struct ip_vs_dest_entry entry;
 
+               memset(&entry, 0, sizeof(entry));
                list_for_each_entry(dest, &svc->destinations, n_list) {
                        if (count >= get->num_dests)
                                break;
index 0df269d7c99f6d9513cae41e46a5879f691a423b..a65edfe4b16c91d6adb0ee9a54ee345d9bc5fa80 100644 (file)
@@ -67,8 +67,8 @@ struct ip_vs_sh_bucket {
 #define IP_VS_SH_TAB_MASK               (IP_VS_SH_TAB_SIZE - 1)
 
 struct ip_vs_sh_state {
-       struct ip_vs_sh_bucket          buckets[IP_VS_SH_TAB_SIZE];
        struct rcu_head                 rcu_head;
+       struct ip_vs_sh_bucket          buckets[IP_VS_SH_TAB_SIZE];
 };
 
 /*
index 388656d5a9ec45a6049af9e2915aaa7df1eefbbe..3b18dd1be7d9948ca4eb768a3c670a1c1e8ee910 100644 (file)
@@ -148,7 +148,7 @@ void nf_log_packet(struct net *net,
                va_start(args, fmt);
                vsnprintf(prefix, sizeof(prefix), fmt, args);
                va_end(args);
-               logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix);
+               logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
        }
        rcu_read_unlock();
 }
@@ -368,17 +368,20 @@ static int __net_init nf_log_net_init(struct net *net)
        return 0;
 
 out_sysctl:
+#ifdef CONFIG_PROC_FS
        /* For init_net: errors will trigger panic, don't unroll on error. */
        if (!net_eq(net, &init_net))
                remove_proc_entry("nf_log", net->nf.proc_netfilter);
-
+#endif
        return ret;
 }
 
 static void __net_exit nf_log_net_exit(struct net *net)
 {
        netfilter_log_sysctl_exit(net);
+#ifdef CONFIG_PROC_FS
        remove_proc_entry("nf_log", net->nf.proc_netfilter);
+#endif
 }
 
 static struct pernet_operations nf_log_net_ops = {
index dc3fd5d44464a3ca7cdc5ed68d52ec71256b0cc2..c7b6d466a66247c3fa18b9a9a6f3e174a18d40da 100644 (file)
@@ -149,9 +149,12 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        rcu_read_lock();
        list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
-               if (last && cur != last)
-                       continue;
+               if (last) {
+                       if (cur != last)
+                               continue;
 
+                       last = NULL;
+               }
                if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq,
                                       NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
index 701c88a20fea4c2a3ca6c9b4b2189521e13f96fe..65074dfb9383a40faef6c27346399e7dc6711889 100644 (file)
@@ -220,9 +220,12 @@ ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        rcu_read_lock();
        list_for_each_entry_rcu(cur, &cttimeout_list, head) {
-               if (last && cur != last)
-                       continue;
+               if (last) {
+                       if (cur != last)
+                               continue;
 
+                       last = NULL;
+               }
                if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq,
                                           NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
index faf1e9300d8adc358fa857ff4296892351060193..962e9792e3179997db98a448a76fc909432d841f 100644 (file)
@@ -602,7 +602,8 @@ static struct nf_loginfo default_loginfo = {
 
 /* log handler for internal netfilter logging api */
 void
-nfulnl_log_packet(u_int8_t pf,
+nfulnl_log_packet(struct net *net,
+                 u_int8_t pf,
                  unsigned int hooknum,
                  const struct sk_buff *skb,
                  const struct net_device *in,
@@ -615,7 +616,6 @@ nfulnl_log_packet(u_int8_t pf,
        const struct nf_loginfo *li;
        unsigned int qthreshold;
        unsigned int plen;
-       struct net *net = dev_net(in ? in : out);
        struct nfnl_log_net *log = nfnl_log_pernet(net);
 
        if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
@@ -1045,7 +1045,9 @@ static int __net_init nfnl_log_net_init(struct net *net)
 
 static void __net_exit nfnl_log_net_exit(struct net *net)
 {
+#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
+#endif
 }
 
 static struct pernet_operations nfnl_log_net_ops = {
index 2e0e835baf7273427e84c971d4701f122e9c4a40..5352b2d2d5bf644cffd04ed5a571924f623f65d5 100644 (file)
@@ -637,9 +637,6 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        if (queue->copy_mode == NFQNL_COPY_NONE)
                return -EINVAL;
 
-       if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(entry->skb))
-               return __nfqnl_enqueue_packet(net, queue, entry);
-
        skb = entry->skb;
 
        switch (entry->pf) {
@@ -651,6 +648,9 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
                break;
        }
 
+       if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
+               return __nfqnl_enqueue_packet(net, queue, entry);
+
        nf_bridge_adjust_skb_data(skb);
        segs = skb_gso_segment(skb, 0);
        /* Does not use PTR_ERR to limit the number of error codes that can be
@@ -1285,7 +1285,9 @@ static int __net_init nfnl_queue_net_init(struct net *net)
 
 static void __net_exit nfnl_queue_net_exit(struct net *net)
 {
+#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
+#endif
 }
 
 static struct pernet_operations nfnl_queue_net_ops = {
index fe573f6c9e9161e4201e6cbc8698c3b58b0583b2..5ab24843370a0b452e8647b4b0a99d54c96d6087 100644 (file)
@@ -466,7 +466,8 @@ log_packet_common(struct sbuff *m,
 
 
 static void
-ipt_log_packet(u_int8_t pf,
+ipt_log_packet(struct net *net,
+              u_int8_t pf,
               unsigned int hooknum,
               const struct sk_buff *skb,
               const struct net_device *in,
@@ -475,7 +476,6 @@ ipt_log_packet(u_int8_t pf,
               const char *prefix)
 {
        struct sbuff *m;
-       struct net *net = dev_net(in ? in : out);
 
        /* FIXME: Disabled from containers until syslog ns is supported */
        if (!net_eq(net, &init_net))
@@ -737,7 +737,7 @@ static void dump_ipv6_packet(struct sbuff *m,
                dump_sk_uid_gid(m, skb->sk);
 
        /* Max length: 16 "MARK=0xFFFFFFFF " */
-       if (!recurse && skb->mark)
+       if (recurse && skb->mark)
                sb_add(m, "MARK=0x%x ", skb->mark);
 }
 
@@ -797,7 +797,8 @@ static void dump_ipv6_mac_header(struct sbuff *m,
 }
 
 static void
-ip6t_log_packet(u_int8_t pf,
+ip6t_log_packet(struct net *net,
+               u_int8_t pf,
                unsigned int hooknum,
                const struct sk_buff *skb,
                const struct net_device *in,
@@ -806,7 +807,6 @@ ip6t_log_packet(u_int8_t pf,
                const char *prefix)
 {
        struct sbuff *m;
-       struct net *net = dev_net(in ? in : out);
 
        /* FIXME: Disabled from containers until syslog ns is supported */
        if (!net_eq(net, &init_net))
@@ -833,17 +833,18 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_log_info *loginfo = par->targinfo;
        struct nf_loginfo li;
+       struct net *net = dev_net(par->in ? par->in : par->out);
 
        li.type = NF_LOG_TYPE_LOG;
        li.u.log.level = loginfo->level;
        li.u.log.logflags = loginfo->logflags;
 
        if (par->family == NFPROTO_IPV4)
-               ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in,
+               ipt_log_packet(net, NFPROTO_IPV4, par->hooknum, skb, par->in,
                               par->out, &li, loginfo->prefix);
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        else if (par->family == NFPROTO_IPV6)
-               ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in,
+               ip6t_log_packet(net, NFPROTO_IPV6, par->hooknum, skb, par->in,
                                par->out, &li, loginfo->prefix);
 #endif
        else
index a17dd0f589b22d3ffce573177414c4afa0790c1d..fb7497c928a0158675e377fe7b1f3ba4c043a8a9 100644 (file)
@@ -26,13 +26,14 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_nflog_info *info = par->targinfo;
        struct nf_loginfo li;
+       struct net *net = dev_net(par->in ? par->in : par->out);
 
        li.type              = NF_LOG_TYPE_ULOG;
        li.u.ulog.copy_len   = info->len;
        li.u.ulog.group      = info->group;
        li.u.ulog.qthreshold = info->threshold;
 
-       nfulnl_log_packet(par->family, par->hooknum, skb, par->in,
+       nfulnl_log_packet(net, par->family, par->hooknum, skb, par->in,
                          par->out, &li, info->prefix);
        return XT_CONTINUE;
 }
index a75240f0d42b8da245035a8f8ee59b51895c445e..afaebc766933a70de1198ce66b987ccdb62be9bd 100644 (file)
@@ -125,6 +125,12 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 
        skb_put(skb, TCPOLEN_MSS);
 
+       /* RFC 879 states that the default MSS is 536 without specific
+        * knowledge that the destination host is prepared to accept larger.
+        * Since no MSS was provided, we MUST NOT set a value > 536.
+        */
+       newmss = min(newmss, (u16)536);
+
        opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
        memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
 
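The clamp added above enforces the RFC 879 rule that, without an advertised MSS, at most 536 may be assumed. A minimal user-space sketch of the same arithmetic (the helper name and surrounding program are illustrative, not part of the patch):

#include <stdio.h>

/* RFC 879 default: without an advertised MSS, assume at most 536. */
static int clamp_default_mss(int newmss)
{
        return newmss > 536 ? 536 : newmss;
}

int main(void)
{
        printf("%d\n", clamp_default_mss(1460)); /* 536 */
        printf("%d\n", clamp_default_mss(512));  /* 512 */
        return 0;
}
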
index 25fd1c4e1eec3229e8629420e6750ebe74bb6bea..1eb1a44bfd3d134452993dfb071aa15481857ad7 100644 (file)
@@ -30,17 +30,28 @@ static inline unsigned int optlen(const u_int8_t *opt, unsigned int offset)
 
 static unsigned int
 tcpoptstrip_mangle_packet(struct sk_buff *skb,
-                         const struct xt_tcpoptstrip_target_info *info,
+                         const struct xt_action_param *par,
                          unsigned int tcphoff, unsigned int minlen)
 {
+       const struct xt_tcpoptstrip_target_info *info = par->targinfo;
        unsigned int optl, i, j;
        struct tcphdr *tcph;
        u_int16_t n, o;
        u_int8_t *opt;
+       int len;
+
+       /* This is a fragment, no TCP header is available */
+       if (par->fragoff != 0)
+               return XT_CONTINUE;
 
        if (!skb_make_writable(skb, skb->len))
                return NF_DROP;
 
+       len = skb->len - tcphoff;
+       if (len < (int)sizeof(struct tcphdr) ||
+           tcp_hdr(skb)->doff * 4 > len)
+               return NF_DROP;
+
        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
        opt  = (u_int8_t *)tcph;
 
@@ -76,7 +87,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 static unsigned int
 tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb),
+       return tcpoptstrip_mangle_packet(skb, par, ip_hdrlen(skb),
               sizeof(struct iphdr) + sizeof(struct tcphdr));
 }
 
@@ -94,7 +105,7 @@ tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        if (tcphoff < 0)
                return NF_DROP;
 
-       return tcpoptstrip_mangle_packet(skb, par->targinfo, tcphoff,
+       return tcpoptstrip_mangle_packet(skb, par, tcphoff,
               sizeof(*ipv6h) + sizeof(struct tcphdr));
 }
 #endif
index 49c5ff7f6dd67fdbec03b360323aa102ff59e37a..68ff29f608679f598f718e3018bdd8e516963c2a 100644 (file)
@@ -22,6 +22,7 @@
 #include <net/ip6_fib.h>
 #endif
 
+#include <linux/netfilter_ipv6.h>
 #include <linux/netfilter/xt_addrtype.h>
 #include <linux/netfilter/x_tables.h>
 
@@ -33,12 +34,12 @@ MODULE_ALIAS("ip6t_addrtype");
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
-                           const struct in6_addr *addr)
+                           const struct in6_addr *addr, u16 mask)
 {
        const struct nf_afinfo *afinfo;
        struct flowi6 flow;
        struct rt6_info *rt;
-       u32 ret;
+       u32 ret = 0;
        int route_err;
 
        memset(&flow, 0, sizeof(flow));
@@ -49,12 +50,19 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
        rcu_read_lock();
 
        afinfo = nf_get_afinfo(NFPROTO_IPV6);
-       if (afinfo != NULL)
+       if (afinfo != NULL) {
+               const struct nf_ipv6_ops *v6ops;
+
+               if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
+                       v6ops = nf_get_ipv6_ops();
+                       if (v6ops && v6ops->chk_addr(net, addr, dev, true))
+                               ret = XT_ADDRTYPE_LOCAL;
+               }
                route_err = afinfo->route(net, (struct dst_entry **)&rt,
-                                       flowi6_to_flowi(&flow), !!dev);
-       else
+                                         flowi6_to_flowi(&flow), false);
+       } else {
                route_err = 1;
-
+       }
        rcu_read_unlock();
 
        if (route_err)
@@ -62,15 +70,12 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 
        if (rt->rt6i_flags & RTF_REJECT)
                ret = XT_ADDRTYPE_UNREACHABLE;
-       else
-               ret = 0;
 
-       if (rt->rt6i_flags & RTF_LOCAL)
+       if (dev == NULL && rt->rt6i_flags & RTF_LOCAL)
                ret |= XT_ADDRTYPE_LOCAL;
        if (rt->rt6i_flags & RTF_ANYCAST)
                ret |= XT_ADDRTYPE_ANYCAST;
 
-
        dst_release(&rt->dst);
        return ret;
 }
@@ -90,7 +95,7 @@ static bool match_type6(struct net *net, const struct net_device *dev,
 
        if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
             XT_ADDRTYPE_UNREACHABLE) & mask)
-               return !!(mask & match_lookup_rt6(net, dev, addr));
+               return !!(mask & match_lookup_rt6(net, dev, addr, mask));
        return true;
 }
 
index d8d42433755051023320299f919f69db9e58ec29..6bb1d42f0fac04c02a69ee1790d40baf7af53824 100644 (file)
@@ -245,6 +245,71 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
        }
 }
 
+/**
+ * netlbl_domhsh_validate - Validate a new domain mapping entry
+ * @entry: the entry to validate
+ *
+ * This function validates the new domain mapping entry to ensure that it is
+ * a valid entry.  Returns zero on success, negative values on failure.
+ *
+ */
+static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
+{
+       struct netlbl_af4list *iter4;
+       struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+       struct netlbl_af6list *iter6;
+       struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
+
+       if (entry == NULL)
+               return -EINVAL;
+
+       switch (entry->type) {
+       case NETLBL_NLTYPE_UNLABELED:
+               if (entry->type_def.cipsov4 != NULL ||
+                   entry->type_def.addrsel != NULL)
+                       return -EINVAL;
+               break;
+       case NETLBL_NLTYPE_CIPSOV4:
+               if (entry->type_def.cipsov4 == NULL)
+                       return -EINVAL;
+               break;
+       case NETLBL_NLTYPE_ADDRSELECT:
+               netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
+                       map4 = netlbl_domhsh_addr4_entry(iter4);
+                       switch (map4->type) {
+                       case NETLBL_NLTYPE_UNLABELED:
+                               if (map4->type_def.cipsov4 != NULL)
+                                       return -EINVAL;
+                               break;
+                       case NETLBL_NLTYPE_CIPSOV4:
+                               if (map4->type_def.cipsov4 == NULL)
+                                       return -EINVAL;
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+               }
+#if IS_ENABLED(CONFIG_IPV6)
+               netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
+                       map6 = netlbl_domhsh_addr6_entry(iter6);
+                       switch (map6->type) {
+                       case NETLBL_NLTYPE_UNLABELED:
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+               }
+#endif /* IPv6 */
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /*
  * Domain Hash Table Functions
  */
@@ -311,6 +376,10 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
 
+       ret_val = netlbl_domhsh_validate(entry);
+       if (ret_val != 0)
+               return ret_val;
+
        /* XXX - we can remove this RCU read lock as the spinlock protects the
         *       entire function, but before we do we need to fixup the
         *       netlbl_af[4,6]list RCU functions to do "the right thing" with
index 12ac6b47a35c45fe0abb5fce87ef07dfdf1ba1f7..57ee84d21470703b662f7320ed77bad518e5bede 100644 (file)
@@ -371,7 +371,7 @@ static int netlink_mmap(struct file *file, struct socket *sock,
        err = 0;
 out:
        mutex_unlock(&nlk->pg_vec_lock);
-       return 0;
+       return err;
 }
 
 static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
@@ -747,7 +747,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
                atomic_dec(&ring->pending);
                sock_put(sk);
 
-               skb->data = NULL;
+               skb->head = NULL;
        }
 #endif
        if (skb->sk != NULL)
index 823463adbd21fe9c46b87fc20d1e2393e8d5e072..189e3c5b3d098a0d224a8fb2a04b1bd76e47b3ac 100644 (file)
@@ -231,14 +231,14 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
        }
        if (R_tab) {
                police->rate_present = true;
-               psched_ratecfg_precompute(&police->rate, R_tab->rate.rate);
+               psched_ratecfg_precompute(&police->rate, &R_tab->rate);
                qdisc_put_rtab(R_tab);
        } else {
                police->rate_present = false;
        }
        if (P_tab) {
                police->peak_present = true;
-               psched_ratecfg_precompute(&police->peak, P_tab->rate.rate);
+               psched_ratecfg_precompute(&police->peak, &P_tab->rate);
                qdisc_put_rtab(P_tab);
        } else {
                police->peak_present = false;
@@ -376,9 +376,9 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        };
 
        if (police->rate_present)
-               opt.rate.rate = psched_ratecfg_getrate(&police->rate);
+               psched_ratecfg_getrate(&opt.rate, &police->rate);
        if (police->peak_present)
-               opt.peakrate.rate = psched_ratecfg_getrate(&police->peak);
+               psched_ratecfg_getrate(&opt.peakrate, &police->peak);
        if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
                goto nla_put_failure;
        if (police->tcfp_result &&
index 2b935e7cfe7b7bbb78c3cc914a39929f1db766a8..281c1bded1f60f94934e406c9cafe076e4e2706d 100644 (file)
@@ -291,17 +291,18 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
 {
        struct qdisc_rate_table *rtab;
 
+       if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+           nla_len(tab) != TC_RTAB_SIZE)
+               return NULL;
+
        for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
-               if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
+               if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
+                   !memcmp(&rtab->data, nla_data(tab), 1024)) {
                        rtab->refcnt++;
                        return rtab;
                }
        }
 
-       if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
-           nla_len(tab) != TC_RTAB_SIZE)
-               return NULL;
-
        rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
        if (rtab) {
                rtab->rate = *r;
index eac7e0ee23c18708354e3cef8c237d289b81235f..20224086cc28abc30a63ef95964253fd2a55f183 100644 (file)
@@ -898,14 +898,16 @@ void dev_shutdown(struct net_device *dev)
        WARN_ON(timer_pending(&dev->watchdog_timer));
 }
 
-void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+                              const struct tc_ratespec *conf)
 {
        u64 factor;
        u64 mult;
        int shift;
 
-       r->rate_bps = (u64)rate << 3;
-       r->shift = 0;
+       memset(r, 0, sizeof(*r));
+       r->overhead = conf->overhead;
+       r->rate_bps = (u64)conf->rate << 3;
        r->mult = 1;
        /*
         * Calibrate mult, shift so that token counting is accurate
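The hunk above widens psched_ratecfg_precompute() to take the whole tc_ratespec so the configured overhead travels with the byte rate, and the callers elsewhere in this series are updated to match. A hedged stand-alone sketch of the precompute step; the two structs below are trimmed stand-ins for the kernel's tc_ratespec and psched_ratecfg, keeping only the fields this diff touches:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Trimmed stand-ins for struct tc_ratespec / struct psched_ratecfg. */
struct ratespec { uint32_t rate; uint16_t overhead; };
struct ratecfg  { uint64_t rate_bps; uint16_t overhead; };

static void ratecfg_precompute(struct ratecfg *r, const struct ratespec *conf)
{
        memset(r, 0, sizeof(*r));
        r->overhead = conf->overhead;             /* now carried from config */
        r->rate_bps = (uint64_t)conf->rate << 3;  /* bytes/s -> bits/s */
}

int main(void)
{
        struct ratespec conf = { .rate = 125000, .overhead = 24 }; /* 1 Mbit/s */
        struct ratecfg r;

        ratecfg_precompute(&r, &conf);
        printf("%llu bit/s, overhead %u\n",
               (unsigned long long)r.rate_bps, (unsigned)r.overhead);
        return 0;
}
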
index 79b1876b6cd260fac7d150ceeaa7a5a647266a56..adaedd79389ccd0775a824c9e3c61aef2ea5355e 100644 (file)
@@ -109,7 +109,7 @@ struct htb_class {
        } un;
        struct rb_node node[TC_HTB_NUMPRIO];    /* node for self or feed tree */
        struct rb_node pq_node; /* node for event queue */
-       psched_time_t pq_key;
+       s64     pq_key;
 
        int prio_activity;      /* for which prios are we active */
        enum htb_cmode cmode;   /* current mode of the class */
@@ -121,10 +121,10 @@ struct htb_class {
        /* token bucket parameters */
        struct psched_ratecfg rate;
        struct psched_ratecfg ceil;
-       s64 buffer, cbuffer;    /* token bucket depth/rate */
-       psched_tdiff_t mbuffer; /* max wait time */
-       s64 tokens, ctokens;    /* current number of tokens */
-       psched_time_t t_c;      /* checkpoint time */
+       s64     buffer, cbuffer;        /* token bucket depth/rate */
+       s64     mbuffer;                /* max wait time */
+       s64     tokens, ctokens;        /* current number of tokens */
+       s64     t_c;                    /* checkpoint time */
 };
 
 struct htb_sched {
@@ -141,15 +141,15 @@ struct htb_sched {
        struct rb_root wait_pq[TC_HTB_MAXDEPTH];
 
        /* time of nearest event per level (row) */
-       psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
+       s64     near_ev_cache[TC_HTB_MAXDEPTH];
 
        int defcls;             /* class where unclassified flows go to */
 
        /* filters for qdisc itself */
        struct tcf_proto *filter_list;
 
-       int rate2quantum;       /* quant = rate / rate2quantum */
-       psched_time_t now;      /* cached dequeue time */
+       int     rate2quantum;   /* quant = rate / rate2quantum */
+       s64     now;    /* cached dequeue time */
        struct qdisc_watchdog watchdog;
 
        /* non shaped skbs; let them go directly thru */
@@ -664,8 +664,8 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
  * next pending event (0 for no event in pq, q->now for too many events).
  * Note: Applied are events whose have cl->pq_key <= q->now.
  */
-static psched_time_t htb_do_events(struct htb_sched *q, int level,
-                                  unsigned long start)
+static s64 htb_do_events(struct htb_sched *q, int level,
+                        unsigned long start)
 {
        /* don't run for longer than 2 jiffies; 2 is used instead of
         * 1 to simplify things when jiffy is going to be incremented
@@ -857,7 +857,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
        struct sk_buff *skb;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
-       psched_time_t next_event;
+       s64 next_event;
        unsigned long start_at;
 
        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
@@ -880,7 +880,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
-               psched_time_t event;
+               s64 event;
 
                if (q->now >= q->near_ev_cache[level]) {
                        event = htb_do_events(q, level, start_at);
@@ -1090,9 +1090,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 
        memset(&opt, 0, sizeof(opt));
 
-       opt.rate.rate = psched_ratecfg_getrate(&cl->rate);
+       psched_ratecfg_getrate(&opt.rate, &cl->rate);
        opt.buffer = PSCHED_NS2TICKS(cl->buffer);
-       opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil);
+       psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
        opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
        opt.quantum = cl->quantum;
        opt.prio = cl->prio;
@@ -1117,8 +1117,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 
        if (!cl->level && cl->un.leaf.q)
                cl->qstats.qlen = cl->un.leaf.q->q.qlen;
-       cl->xstats.tokens = cl->tokens;
-       cl->xstats.ctokens = cl->ctokens;
+       cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
+       cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
 
        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
@@ -1200,7 +1200,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
        parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
        parent->tokens = parent->buffer;
        parent->ctokens = parent->cbuffer;
-       parent->t_c = psched_get_time();
+       parent->t_c = ktime_to_ns(ktime_get());
        parent->cmode = HTB_CAN_SEND;
 }
 
@@ -1417,8 +1417,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                /* set class to be in HTB_CAN_SEND state */
                cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
                cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
-               cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;        /* 1min */
-               cl->t_c = psched_get_time();
+               cl->mbuffer = 60ULL * NSEC_PER_SEC;     /* 1min */
+               cl->t_c = ktime_to_ns(ktime_get());
                cl->cmode = HTB_CAN_SEND;
 
                /* attach to the hash list and parent's family */
@@ -1459,8 +1459,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                        cl->prio = TC_HTB_NUMPRIO - 1;
        }
 
-       psched_ratecfg_precompute(&cl->rate, hopt->rate.rate);
-       psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate);
+       psched_ratecfg_precompute(&cl->rate, &hopt->rate);
+       psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
 
        cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
        cl->cbuffer = PSCHED_TICKS2NS(hopt->buffer);
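These HTB hunks move the class bookkeeping from psched ticks to plain s64 nanoseconds, converting only at the configuration and dump boundaries via PSCHED_TICKS2NS()/PSCHED_NS2TICKS(). A small sketch of that conversion, assuming the conventional PSCHED_SHIFT of 6 (one tick = 64 ns); the shift value is an assumption taken from kernels of this period, not stated in the diff:

#include <stdint.h>
#include <stdio.h>

/* Assumption: PSCHED_SHIFT is 6, as in include/net/pkt_sched.h of this era. */
#define PSCHED_SHIFT        6
#define PSCHED_TICKS2NS(x)  ((int64_t)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)  ((int64_t)(x) >> PSCHED_SHIFT)

int main(void)
{
        int64_t buffer_ns = PSCHED_TICKS2NS(1000);      /* config ticks -> ns */

        printf("%lld ns, %lld ticks\n",
               (long long)buffer_ns,
               (long long)PSCHED_NS2TICKS(buffer_ns));  /* ns -> ticks for dump */
        return 0;
}
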
index c8388f3c3426ab862414d1ce67e5ec3e9d13ecde..e478d316602b1b9ca94100369e878f0a315a72cd 100644 (file)
@@ -298,9 +298,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
        q->tokens = q->buffer;
        q->ptokens = q->mtu;
 
-       psched_ratecfg_precompute(&q->rate, rtab->rate.rate);
+       psched_ratecfg_precompute(&q->rate, &rtab->rate);
        if (ptab) {
-               psched_ratecfg_precompute(&q->peak, ptab->rate.rate);
+               psched_ratecfg_precompute(&q->peak, &ptab->rate);
                q->peak_present = true;
        } else {
                q->peak_present = false;
@@ -350,9 +350,9 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
                goto nla_put_failure;
 
        opt.limit = q->limit;
-       opt.rate.rate = psched_ratecfg_getrate(&q->rate);
+       psched_ratecfg_getrate(&opt.rate, &q->rate);
        if (q->peak_present)
-               opt.peakrate.rate = psched_ratecfg_getrate(&q->peak);
+               psched_ratecfg_getrate(&opt.peakrate, &q->peak);
        else
                memset(&opt.peakrate, 0, sizeof(opt.peakrate));
        opt.mtu = PSCHED_NS2TICKS(q->mtu);
index f631c5ff4dbfd84426d140920b15650972f34646..6abb1caf9836fe25829ac6c26eaf7da76b408520 100644 (file)
@@ -4003,6 +4003,12 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
 
        /* Release our hold on the endpoint. */
        sp = sctp_sk(sk);
+       /* This could happen during socket init, thus we bail out
+        * early, since the rest of the below is not setup either.
+        */
+       if (sp->ep == NULL)
+               return;
+
        if (sp->do_auto_asconf) {
                sp->do_auto_asconf = 0;
                list_del(&sp->auto_asconf_list);
index b416093997dafcf528204be9c99a9986dfe7965d..4ca1526db7562ff7a1ae189a5b51bd6487c045ed 100644 (file)
@@ -1956,7 +1956,7 @@ struct used_address {
        unsigned int name_len;
 };
 
-static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
                         struct msghdr *msg_sys, unsigned int flags,
                         struct used_address *used_address)
 {
@@ -2071,22 +2071,30 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
  *     BSD sendmsg interface
  */
 
-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
+long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
 {
        int fput_needed, err;
        struct msghdr msg_sys;
-       struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+       struct socket *sock;
 
+       sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
 
-       err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
+       err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
 
        fput_light(sock->file, fput_needed);
 out:
        return err;
 }
 
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
+{
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+       return __sys_sendmsg(fd, msg, flags);
+}
+
 /*
  *     Linux sendmmsg interface
  */
@@ -2117,15 +2125,16 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
 
        while (datagrams < vlen) {
                if (MSG_CMSG_COMPAT & flags) {
-                       err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
-                                           &msg_sys, flags, &used_address);
+                       err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
+                                            &msg_sys, flags, &used_address);
                        if (err < 0)
                                break;
                        err = __put_user(err, &compat_entry->msg_len);
                        ++compat_entry;
                } else {
-                       err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
-                                           &msg_sys, flags, &used_address);
+                       err = ___sys_sendmsg(sock,
+                                            (struct msghdr __user *)entry,
+                                            &msg_sys, flags, &used_address);
                        if (err < 0)
                                break;
                        err = put_user(err, &entry->msg_len);
@@ -2149,10 +2158,12 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
 SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
                unsigned int, vlen, unsigned int, flags)
 {
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
        return __sys_sendmmsg(fd, mmsg, vlen, flags);
 }
 
-static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
                         struct msghdr *msg_sys, unsigned int flags, int nosec)
 {
        struct compat_msghdr __user *msg_compat =
@@ -2244,23 +2255,31 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
  *     BSD recvmsg interface
  */
 
-SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
-               unsigned int, flags)
+long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags)
 {
        int fput_needed, err;
        struct msghdr msg_sys;
-       struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+       struct socket *sock;
 
+       sock = sockfd_lookup_light(fd, &err, &fput_needed);
        if (!sock)
                goto out;
 
-       err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0);
+       err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
 
        fput_light(sock->file, fput_needed);
 out:
        return err;
 }
 
+SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+               unsigned int, flags)
+{
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+       return __sys_recvmsg(fd, msg, flags);
+}
+
 /*
  *     Linux recvmmsg interface
  */
@@ -2298,17 +2317,18 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
                 * No need to ask LSM for more than the first datagram.
                 */
                if (MSG_CMSG_COMPAT & flags) {
-                       err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
-                                           &msg_sys, flags & ~MSG_WAITFORONE,
-                                           datagrams);
+                       err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+                                            &msg_sys, flags & ~MSG_WAITFORONE,
+                                            datagrams);
                        if (err < 0)
                                break;
                        err = __put_user(err, &compat_entry->msg_len);
                        ++compat_entry;
                } else {
-                       err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
-                                           &msg_sys, flags & ~MSG_WAITFORONE,
-                                           datagrams);
+                       err = ___sys_recvmsg(sock,
+                                            (struct msghdr __user *)entry,
+                                            &msg_sys, flags & ~MSG_WAITFORONE,
+                                            datagrams);
                        if (err < 0)
                                break;
                        err = put_user(err, &entry->msg_len);
@@ -2375,6 +2395,9 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
        int datagrams;
        struct timespec timeout_sys;
 
+       if (flags & MSG_CMSG_COMPAT)
+               return -EINVAL;
+
        if (!timeout)
                return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
 
@@ -2412,7 +2435,7 @@ static const unsigned char nargs[21] = {
 
 SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
 {
-       unsigned long a[6];
+       unsigned long a[AUDITSC_ARGS];
        unsigned long a0, a1;
        int err;
        unsigned int len;
@@ -2428,7 +2451,9 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
        if (copy_from_user(a, args, len))
                return -EFAULT;
 
-       audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+       err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+       if (err)
+               return err;
 
        a0 = a[0];
        a1 = a[1];
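The socket.c changes split the old static helpers into ___sys_* workers and reject the kernel-internal MSG_CMSG_COMPAT bit at the native sendmsg/recvmsg/sendmmsg/recvmmsg entry points. A hedged user-space sketch of what that rejection looks like; the 0x80000000 value and the EINVAL outcome assume a CONFIG_COMPAT kernel carrying this change:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* MSG_CMSG_COMPAT is kernel-internal (0x80000000 with CONFIG_COMPAT);
 * the new checks above reject it at the native syscall boundary. */
#define KERNEL_MSG_CMSG_COMPAT 0x80000000

int main(void)
{
        struct msghdr msg;
        int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

        memset(&msg, 0, sizeof(msg));
        if (sendmsg(fd, &msg, KERNEL_MSG_CMSG_COMPAT) < 0)
                printf("sendmsg: %s\n", strerror(errno)); /* EINVAL on patched kernels */
        return 0;
}
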
index 7da6b457f66abfab016fd8b21aeedcb14d5e7ff0..fc2f78d6a9b46fae51a3ba366bc50c23e9238101 100644 (file)
@@ -52,6 +52,8 @@
 #include <linux/sunrpc/gss_api.h>
 #include <asm/uaccess.h>
 
+#include "../netns.h"
+
 static const struct rpc_authops authgss_ops;
 
 static const struct rpc_credops gss_credops;
@@ -85,8 +87,6 @@ struct gss_auth {
 };
 
 /* pipe_version >= 0 if and only if someone has a pipe open. */
-static int pipe_version = -1;
-static atomic_t pipe_users = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(pipe_version_lock);
 static struct rpc_wait_queue pipe_version_rpc_waitqueue;
 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
@@ -266,24 +266,27 @@ struct gss_upcall_msg {
        char databuf[UPCALL_BUF_LEN];
 };
 
-static int get_pipe_version(void)
+static int get_pipe_version(struct net *net)
 {
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        int ret;
 
        spin_lock(&pipe_version_lock);
-       if (pipe_version >= 0) {
-               atomic_inc(&pipe_users);
-               ret = pipe_version;
+       if (sn->pipe_version >= 0) {
+               atomic_inc(&sn->pipe_users);
+               ret = sn->pipe_version;
        } else
                ret = -EAGAIN;
        spin_unlock(&pipe_version_lock);
        return ret;
 }
 
-static void put_pipe_version(void)
+static void put_pipe_version(struct net *net)
 {
-       if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
-               pipe_version = -1;
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+       if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
+               sn->pipe_version = -1;
                spin_unlock(&pipe_version_lock);
        }
 }
@@ -291,9 +294,10 @@ static void put_pipe_version(void)
 static void
 gss_release_msg(struct gss_upcall_msg *gss_msg)
 {
+       struct net *net = rpc_net_ns(gss_msg->auth->client);
        if (!atomic_dec_and_test(&gss_msg->count))
                return;
-       put_pipe_version();
+       put_pipe_version(net);
        BUG_ON(!list_empty(&gss_msg->list));
        if (gss_msg->ctx != NULL)
                gss_put_ctx(gss_msg->ctx);
@@ -439,7 +443,10 @@ static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
                                struct rpc_clnt *clnt,
                                const char *service_name)
 {
-       if (pipe_version == 0)
+       struct net *net = rpc_net_ns(clnt);
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+       if (sn->pipe_version == 0)
                gss_encode_v0_msg(gss_msg);
        else /* pipe_version == 1 */
                gss_encode_v1_msg(gss_msg, clnt, service_name);
@@ -455,7 +462,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
        gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
        if (gss_msg == NULL)
                return ERR_PTR(-ENOMEM);
-       vers = get_pipe_version();
+       vers = get_pipe_version(rpc_net_ns(clnt));
        if (vers < 0) {
                kfree(gss_msg);
                return ERR_PTR(vers);
@@ -559,24 +566,34 @@ gss_refresh_upcall(struct rpc_task *task)
 static inline int
 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
 {
+       struct net *net = rpc_net_ns(gss_auth->client);
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct rpc_pipe *pipe;
        struct rpc_cred *cred = &gss_cred->gc_base;
        struct gss_upcall_msg *gss_msg;
+       unsigned long timeout;
        DEFINE_WAIT(wait);
-       int err = 0;
+       int err;
 
        dprintk("RPC:       %s for uid %u\n",
                __func__, from_kuid(&init_user_ns, cred->cr_uid));
 retry:
+       err = 0;
+       /* Default timeout is 15s unless we know that gssd is not running */
+       timeout = 15 * HZ;
+       if (!sn->gssd_running)
+               timeout = HZ >> 2;
        gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
        if (PTR_ERR(gss_msg) == -EAGAIN) {
                err = wait_event_interruptible_timeout(pipe_version_waitqueue,
-                               pipe_version >= 0, 15*HZ);
-               if (pipe_version < 0) {
+                               sn->pipe_version >= 0, timeout);
+               if (sn->pipe_version < 0) {
+                       if (err == 0)
+                               sn->gssd_running = 0;
                        warn_gssd();
                        err = -EACCES;
                }
-               if (err)
+               if (err < 0)
                        goto out;
                goto retry;
        }
@@ -707,20 +724,22 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 
 static int gss_pipe_open(struct inode *inode, int new_version)
 {
+       struct net *net = inode->i_sb->s_fs_info;
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        int ret = 0;
 
        spin_lock(&pipe_version_lock);
-       if (pipe_version < 0) {
+       if (sn->pipe_version < 0) {
                /* First open of any gss pipe determines the version: */
-               pipe_version = new_version;
+               sn->pipe_version = new_version;
                rpc_wake_up(&pipe_version_rpc_waitqueue);
                wake_up(&pipe_version_waitqueue);
-       } else if (pipe_version != new_version) {
+       } else if (sn->pipe_version != new_version) {
                /* Trying to open a pipe of a different version */
                ret = -EBUSY;
                goto out;
        }
-       atomic_inc(&pipe_users);
+       atomic_inc(&sn->pipe_users);
 out:
        spin_unlock(&pipe_version_lock);
        return ret;
@@ -740,6 +759,7 @@ static int gss_pipe_open_v1(struct inode *inode)
 static void
 gss_pipe_release(struct inode *inode)
 {
+       struct net *net = inode->i_sb->s_fs_info;
        struct rpc_pipe *pipe = RPC_I(inode)->pipe;
        struct gss_upcall_msg *gss_msg;
 
@@ -758,7 +778,7 @@ gss_pipe_release(struct inode *inode)
        }
        spin_unlock(&pipe->lock);
 
-       put_pipe_version();
+       put_pipe_version(net);
 }
 
 static void
index 5c4c61d527e2dbc1752e1d2ddbfc04417e4fc286..357f613df7ff49d3460e37d3f338cb89c7029a69 100644 (file)
 #include <linux/sunrpc/svcauth.h>
 #include "gss_rpc_xdr.h"
 
-static bool gssx_check_pointer(struct xdr_stream *xdr)
-{
-       __be32 *p;
-
-       p = xdr_reserve_space(xdr, 4);
-       if (unlikely(p == NULL))
-               return -ENOSPC;
-       return *p?true:false;
-}
-
 static int gssx_enc_bool(struct xdr_stream *xdr, int v)
 {
        __be32 *p;
@@ -264,25 +254,27 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
        if (unlikely(p == NULL))
                return -ENOSPC;
        count = be32_to_cpup(p++);
-       if (count != 0) {
-               /* we recognize only 1 currently: CREDS_VALUE */
-               oa->count = 1;
+       if (!count)
+               return 0;
 
-               oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
-               if (!oa->data)
-                       return -ENOMEM;
+       /* we recognize only 1 currently: CREDS_VALUE */
+       oa->count = 1;
 
-               creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
-               if (!creds) {
-                       kfree(oa->data);
-                       return -ENOMEM;
-               }
+       oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
+       if (!oa->data)
+               return -ENOMEM;
 
-               oa->data[0].option.data = CREDS_VALUE;
-               oa->data[0].option.len = sizeof(CREDS_VALUE);
-               oa->data[0].value.data = (void *)creds;
-               oa->data[0].value.len = 0;
+       creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+       if (!creds) {
+               kfree(oa->data);
+               return -ENOMEM;
        }
+
+       oa->data[0].option.data = CREDS_VALUE;
+       oa->data[0].option.len = sizeof(CREDS_VALUE);
+       oa->data[0].value.data = (void *)creds;
+       oa->data[0].value.len = 0;
+
        for (i = 0; i < count; i++) {
                gssx_buffer dummy = { 0, NULL };
                u32 length;
@@ -800,6 +792,7 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
                                struct xdr_stream *xdr,
                                struct gssx_res_accept_sec_context *res)
 {
+       u32 value_follows;
        int err;
 
        /* res->status */
@@ -808,7 +801,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
                return err;
 
        /* res->context_handle */
-       if (gssx_check_pointer(xdr)) {
+       err = gssx_dec_bool(xdr, &value_follows);
+       if (err)
+               return err;
+       if (value_follows) {
                err = gssx_dec_ctx(xdr, res->context_handle);
                if (err)
                        return err;
@@ -817,7 +813,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
        }
 
        /* res->output_token */
-       if (gssx_check_pointer(xdr)) {
+       err = gssx_dec_bool(xdr, &value_follows);
+       if (err)
+               return err;
+       if (value_follows) {
                err = gssx_dec_buffer(xdr, res->output_token);
                if (err)
                        return err;
@@ -826,7 +825,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
        }
 
        /* res->delegated_cred_handle */
-       if (gssx_check_pointer(xdr)) {
+       err = gssx_dec_bool(xdr, &value_follows);
+       if (err)
+               return err;
+       if (value_follows) {
                /* we do not support upcall servers sending this data. */
                return -EINVAL;
        }
index 871c73c921654b14b14eface6b4f2cfe0f27d5d2..29b4ba93ab3cb795fb8dd307f6e248bfcc3a1495 100644 (file)
@@ -1287,7 +1287,7 @@ static bool use_gss_proxy(struct net *net)
 
 #ifdef CONFIG_PROC_FS
 
-static bool set_gss_proxy(struct net *net, int type)
+static int set_gss_proxy(struct net *net, int type)
 {
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        int ret = 0;
@@ -1317,10 +1317,12 @@ static inline bool gssp_ready(struct sunrpc_net *sn)
        return false;
 }
 
-static int wait_for_gss_proxy(struct net *net)
+static int wait_for_gss_proxy(struct net *net, struct file *file)
 {
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
+       if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
+               return -EAGAIN;
        return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
 }
 
@@ -1362,7 +1364,7 @@ static ssize_t read_gssp(struct file *file, char __user *buf,
        size_t len;
        int ret;
 
-       ret = wait_for_gss_proxy(net);
+       ret = wait_for_gss_proxy(net, file);
        if (ret)
                return ret;
 
index 7111a4c9113baceef38042da1ff41a100ff58b21..74d948f5d5a1d399d13061ee5c910c2cacc6f720 100644 (file)
@@ -28,7 +28,11 @@ struct sunrpc_net {
        wait_queue_head_t gssp_wq;
        struct rpc_clnt *gssp_clnt;
        int use_gss_proxy;
+       int pipe_version;
+       atomic_t pipe_users;
        struct proc_dir_entry *use_gssp_proc;
+
+       unsigned int gssd_running;
 };
 
 extern int sunrpc_net_id;
index a9129f8d70706f5e33ac8f39cb3ea8a3e5825ca2..e7ce4b3eb0bdde4f209ba2cdf6cd1e3dbcca03f9 100644 (file)
@@ -216,11 +216,14 @@ rpc_destroy_inode(struct inode *inode)
 static int
 rpc_pipe_open(struct inode *inode, struct file *filp)
 {
+       struct net *net = inode->i_sb->s_fs_info;
+       struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct rpc_pipe *pipe;
        int first_open;
        int res = -ENXIO;
 
        mutex_lock(&inode->i_mutex);
+       sn->gssd_running = 1;
        pipe = RPC_I(inode)->pipe;
        if (pipe == NULL)
                goto out;
@@ -1069,6 +1072,8 @@ void rpc_pipefs_init_net(struct net *net)
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
        mutex_init(&sn->pipefs_sb_lock);
+       sn->gssd_running = 1;
+       sn->pipe_version = -1;
 }
 
 /*
index f8529fc8e54275c5b7b9809f0219f20608ffb472..5356b120dbf8e2fe61ba88081d1fab1941f135a9 100644 (file)
@@ -324,11 +324,17 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
  * Note: If the task is ASYNC, and is being made runnable after sitting on an
  * rpc_wait_queue, this must be called with the queue spinlock held to protect
  * the wait queue operation.
+ * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
+ * which is needed to ensure that __rpc_execute() doesn't loop (due to the
+ * lockless RPC_IS_QUEUED() test) before we've had a chance to test
+ * the RPC_TASK_RUNNING flag.
  */
 static void rpc_make_runnable(struct rpc_task *task)
 {
+       bool need_wakeup = !rpc_test_and_set_running(task);
+
        rpc_clear_queued(task);
-       if (rpc_test_and_set_running(task))
+       if (!need_wakeup)
                return;
        if (RPC_IS_ASYNC(task)) {
                INIT_WORK(&task->u.tk_work, rpc_async_schedule);
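The rpc_make_runnable() hunk reorders the RUNNING and QUEUED bit updates so a lockless RPC_IS_QUEUED() test elsewhere cannot see the task dequeued before RUNNING is set. A small C11-atomics sketch of the same ordering; the names and flag values are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative flags; the kernel uses its own task-state bits and helpers. */
enum { TASK_QUEUED = 1 << 0, TASK_RUNNING = 1 << 1 };

static _Atomic unsigned int task_state = TASK_QUEUED;

static bool make_runnable(void)
{
        /* Set RUNNING first (test-and-set) ... */
        bool need_wakeup =
                !(atomic_fetch_or(&task_state, TASK_RUNNING) & TASK_RUNNING);
        /* ... and only then clear QUEUED, so a lockless "queued?" check
         * cannot observe !QUEUED while RUNNING is still unset. */
        atomic_fetch_and(&task_state, ~(unsigned int)TASK_QUEUED);
        return need_wakeup;
}

int main(void)
{
        printf("wakeup needed: %d\n", make_runnable());
        return 0;
}
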
index c3f9e1ef7f531857f432993896d13d0ae8442876..06bdf5a1082c850030650fc3957f47ef75ebda8a 100644 (file)
@@ -810,11 +810,15 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
                goto badcred;
        argv->iov_base = (void*)((__be32*)argv->iov_base + slen);       /* skip machname */
        argv->iov_len -= slen*4;
-
+       /*
+        * Note: we skip uid_valid()/gid_valid() checks here for
+        * backwards compatibility with clients that use -1 id's.
+        * Instead, -1 uid or gid is later mapped to the
+        * (export-specific) anonymous id by nfsd_setuser.
+        * Supplementary gid's will be left alone.
+        */
        cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */
        cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */
-       if (!uid_valid(cred->cr_uid) || !gid_valid(cred->cr_gid))
-               goto badcred;
        slen = svc_getnl(argv);                 /* gids length */
        if (slen > 16 || (len -= (slen + 2)*4) < 0)
                goto badcred;
@@ -823,8 +827,6 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
                return SVC_CLOSE;
        for (i = 0; i < slen; i++) {
                kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
-               if (!gid_valid(kgid))
-                       goto badcred;
                GROUP_AT(cred->cr_group_info, i) = kgid;
        }
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
index bcfda8921b5bf944d38ac3e087eb1cccfac327fe..0cf003dfa8fcd3d4f2f974f21649b164f76429b3 100644 (file)
@@ -64,6 +64,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 
                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
+                       err = -EINVAL;
                        goto error;
                }
 
index 23cea0f74336c72b332e909994e242d216e5e2ae..ea970b8002a20a614a5bc9fe89afa3c39b131180 100644 (file)
@@ -2557,11 +2557,12 @@ static void __xfrm_garbage_collect(struct net *net)
        }
 }
 
-static void xfrm_garbage_collect(struct net *net)
+void xfrm_garbage_collect(struct net *net)
 {
        flow_cache_flush();
        __xfrm_garbage_collect(net);
 }
+EXPORT_SYMBOL(xfrm_garbage_collect);
 
 static void xfrm_garbage_collect_deferred(struct net *net)
 {
index aa778748c56592728866ce24096bf46447c2d13c..3f565e495ac68cea83e1d52cf7db7e820fe777ad 100644 (file)
@@ -1681,6 +1681,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 out:
        xfrm_pol_put(xp);
+       if (delete && err == 0)
+               xfrm_garbage_collect(net);
        return err;
 }
 
index 51bb3de680b67b78071ae7f9dcc03ae387c52b60..8337663aa298a0259d6c08ae93a7b7581d2691a5 100644 (file)
@@ -264,7 +264,7 @@ $(obj)/%.dtb.S: $(obj)/%.dtb
 quiet_cmd_dtc = DTC     $@
 cmd_dtc = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
        $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 \
-               -i $(srctree)/arch/$(SRCARCH)/boot/dts $(DTC_FLAGS) \
+               -i $(dir $<) $(DTC_FLAGS) \
                -d $(depfile).dtc $(dtc-tmp) ; \
        cat $(depfile).pre $(depfile).dtc > $(depfile)
 
index bb4d3deb6d1c0add7815115a14a574212c0f3d90..a65ecbbdd32a90fbadf50aa9622ce555294cf350 100755 (executable)
@@ -105,7 +105,7 @@ while [ "$1" != "" ] ; do
                ;;
        --refresh)
                ;;
-       --*-after)
+       --*-after|-E|-D|-M)
                checkarg "$1"
                A=$ARG
                checkarg "$2"
index 48d382e7e3746bebd850114d4f7557dbc37c49db..38cd69c5660e5163bfc402617de0d702e3372bd6 100644 (file)
@@ -303,10 +303,11 @@ int dialog_menu(const char *title, const char *prompt,
                                }
                }
 
-               if (i < max_choice ||
-                   key == KEY_UP || key == KEY_DOWN ||
-                   key == '-' || key == '+' ||
-                   key == KEY_PPAGE || key == KEY_NPAGE) {
+               if (item_count() != 0 &&
+                   (i < max_choice ||
+                    key == KEY_UP || key == KEY_DOWN ||
+                    key == '-' || key == '+' ||
+                    key == KEY_PPAGE || key == KEY_NPAGE)) {
                        /* Remove highligt of current item */
                        print_item(scroll + choice, choice, FALSE);
 
index 387dc8daf7b2d43ff96b735f6bd60b4c650f85a8..a69cbd78fb38e62f3f6c3978ef6a3935551aaabc 100644 (file)
@@ -670,11 +670,12 @@ static void conf(struct menu *menu, struct menu *active_menu)
                                  active_menu, &s_scroll);
                if (res == 1 || res == KEY_ESC || res == -ERRDISPLAYTOOSMALL)
                        break;
-               if (!item_activate_selected())
-                       continue;
-               if (!item_tag())
-                       continue;
-
+               if (item_count() != 0) {
+                       if (!item_activate_selected())
+                               continue;
+                       if (!item_tag())
+                               continue;
+               }
                submenu = item_data();
                active_menu = item_data();
                if (submenu)
index b5c7d90df9df801dac0ca12d64b609e686e74b62..fd3f0180e08fbafb537e128c9e46641288c68774 100644 (file)
@@ -146,11 +146,24 @@ struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *e
                        struct menu *menu = current_entry;
 
                        while ((menu = menu->parent) != NULL) {
+                               struct expr *dup_expr;
+
                                if (!menu->visibility)
                                        continue;
+                               /*
+                                * Do not add a reference to the
+                                * menu's visibility expression but
+                                * use a copy of it.  Otherwise the
+                                * expression reduction functions
+                                * will modify expressions that have
+                                * multiple references which can
+                                * cause unwanted side effects.
+                                */
+                               dup_expr = expr_copy(menu->visibility);
+
                                prop->visible.expr
                                        = expr_alloc_and(prop->visible.expr,
-                                                        menu->visibility);
+                                                        dup_expr);
                        }
                }
 
index 84a406070f6f2b3f7ca48998bdce1a7d1ad7c3b4..a4f31c900fa6a44ab75d1a4f3916d2bb1075c448 100644 (file)
@@ -63,7 +63,7 @@ binrpm-pkg: FORCE
        mv -f $(objtree)/.tmp_version $(objtree)/.version
 
        $(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \
-               $(UTS_MACHINE) -bb $<
+               $(UTS_MACHINE) -bb $(objtree)/binkernel.spec
        rm binkernel.spec
 
 # Deb target
index 8ab2951545170e6e81fb71fc7ee1e8cad5c2dd72..d030818862146732ebe30c8cc3f266d485ef0677 100644 (file)
@@ -316,6 +316,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
 
                memcpy(new_ctx, old_ctx, sizeof(*new_ctx));
                memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len);
+               atomic_inc(&selinux_xfrm_refcount);
                *new_ctxp = new_ctx;
        }
        return 0;
@@ -326,6 +327,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
  */
 void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
 {
+       atomic_dec(&selinux_xfrm_refcount);
        kfree(ctx);
 }
 
@@ -335,17 +337,13 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
 int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
 {
        const struct task_security_struct *tsec = current_security();
-       int rc = 0;
 
-       if (ctx) {
-               rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
-                                 SECCLASS_ASSOCIATION,
-                                 ASSOCIATION__SETCONTEXT, NULL);
-               if (rc == 0)
-                       atomic_dec(&selinux_xfrm_refcount);
-       }
+       if (!ctx)
+               return 0;
 
-       return rc;
+       return avc_has_perm(tsec->sid, ctx->ctx_sid,
+                           SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+                           NULL);
 }
 
 /*
@@ -370,8 +368,8 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct
  */
 void selinux_xfrm_state_free(struct xfrm_state *x)
 {
-       struct xfrm_sec_ctx *ctx = x->security;
-       kfree(ctx);
+       atomic_dec(&selinux_xfrm_refcount);
+       kfree(x->security);
 }
 
  /*
@@ -381,17 +379,13 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
 {
        const struct task_security_struct *tsec = current_security();
        struct xfrm_sec_ctx *ctx = x->security;
-       int rc = 0;
 
-       if (ctx) {
-               rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
-                                 SECCLASS_ASSOCIATION,
-                                 ASSOCIATION__SETCONTEXT, NULL);
-               if (rc == 0)
-                       atomic_dec(&selinux_xfrm_refcount);
-       }
+       if (!ctx)
+               return 0;
 
-       return rc;
+       return avc_has_perm(tsec->sid, ctx->ctx_sid,
+                           SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+                           NULL);
 }
 
 /*
index 552b97afbca5346a96ca6187602ba0326ce4ef28..61ab640e195f3cbf6ee30d574116c797eead5a99 100644 (file)
@@ -113,6 +113,7 @@ MODULE_ALIAS("sound-layout-100");
 MODULE_ALIAS("aoa-device-id-14");
 MODULE_ALIAS("aoa-device-id-22");
 MODULE_ALIAS("aoa-device-id-35");
+MODULE_ALIAS("aoa-device-id-44");
 
 /* onyx with all but microphone connected */
 static struct codec_connection onyx_connections_nomic[] = {
@@ -361,6 +362,13 @@ static struct layout layouts[] = {
                .connections = tas_connections_nolineout,
          },
        },
+       /* PowerBook6,5 */
+       { .device_id = 44,
+         .codecs[0] = {
+               .name = "tas",
+               .connections = tas_connections_all,
+         },
+       },
        /* PowerBook6,7 */
        { .layout_id = 80,
          .codecs[0] = {
index 010658335881cf133b918d2159817267c375a522..15e76131b5015dd88cccf93ea08837694b1f03c7 100644 (file)
@@ -200,7 +200,8 @@ static int i2sbus_add_dev(struct macio_dev *macio,
                         * We probably cannot handle all device-id machines,
                         * so restrict to those we do handle for now.
                         */
-                       if (id && (*id == 22 || *id == 14 || *id == 35)) {
+                       if (id && (*id == 22 || *id == 14 || *id == 35 ||
+                                  *id == 44)) {
                                snprintf(dev->sound.modalias, 32,
                                         "aoa-device-id-%d", *id);
                                ok = 1;
index 071ce1b5f2b40bb7b6d1558ee28f42fc1a99dd96..872d59e35ee23583cc60a2be24afa5ba1bf3c88d 100644 (file)
@@ -583,8 +583,6 @@ static int atmel_abdac_remove(struct platform_device *pdev)
        free_irq(dac->irq, dac);
        snd_card_free(card);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 6b7e2b5a72dead0fe1e542fa47460e3184929a0d..ae63d22c0f883e48ddb0b37ee6aad40ac6f8943c 100644 (file)
@@ -1199,8 +1199,6 @@ static int atmel_ac97c_remove(struct platform_device *pdev)
        snd_card_set_dev(card, NULL);
        snd_card_free(card);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 7420c59444ab41c442cb774be05f869019c5fc86..2b7f6e8bdd24343bd41ce854bb93383815386d6b 100644 (file)
@@ -922,7 +922,6 @@ static int hal2_remove(struct platform_device *pdev)
        struct snd_card *card = platform_get_drvdata(pdev);
 
        snd_card_free(card);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 01a03efdc8b042059668e24274ac25799af63029..cfe99ae149fed9f55d70dc10f23e12e59fa4ee34 100644 (file)
@@ -963,7 +963,6 @@ static int snd_sgio2audio_remove(struct platform_device *pdev)
        struct snd_card *card = platform_get_drvdata(pdev);
 
        snd_card_free(card);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 5849b129e50d2c73802f53369bbae3776cf4b08d..1a9640254433e22ce61264a006fd6d52db0aad97 100644 (file)
@@ -250,6 +250,7 @@ config MSND_FIFOSIZE
 menuconfig SOUND_OSS
        tristate "OSS sound modules"
        depends on ISA_DMA_API && VIRT_TO_BUS
+       depends on !GENERIC_ISA_DMA_SUPPORT_BROKEN
        help
          OSS is the Open Sound System suite of sound card drivers.  They make
          sound programming easier since they provide a common API.  Say Y or
index 6f9b64700f6e48ec859bb7153a5bc119812062b4..55108b5fb2919c597177d9dc5f28b56cb34d28a2 100644 (file)
@@ -681,6 +681,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
        struct hda_bus_unsolicited *unsol;
        unsigned int wp;
 
+       if (!bus || !bus->workq)
+               return 0;
+
        trace_hda_unsol_event(bus, res, res_ex);
        unsol = bus->unsol;
        if (!unsol)
@@ -1580,7 +1583,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                    "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n",
                    nid, stream_tag, channel_id, format);
        p = get_hda_cvt_setup(codec, nid);
-       if (!p || p->active)
+       if (!p)
                return;
 
        if (codec->pcm_format_first)
@@ -1627,7 +1630,7 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
 
        snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid);
        p = get_hda_cvt_setup(codec, nid);
-       if (p && p->active) {
+       if (p) {
                /* here we just clear the active flag when do_now isn't set;
                 * actual clean-ups will be done later in
                 * purify_inactive_streams() called from snd_hda_codec_prpapre()
index ac079f93c5354ae576eca7745047883e4b0d051c..ae85bbd2e6f842a8fcc0feac57ecc709cfd4b6b3 100644 (file)
@@ -606,6 +606,10 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
        return false;
 }
 
+/* check whether the NID is referred by any active paths */
+#define is_active_nid_for_any(codec, nid) \
+       is_active_nid(codec, nid, HDA_OUTPUT, 0)
+
 /* get the default amp value for the target state */
 static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
                                   int dir, unsigned int caps, bool enable)
@@ -759,7 +763,8 @@ static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path)
 
        for (i = 0; i < path->depth; i++) {
                hda_nid_t nid = path->path[i];
-               if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3)) {
+               if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3) &&
+                   !is_active_nid_for_any(codec, nid)) {
                        snd_hda_codec_write(codec, nid, 0,
                                            AC_VERB_SET_POWER_STATE,
                                            AC_PWRST_D3);
@@ -4157,7 +4162,7 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
                return power_state;
        if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER)
                return power_state;
-       if (is_active_nid(codec, nid, HDA_OUTPUT, 0))
+       if (is_active_nid_for_any(codec, nid))
                return power_state;
        return AC_PWRST_D3;
 }
index 7b213d589ef654ba27b7056a4dd77b5b091de49c..de18722c487346858783fa5d5e9f9fd574b70b39 100644 (file)
@@ -615,7 +615,7 @@ enum {
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
        (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
-        AZX_DCAPS_ALIGN_BUFSIZE)
+        AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
 
 #define AZX_DCAPS_PRESET_CTHDA \
        (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
index 84b81c874a4a10413eaa807443b27330117463e5..b314d3e6d7fae5d0a576ccbe14eb45c54dfde2f9 100644 (file)
@@ -64,6 +64,7 @@ struct conexant_spec {
        /* extra EAPD pins */
        unsigned int num_eapds;
        hda_nid_t eapds[4];
+       bool dynamic_eapd;
 
 #ifdef ENABLE_CXT_STATIC_QUIRKS
        const struct snd_kcontrol_new *mixers[5];
@@ -3155,7 +3156,7 @@ static void cx_auto_parse_eapd(struct hda_codec *codec)
         * thus it might control over all pins.
         */
        if (spec->num_eapds > 2)
-               spec->gen.own_eapd_ctl = 1;
+               spec->dynamic_eapd = 1;
 }
 
 static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins,
@@ -3194,10 +3195,19 @@ static int cx_auto_build_controls(struct hda_codec *codec)
        return 0;
 }
 
+static int cx_auto_init(struct hda_codec *codec)
+{
+       struct conexant_spec *spec = codec->spec;
+       snd_hda_gen_init(codec);
+       if (!spec->dynamic_eapd)
+               cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
+       return 0;
+}
+
 static const struct hda_codec_ops cx_auto_patch_ops = {
        .build_controls = cx_auto_build_controls,
        .build_pcms = snd_hda_gen_build_pcms,
-       .init = snd_hda_gen_init,
+       .init = cx_auto_init,
        .free = snd_hda_gen_free,
        .unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
@@ -3348,7 +3358,8 @@ static int patch_conexant_auto(struct hda_codec *codec)
 
        cx_auto_parse_beep(codec);
        cx_auto_parse_eapd(codec);
-       if (spec->gen.own_eapd_ctl)
+       spec->gen.own_eapd_ctl = 1;
+       if (spec->dynamic_eapd)
                spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook;
 
        switch (codec->vendor_id) {
index 32930e66885452f0c32ad483842cb9f8b1a57701..e12f7a030c58efae2c5a93829ec6bfce1fb17ac7 100644 (file)
@@ -1832,12 +1832,10 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
 #define INTEL_EN_ALL_PIN_CVTS  0x01 /* enable 2nd & 3rd pins and convertors */
 
 static void intel_haswell_enable_all_pins(struct hda_codec *codec,
-                                       const struct hda_fixup *fix, int action)
+                                         bool update_tree)
 {
        unsigned int vendor_param;
 
-       if (action != HDA_FIXUP_ACT_PRE_PROBE)
-               return;
        vendor_param = snd_hda_codec_read(codec, INTEL_VENDOR_NID, 0,
                                INTEL_GET_VENDOR_VERB, 0);
        if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS)
@@ -1849,8 +1847,8 @@ static void intel_haswell_enable_all_pins(struct hda_codec *codec,
        if (vendor_param == -1)
                return;
 
-       snd_hda_codec_update_widgets(codec);
-       return;
+       if (update_tree)
+               snd_hda_codec_update_widgets(codec);
 }
 
 static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec)
@@ -1868,30 +1866,20 @@ static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec)
                                INTEL_SET_VENDOR_VERB, vendor_param);
 }
 
+/* Haswell needs to re-issue the vendor-specific verbs before turning to D0.
+ * Otherwise you may get severe h/w communication errors.
+ */
+static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg,
+                               unsigned int power_state)
+{
+       if (power_state == AC_PWRST_D0) {
+               intel_haswell_enable_all_pins(codec, false);
+               intel_haswell_fixup_enable_dp12(codec);
+       }
 
-
-/* available models for fixup */
-enum {
-       INTEL_HASWELL,
-};
-
-static const struct hda_model_fixup hdmi_models[] = {
-       {.id = INTEL_HASWELL, .name = "Haswell"},
-       {}
-};
-
-static const struct snd_pci_quirk hdmi_fixup_tbl[] = {
-       SND_PCI_QUIRK(0x8086, 0x2010, "Haswell", INTEL_HASWELL),
-       {} /* terminator */
-};
-
-static const struct hda_fixup hdmi_fixups[] = {
-       [INTEL_HASWELL] = {
-               .type = HDA_FIXUP_FUNC,
-               .v.func = intel_haswell_enable_all_pins,
-       },
-};
-
+       snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE, power_state);
+       snd_hda_codec_set_power_to_all(codec, fg, power_state);
+}
 
 static int patch_generic_hdmi(struct hda_codec *codec)
 {
@@ -1904,11 +1892,10 @@ static int patch_generic_hdmi(struct hda_codec *codec)
        codec->spec = spec;
        hdmi_array_init(spec, 4);
 
-       snd_hda_pick_fixup(codec, hdmi_models, hdmi_fixup_tbl, hdmi_fixups);
-       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
-
-       if (codec->vendor_id == 0x80862807)
+       if (codec->vendor_id == 0x80862807) {
+               intel_haswell_enable_all_pins(codec, true);
                intel_haswell_fixup_enable_dp12(codec);
+       }
 
        if (hdmi_parse_codec(codec) < 0) {
                codec->spec = NULL;
@@ -1916,6 +1903,9 @@ static int patch_generic_hdmi(struct hda_codec *codec)
                return -EINVAL;
        }
        codec->patch_ops = generic_hdmi_patch_ops;
+       if (codec->vendor_id == 0x80862807)
+               codec->patch_ops.set_power_state = haswell_set_power_state;
+
        generic_hdmi_init_per_pins(codec);
 
        init_channel_allocations();
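
The hunks above drop the HDA fixup-table plumbing for Haswell HDMI and instead override the codec's set_power_state op directly, so the vendor-specific verbs are re-issued whenever the codec returns to D0. A minimal standalone sketch of that callback-override pattern follows; the struct and function names are hypothetical stand-ins, not the real hda_codec API.

#include <stdio.h>

/* Hypothetical stand-ins for the codec object and its ops table;
 * the real driver uses struct hda_codec and struct hda_codec_ops. */
struct codec;
struct codec_ops {
	void (*set_power_state)(struct codec *c, unsigned int power_state);
};
struct codec {
	unsigned int vendor_id;
	struct codec_ops ops;	/* per-codec copy, so overrides stay local */
};

#define PWRST_D0 0
#define PWRST_D3 3

static void generic_set_power_state(struct codec *c, unsigned int state)
{
	printf("generic: power state -> D%u\n", state);
}

/* Vendor-specific wrapper: redo the setup verbs before entering D0,
 * then fall through to the generic transition. */
static void haswell_like_set_power_state(struct codec *c, unsigned int state)
{
	if (state == PWRST_D0)
		printf("haswell-like: re-issuing vendor setup verbs before D0\n");
	generic_set_power_state(c, state);
}

int main(void)
{
	struct codec c = { .vendor_id = 0x80862807 };

	/* Start from the generic ops, then patch in the override for the
	 * one codec that needs it, mirroring how patch_generic_hdmi()
	 * assigns the generic ops and then replaces a single hook. */
	c.ops.set_power_state = generic_set_power_state;
	if (c.vendor_id == 0x80862807)
		c.ops.set_power_state = haswell_like_set_power_state;

	c.ops.set_power_state(&c, PWRST_D3);
	c.ops.set_power_state(&c, PWRST_D0);
	return 0;
}
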
index 6bf47f7326ad4964fc3b4f3403360df6ef52bfd8..59d2e91a9ab6d066d529a76bec681359d3b51f19 100644 (file)
@@ -3482,6 +3482,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05c9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
index 114f69a0c6290b43cb90ab591e02af3f14cb4f9d..306d0bc8455faeb202f4c13d87892355350fa0b3 100644 (file)
 
 /* AB8500_ADSLOTSELX */
 #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD  0x00
-#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD  0x01
-#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD  0x02
-#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD  0x03
-#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD  0x04
-#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD  0x05
-#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD  0x06
-#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD  0x07
-#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD   0x08
-#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0x0F
+#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD  0x10
+#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD  0x20
+#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD  0x30
+#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD  0x40
+#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD  0x50
+#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD  0x60
+#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD  0x70
+#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD   0x80
+#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0xF0
 #define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN 0x00
-#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x10
-#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x20
-#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x30
-#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x40
-#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x50
-#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x60
-#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x70
-#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN  0x80
-#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN        0xF0
+#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x01
+#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x02
+#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x03
+#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x04
+#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x05
+#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x06
+#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x07
+#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN  0x08
+#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN        0x0F
 #define AB8500_ADSLOTSELX_EVEN_SHIFT           0
 #define AB8500_ADSLOTSELX_ODD_SHIFT            4
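
The corrected defines above put the odd-slot source in the high nibble and the even-slot source in the low nibble of each ADSLOTSELX register, matching the EVEN_SHIFT (0) and ODD_SHIFT (4) values. A small standalone sketch of how one register value is composed under that layout; adslotsel() is a hypothetical helper, not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* As in the corrected defines: odd-slot source lives in the high
 * nibble, even-slot source in the low nibble. */
#define ADSLOTSELX_EVEN_SHIFT 0
#define ADSLOTSELX_ODD_SHIFT  4

/* Hypothetical helper: build one ADSLOTSELX register value from the
 * AD_OUT source number (1..8) routed to the odd and even slot. */
static uint8_t adslotsel(unsigned int odd_src, unsigned int even_src)
{
	uint8_t odd_field  = (uint8_t)(odd_src - 1) << ADSLOTSELX_ODD_SHIFT;
	uint8_t even_field = (uint8_t)(even_src - 1) << ADSLOTSELX_EVEN_SHIFT;
	return odd_field | even_field;
}

int main(void)
{
	/* AD_OUT3 on the odd slot, AD_OUT1 on the even slot -> 0x20,
	 * i.e. AD_OUT3_TO_SLOT_ODD | AD_OUT1_TO_SLOT_EVEN. */
	printf("0x%02x\n", adslotsel(3, 1));
	return 0;
}
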
 
index 0f6f481cec09cd941688ed790ecd31e10e207496..030f53c96ec0b2b0f14b86d4d428e5e1ec07f505 100644 (file)
@@ -86,7 +86,7 @@ static const struct reg_default cs42l52_reg_defaults[] = {
        { CS42L52_BEEP_VOL, 0x00 },     /* r1D Beep Volume off Time */
        { CS42L52_BEEP_TONE_CTL, 0x00 },        /* r1E Beep Tone Cfg. */
        { CS42L52_TONE_CTL, 0x00 },     /* r1F Tone Ctl */
-       { CS42L52_MASTERA_VOL, 0x88 },  /* r20 Master A Volume */
+       { CS42L52_MASTERA_VOL, 0x00 },  /* r20 Master A Volume */
        { CS42L52_MASTERB_VOL, 0x00 },  /* r21 Master B Volume */
        { CS42L52_HPA_VOL, 0x00 },      /* r22 Headphone A Volume */
        { CS42L52_HPB_VOL, 0x00 },      /* r23 Headphone B Volume */
@@ -225,7 +225,7 @@ static const char * const mic_bias_level_text[] = {
 };
 
 static const struct soc_enum mic_bias_level_enum =
-       SOC_ENUM_SINGLE(CS42L52_IFACE_CTL1, 0,
+       SOC_ENUM_SINGLE(CS42L52_IFACE_CTL2, 0,
                        ARRAY_SIZE(mic_bias_level_text), mic_bias_level_text);
 
 static const char * const cs42l52_mic_text[] = { "Single", "Differential" };
@@ -413,7 +413,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
        SOC_ENUM("Headphone Analog Gain", hp_gain_enum),
 
        SOC_DOUBLE_R_SX_TLV("Speaker Volume", CS42L52_SPKA_VOL,
-                             CS42L52_SPKB_VOL, 7, 0x1, 0xff, hl_tlv),
+                             CS42L52_SPKB_VOL, 0, 0x1, 0xff, hl_tlv),
 
        SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL,
                              CS42L52_PASSTHRUB_VOL, 6, 0x18, 0x90, pga_tlv),
@@ -441,7 +441,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
 
        SOC_DOUBLE_R_SX_TLV("PCM Mixer Volume",
                            CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL,
-                               6, 0x7f, 0x19, hl_tlv),
+                               0, 0x7f, 0x19, hl_tlv),
        SOC_DOUBLE_R("PCM Mixer Switch",
                     CS42L52_PCMA_MIXER_VOL, CS42L52_PCMB_MIXER_VOL, 7, 1, 1),
 
index 60985c0590711e728fe2229fe459706d42d1ee57..4277012c471978ef1905a196c2d51cc43c636703 100644 (file)
 #define CS42L52_PB_CTL1_INV_PCMA               (1 << 2)
 #define CS42L52_PB_CTL1_MSTB_MUTE              (1 << 1)
 #define CS42L52_PB_CTL1_MSTA_MUTE              (1 << 0)
-#define CS42L52_PB_CTL1_MUTE_MASK              0xFFFD
+#define CS42L52_PB_CTL1_MUTE_MASK              0x03
 #define CS42L52_PB_CTL1_MUTE                   3
 #define CS42L52_PB_CTL1_UNMUTE                 0
 
index 41230ad1c3e038e8337c053f5948af2c60745eee..4a6f1daf911fc0685f16ea5f54fe774bc58581ac 100644 (file)
@@ -1488,17 +1488,17 @@ static int da7213_probe(struct snd_soc_codec *codec)
                                     DA7213_DMIC_DATA_SEL_SHIFT);
                        break;
                }
-               switch (pdata->dmic_data_sel) {
+               switch (pdata->dmic_samplephase) {
                case DA7213_DMIC_SAMPLE_ON_CLKEDGE:
                case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE:
-                       dmic_cfg |= (pdata->dmic_data_sel <<
+                       dmic_cfg |= (pdata->dmic_samplephase <<
                                     DA7213_DMIC_SAMPLEPHASE_SHIFT);
                        break;
                }
-               switch (pdata->dmic_data_sel) {
+               switch (pdata->dmic_clk_rate) {
                case DA7213_DMIC_CLK_3_0MHZ:
                case DA7213_DMIC_CLK_1_5MHZ:
-                       dmic_cfg |= (pdata->dmic_data_sel <<
+                       dmic_cfg |= (pdata->dmic_clk_rate <<
                                     DA7213_DMIC_CLK_RATE_SHIFT);
                        break;
                }
index ce0d36412c97d73eaa27c59de88c0ee44ce6066c..8d14a76c7249ade32bdc627fee92a5cefe6eaf91 100644 (file)
@@ -2233,7 +2233,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
        dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
 
        ret = request_threaded_irq(max98090->irq, NULL,
-               max98090_interrupt, IRQF_TRIGGER_FALLING,
+               max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                "max98090_interrupt", codec);
        if (ret < 0) {
                dev_err(codec->dev, "request_irq failed: %d\n",
index 8df2b6e1a1a6c7fa74cb6cb0b8f94afced8efc2e..370af0cbcc9a97393c695a55936fb22b6ec6e103 100644 (file)
@@ -667,6 +667,7 @@ static int wm0010_boot(struct snd_soc_codec *codec)
                /* On wm0010 only the CLKCTRL1 value is used */
                pll_rec.clkctrl1 = wm0010->pll_clkctrl1;
 
+               ret = -ENOMEM;
                len = pll_rec.length + 8;
                out = kzalloc(len, GFP_KERNEL);
                if (!out) {
index 731884e04776289449e0a135905eb883a217cb80..ba38f0679662784769aed8c0c6293f0da5d7e4ca 100644 (file)
@@ -190,7 +190,7 @@ ARIZONA_MIXER_CONTROLS("DSP2R", ARIZONA_DSP2RMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DSP3L", ARIZONA_DSP3LMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DSP3R", ARIZONA_DSP3RMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DSP4L", ARIZONA_DSP4LMIX_INPUT_1_SOURCE),
-ARIZONA_MIXER_CONTROLS("DSP5R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DSP4R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE),
 
 ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE),
@@ -976,6 +976,8 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
        if (ret != 0)
                return ret;
 
+       arizona_init_spk(codec);
+
        snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
 
        priv->core.arizona->dapm = &codec->dapm;
index 14094f558e031999a9e791843d95fd415296f36c..dfd997aaadfca3077882c2bcd24e8d8f475cd70f 100644 (file)
@@ -383,6 +383,8 @@ static int wm8994_get_drc_enum(struct snd_kcontrol *kcontrol,
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
        int drc = wm8994_get_drc(kcontrol->id.name);
 
+       if (drc < 0)
+               return drc;
        ucontrol->value.enumerated.item[0] = wm8994->drc_cfg[drc];
 
        return 0;
@@ -488,6 +490,9 @@ static int wm8994_get_retune_mobile_enum(struct snd_kcontrol *kcontrol,
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
        int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
 
+       if (block < 0)
+               return block;
+
        ucontrol->value.enumerated.item[0] = wm8994->retune_mobile_cfg[block];
 
        return 0;
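
The two wm8994 hunks above guard against the name-to-index lookups returning a negative error code before the result is used as an array index. A compact standalone illustration of that contract, using a made-up config table rather than the real control names.

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Hypothetical name table standing in for the wm8994 control names. */
static const char *cfg_names[] = { "AIF1DRC1", "AIF1DRC2", "AIF2DRC" };
static int cfg_values[3] = { 0, 1, 0 };

/* Returns the table index for a control name, or -EINVAL if unknown,
 * the same contract as the wm8994_get_drc()-style lookups above. */
static int get_cfg_index(const char *name)
{
	int i;

	for (i = 0; i < 3; i++)
		if (strncmp(name, cfg_names[i], strlen(cfg_names[i])) == 0)
			return i;
	return -EINVAL;
}

static int get_cfg_value(const char *name, int *value)
{
	int idx = get_cfg_index(name);

	/* The fix above: propagate the error instead of indexing with it. */
	if (idx < 0)
		return idx;
	*value = cfg_values[idx];
	return 0;
}

int main(void)
{
	int v;

	printf("known:   %d\n", get_cfg_value("AIF2DRC Mode", &v));
	printf("unknown: %d\n", get_cfg_value("Bogus", &v));
	return 0;
}
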
@@ -1031,7 +1036,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
 {
        struct snd_soc_codec *codec = w->codec;
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
-       struct wm8994 *control = codec->control_data;
+       struct wm8994 *control = wm8994->wm8994;
        int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
        int i;
        int dac;
@@ -2882,6 +2887,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
                default:
                        return 0;
                }
+               break;
        default:
                return 0;
        }
@@ -3832,6 +3838,11 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
                        dev_dbg(codec->dev, "Ignoring removed jack\n");
                        return IRQ_HANDLED;
                }
+       } else if (!(reg & WM8958_MICD_STS)) {
+               snd_soc_jack_report(wm8994->micdet[0].jack, 0,
+                                   SND_JACK_MECHANICAL | SND_JACK_HEADSET |
+                                   wm8994->btn_mask);
+               goto out;
        }
 
        if (wm8994->mic_detecting)
index 8b85049daab08aa0592760cb3c4f67f2ab5bf7f0..81490febac6dc108decb890ac318ffbec9c2ab8b 100644 (file)
@@ -505,7 +505,10 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
                mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
-               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, ACLKX | AFSX);
+               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+                               ACLKX | ACLKR);
+               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+                               AFSX | AFSR);
                break;
        case SND_SOC_DAIFMT_CBM_CFS:
                /* codec is clock master and frame slave */
@@ -565,7 +568,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
                mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
 
-               mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+               mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
                mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
                break;
 
@@ -628,7 +631,8 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
                                       int word_length)
 {
        u32 fmt;
-       u32 rotate = (word_length / 4) & 0x7;
+       u32 tx_rotate = (word_length / 4) & 0x7;
+       u32 rx_rotate = (32 - word_length) / 4;
        u32 mask = (1ULL << word_length) - 1;
 
        /*
@@ -652,9 +656,9 @@ static int davinci_config_channel_size(struct davinci_audio_dev *dev,
                mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
                                TXSSZ(fmt), TXSSZ(0x0F));
                mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
-                               TXROT(rotate), TXROT(7));
+                               TXROT(tx_rotate), TXROT(7));
                mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
-                               RXROT(rotate), RXROT(7));
+                               RXROT(rx_rotate), RXROT(7));
                mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG,
                                mask);
        }
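
The channel-size change computes separate rotation values for transmit and receive: TX keeps word_length / 4, while RX becomes (32 - word_length) / 4 so right-justified receive data lines up in a 32-bit slot. A quick standalone check of both formulas for common word lengths, assuming 32-bit slots as the driver does.

#include <stdio.h>

int main(void)
{
	/* Same arithmetic as davinci_config_channel_size() after the fix:
	 * rotation is expressed in 4-bit nibbles of a 32-bit slot. */
	unsigned int word_lengths[] = { 8, 16, 24, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(word_lengths) / sizeof(word_lengths[0]); i++) {
		unsigned int wl = word_lengths[i];
		unsigned int tx_rotate = (wl / 4) & 0x7;
		unsigned int rx_rotate = (32 - wl) / 4;
		unsigned long long mask = (1ULL << wl) - 1;

		printf("word_length=%2u  tx_rotate=%u  rx_rotate=%u  mask=0x%08llx\n",
		       wl, tx_rotate, rx_rotate, mask);
	}
	return 0;
}
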
index 902fab02b8512aab0288788b18781ff032da8798..c6fa03e2114ab985186cb8f86a3ee6f4317dc4bc 100644 (file)
@@ -540,11 +540,6 @@ static int imx_ssi_probe(struct platform_device *pdev)
        clk_prepare_enable(ssi->clk);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               ret = -ENODEV;
-               goto failed_get_resource;
-       }
-
        ssi->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ssi->base)) {
                ret = PTR_ERR(ssi->base);
@@ -633,7 +628,6 @@ static int imx_ssi_probe(struct platform_device *pdev)
        snd_soc_unregister_component(&pdev->dev);
 failed_register:
        release_mem_region(res->start, resource_size(res));
-failed_get_resource:
        clk_disable_unprepare(ssi->clk);
 failed_clk:
 
index befe68f59285df63b653436a4d2d0dd2d1202ec7..4c9dad3263c5f7b754d108b3bf66a3ebdb6d67f3 100644 (file)
@@ -471,11 +471,6 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, priv);
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem) {
-               dev_err(&pdev->dev, "platform_get_resource failed\n");
-               return -ENXIO;
-       }
-
        priv->io = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(priv->io))
                return PTR_ERR(priv->io);
index 3853f7eb3f2843f35c8f908cf47caab47931952a..06a8000aa07bedd1c47beb401d26e9052512fc54 100644 (file)
@@ -220,8 +220,12 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
                        goto err;
        }
 
-       snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
-                               SND_SOC_DAPM_STREAM_START);
+       if (cstream->direction == SND_COMPRESS_PLAYBACK)
+               snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
+                                       SND_SOC_DAPM_STREAM_START);
+       else
+               snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
+                                       SND_SOC_DAPM_STREAM_START);
 
        /* cancel any delayed stream shutdown that is pending */
        rtd->pop_wait = 0;
index 21779a6a781a26ddf8a860588a468ac52ef12780..a80c883bb8be29eeba512d86e46af833d472c833 100644 (file)
@@ -1095,9 +1095,9 @@ int dapm_clock_event(struct snd_soc_dapm_widget *w,
 
 #ifdef CONFIG_HAVE_CLK
        if (SND_SOC_DAPM_EVENT_ON(event)) {
-               return clk_enable(w->clk);
+               return clk_prepare_enable(w->clk);
        } else {
-               clk_disable(w->clk);
+               clk_disable_unprepare(w->clk);
                return 0;
        }
 #endif
index a1d9b0792a1e5b1b68816ad794a77fb76bae2398..b9defcdeb7ef805af05a6453ce309a2eb64bdb18 100644 (file)
@@ -42,8 +42,8 @@ static const u8 ep_w_max_packet_size[] = {
        0x94, 0x01, 0x5c, 0x02  /* alt 3: 404 EP2 and 604 EP6 (25 fpp) */
 };
 
-static const u8 known_fw_versions[][4] = {
-       { 0x03, 0x01, 0x0b, 0x00 }
+static const u8 known_fw_versions[][2] = {
+       { 0x03, 0x01 }
 };
 
 struct ihex_record {
@@ -343,7 +343,7 @@ static int usb6fire_fw_check(u8 *version)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(known_fw_versions); i++)
-               if (!memcmp(version, known_fw_versions + i, 4))
+               if (!memcmp(version, known_fw_versions + i, 2))
                        return 0;
 
        snd_printk(KERN_ERR PREFIX "invalid firmware version in device: %*ph. "
index 135c7687106303fe53a156decdf434150aaba02e..5f761ab34c0140b1b62673e46fc59659488b8cc7 100644 (file)
@@ -116,21 +116,22 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
 }
 
 static void proc_dump_ep_status(struct snd_usb_substream *subs,
-                               struct snd_usb_endpoint *ep,
+                               struct snd_usb_endpoint *data_ep,
+                               struct snd_usb_endpoint *sync_ep,
                                struct snd_info_buffer *buffer)
 {
-       if (!ep)
+       if (!data_ep)
                return;
-       snd_iprintf(buffer, "    Packet Size = %d\n", ep->curpacksize);
+       snd_iprintf(buffer, "    Packet Size = %d\n", data_ep->curpacksize);
        snd_iprintf(buffer, "    Momentary freq = %u Hz (%#x.%04x)\n",
                    subs->speed == USB_SPEED_FULL
-                   ? get_full_speed_hz(ep->freqm)
-                   : get_high_speed_hz(ep->freqm),
-                   ep->freqm >> 16, ep->freqm & 0xffff);
-       if (ep->freqshift != INT_MIN) {
-               int res = 16 - ep->freqshift;
+                   ? get_full_speed_hz(data_ep->freqm)
+                   : get_high_speed_hz(data_ep->freqm),
+                   data_ep->freqm >> 16, data_ep->freqm & 0xffff);
+       if (sync_ep && data_ep->freqshift != INT_MIN) {
+               int res = 16 - data_ep->freqshift;
                snd_iprintf(buffer, "    Feedback Format = %d.%d\n",
-                           (ep->syncmaxsize > 3 ? 32 : 24) - res, res);
+                           (sync_ep->syncmaxsize > 3 ? 32 : 24) - res, res);
        }
 }
 
@@ -140,8 +141,7 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn
                snd_iprintf(buffer, "  Status: Running\n");
                snd_iprintf(buffer, "    Interface = %d\n", subs->interface);
                snd_iprintf(buffer, "    Altset = %d\n", subs->altset_idx);
-               proc_dump_ep_status(subs, subs->data_endpoint, buffer);
-               proc_dump_ep_status(subs, subs->sync_endpoint, buffer);
+               proc_dump_ep_status(subs, subs->data_endpoint, subs->sync_endpoint, buffer);
        } else {
                snd_iprintf(buffer, "  Status: Stop\n");
        }
index a4ffc95000238cde8f086970f9d23d042ba99c59..b5740599aabd38ee1eebdbc2ce3f66b58a0437a6 100755 (executable)
@@ -15,35 +15,38 @@ kallsyms = []
 
 def get_kallsyms_table():
        global kallsyms
+
        try:
                f = open("/proc/kallsyms", "r")
-               linecount = 0
-               for line in f:
-                       linecount = linecount+1
-               f.seek(0)
        except:
                return
 
-
-       j = 0
        for line in f:
                loc = int(line.split()[0], 16)
                name = line.split()[2]
-               j = j +1
-               if ((j % 100) == 0):
-                       print "\r" + str(j) + "/" + str(linecount),
-               kallsyms.append({ 'loc': loc, 'name' : name})
-
-       print "\r" + str(j) + "/" + str(linecount)
+               kallsyms.append((loc, name))
        kallsyms.sort()
-       return
 
 def get_sym(sloc):
        loc = int(sloc)
-       for i in kallsyms:
-               if (i['loc'] >= loc):
-                       return (i['name'], i['loc']-loc)
-       return (None, 0)
+
+       # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
+       #            kallsyms[i][0] > loc for all end <= i < len(kallsyms)
+       start, end = -1, len(kallsyms)
+       while end != start + 1:
+               pivot = (start + end) // 2
+               if loc < kallsyms[pivot][0]:
+                       end = pivot
+               else:
+                       start = pivot
+
+       # Now (start == -1 or kallsyms[start][0] <= loc)
+       # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
+       if start >= 0:
+               symloc, name = kallsyms[start]
+               return (name, loc - symloc)
+       else:
+               return (None, 0)
 
 def print_drop_table():
        print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
@@ -64,7 +67,7 @@ def trace_end():
 
 # called from perf, when it finds a corresponding event
 def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
-                       skbaddr, protocol, location):
+                  skbaddr, location, protocol):
        slocation = str(location)
        try:
                drop_log[slocation] = drop_log[slocation] + 1
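
The get_sym() rewrite above replaces a linear scan of /proc/kallsyms with a lower-bound binary search over sorted (address, name) tuples. The same invariant, expressed as a small standalone C program over a made-up symbol table.

#include <stdio.h>

struct sym { unsigned long addr; const char *name; };

/* Toy symbol table, sorted by address (stands in for /proc/kallsyms). */
static const struct sym syms[] = {
	{ 0x1000, "func_a" },
	{ 0x1400, "func_b" },
	{ 0x2000, "func_c" },
};
static const int nsyms = sizeof(syms) / sizeof(syms[0]);

/* Lower-bound search: returns the last entry whose address is <= loc,
 * or NULL if loc is below the first symbol - the same invariant the
 * Python get_sym() keeps between 'start' and 'end'. */
static const struct sym *lookup(unsigned long loc, unsigned long *off)
{
	int start = -1, end = nsyms;

	while (end != start + 1) {
		int pivot = (start + end) / 2;

		if (loc < syms[pivot].addr)
			end = pivot;
		else
			start = pivot;
	}
	if (start < 0)
		return NULL;
	*off = loc - syms[start].addr;
	return &syms[start];
}

int main(void)
{
	unsigned long off;
	const struct sym *s = lookup(0x1432, &off);

	if (s)
		printf("%s+0x%lx\n", s->name, off);	/* prints func_b+0x32 */
	return 0;
}
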
index 321e066a07533bcf7a18e72c471e01cc1bef9527..9e9d348711953a44006b7c377a898e98c15b131d 100644 (file)
@@ -46,6 +46,7 @@ unsigned int skip_c0;
 unsigned int skip_c1;
 unsigned int do_nhm_cstates;
 unsigned int do_snb_cstates;
+unsigned int do_c8_c9_c10;
 unsigned int has_aperf;
 unsigned int has_epb;
 unsigned int units = 1000000000;       /* Ghz etc */
@@ -120,6 +121,9 @@ struct pkg_data {
        unsigned long long pc3;
        unsigned long long pc6;
        unsigned long long pc7;
+       unsigned long long pc8;
+       unsigned long long pc9;
+       unsigned long long pc10;
        unsigned int package_id;
        unsigned int energy_pkg;        /* MSR_PKG_ENERGY_STATUS */
        unsigned int energy_dram;       /* MSR_DRAM_ENERGY_STATUS */
@@ -282,6 +286,11 @@ void print_header(void)
                outp += sprintf(outp, "   %%pc6");
        if (do_snb_cstates)
                outp += sprintf(outp, "   %%pc7");
+       if (do_c8_c9_c10) {
+               outp += sprintf(outp, "   %%pc8");
+               outp += sprintf(outp, "   %%pc9");
+               outp += sprintf(outp, "  %%pc10");
+       }
 
        if (do_rapl & RAPL_PKG)
                outp += sprintf(outp, "  Pkg_W");
@@ -336,6 +345,9 @@ int dump_counters(struct thread_data *t, struct core_data *c,
                fprintf(stderr, "pc3: %016llX\n", p->pc3);
                fprintf(stderr, "pc6: %016llX\n", p->pc6);
                fprintf(stderr, "pc7: %016llX\n", p->pc7);
+               fprintf(stderr, "pc8: %016llX\n", p->pc8);
+               fprintf(stderr, "pc9: %016llX\n", p->pc9);
+               fprintf(stderr, "pc10: %016llX\n", p->pc10);
                fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg);
                fprintf(stderr, "Joules COR: %0X\n", p->energy_cores);
                fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx);
@@ -493,6 +505,11 @@ int format_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
        if (do_snb_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
+       if (do_c8_c9_c10) {
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc9/t->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc10/t->tsc);
+       }
 
        /*
         * If measurement interval exceeds minimum RAPL Joule Counter range,
@@ -569,6 +586,9 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
        old->pc3 = new->pc3 - old->pc3;
        old->pc6 = new->pc6 - old->pc6;
        old->pc7 = new->pc7 - old->pc7;
+       old->pc8 = new->pc8 - old->pc8;
+       old->pc9 = new->pc9 - old->pc9;
+       old->pc10 = new->pc10 - old->pc10;
        old->pkg_temp_c = new->pkg_temp_c;
 
        DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
@@ -702,6 +722,9 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
        p->pc3 = 0;
        p->pc6 = 0;
        p->pc7 = 0;
+       p->pc8 = 0;
+       p->pc9 = 0;
+       p->pc10 = 0;
 
        p->energy_pkg = 0;
        p->energy_dram = 0;
@@ -740,6 +763,9 @@ int sum_counters(struct thread_data *t, struct core_data *c,
        average.packages.pc3 += p->pc3;
        average.packages.pc6 += p->pc6;
        average.packages.pc7 += p->pc7;
+       average.packages.pc8 += p->pc8;
+       average.packages.pc9 += p->pc9;
+       average.packages.pc10 += p->pc10;
 
        average.packages.energy_pkg += p->energy_pkg;
        average.packages.energy_dram += p->energy_dram;
@@ -781,6 +807,10 @@ void compute_average(struct thread_data *t, struct core_data *c,
        average.packages.pc3 /= topo.num_packages;
        average.packages.pc6 /= topo.num_packages;
        average.packages.pc7 /= topo.num_packages;
+
+       average.packages.pc8 /= topo.num_packages;
+       average.packages.pc9 /= topo.num_packages;
+       average.packages.pc10 /= topo.num_packages;
 }
 
 static unsigned long long rdtsc(void)
@@ -880,6 +910,14 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
                        return -12;
        }
+       if (do_c8_c9_c10) {
+               if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
+                       return -13;
+               if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
+                       return -13;
+               if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
+                       return -13;
+       }
        if (do_rapl & RAPL_PKG) {
                if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
                        return -13;
@@ -1762,6 +1800,19 @@ int is_snb(unsigned int family, unsigned int model)
        return 0;
 }
 
+int has_c8_c9_c10(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+
+       switch (model) {
+       case 0x45:
+               return 1;
+       }
+       return 0;
+}
+
+
 double discover_bclk(unsigned int family, unsigned int model)
 {
        if (is_snb(family, model))
@@ -1918,6 +1969,7 @@ void check_cpuid()
        do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
        do_smi = do_nhm_cstates;
        do_snb_cstates = is_snb(family, model);
+       do_c8_c9_c10 = has_c8_c9_c10(family, model);
        bclk = discover_bclk(family, model);
 
        do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
@@ -2279,7 +2331,7 @@ int main(int argc, char **argv)
        cmdline(argc, argv);
 
        if (verbose)
-               fprintf(stderr, "turbostat v3.3 March 15, 2013"
+               fprintf(stderr, "turbostat v3.4 April 17, 2013"
                        " - Len Brown <lenb@kernel.org>\n");
 
        turbostat_init();
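
The turbostat additions read the new MSR_PKG_C8/C9/C10_RESIDENCY counters and report each delta as a percentage of the TSC delta over the measurement interval. A minimal sketch of that arithmetic, with illustrative numbers in place of real MSR reads.

#include <stdio.h>

/* Residency counters and the TSC tick at the same rate, so the share
 * of an interval spent in a package C-state is delta_counter / delta_tsc. */
static double residency_pct(unsigned long long old_msr,
			    unsigned long long new_msr,
			    unsigned long long old_tsc,
			    unsigned long long new_tsc)
{
	return 100.0 * (double)(new_msr - old_msr) / (double)(new_tsc - old_tsc);
}

int main(void)
{
	/* Illustrative numbers only: counters sampled at the start and end
	 * of a one-second interval on a hypothetical 2 GHz TSC. */
	unsigned long long tsc0 = 0, tsc1 = 2000000000ULL;
	unsigned long long pc8_0 = 0, pc8_1 = 500000000ULL;

	printf("%%pc8 = %6.2f\n", residency_pct(pc8_0, pc8_1, tsc0, tsc1));
	return 0;
}
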
index d4abc59ce1d9e519c1be61628d0433602163dddb..0a63658065f0e9e41285f521665b80b975cfe86c 100644 (file)
@@ -6,7 +6,6 @@ TARGETS += memory-hotplug
 TARGETS += mqueue
 TARGETS += net
 TARGETS += ptrace
-TARGETS += soft-dirty
 TARGETS += vm
 
 all:
diff --git a/tools/testing/selftests/soft-dirty/Makefile b/tools/testing/selftests/soft-dirty/Makefile
deleted file mode 100644 (file)
index a9cdc82..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-CFLAGS += -iquote../../../../include/uapi -Wall
-soft-dirty: soft-dirty.c
-
-all: soft-dirty
-
-clean:
-       rm -f soft-dirty
-
-run_tests: all
-       @./soft-dirty || echo "soft-dirty selftests: [FAIL]"
diff --git a/tools/testing/selftests/soft-dirty/soft-dirty.c b/tools/testing/selftests/soft-dirty/soft-dirty.c
deleted file mode 100644 (file)
index aba4f87..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/types.h>
-
-typedef unsigned long long u64;
-
-#define PME_PRESENT    (1ULL << 63)
-#define PME_SOFT_DIRTY (1Ull << 55)
-
-#define PAGES_TO_TEST  3
-#ifndef PAGE_SIZE
-#define PAGE_SIZE      4096
-#endif
-
-static void get_pagemap2(char *mem, u64 *map)
-{
-       int fd;
-
-       fd = open("/proc/self/pagemap2", O_RDONLY);
-       if (fd < 0) {
-               perror("Can't open pagemap2");
-               exit(1);
-       }
-
-       lseek(fd, (unsigned long)mem / PAGE_SIZE * sizeof(u64), SEEK_SET);
-       read(fd, map, sizeof(u64) * PAGES_TO_TEST);
-       close(fd);
-}
-
-static inline char map_p(u64 map)
-{
-       return map & PME_PRESENT ? 'p' : '-';
-}
-
-static inline char map_sd(u64 map)
-{
-       return map & PME_SOFT_DIRTY ? 'd' : '-';
-}
-
-static int check_pte(int step, int page, u64 *map, u64 want)
-{
-       if ((map[page] & want) != want) {
-               printf("Step %d Page %d has %c%c, want %c%c\n",
-                               step, page,
-                               map_p(map[page]), map_sd(map[page]),
-                               map_p(want), map_sd(want));
-               return 1;
-       }
-
-       return 0;
-}
-
-static void clear_refs(void)
-{
-       int fd;
-       char *v = "4";
-
-       fd = open("/proc/self/clear_refs", O_WRONLY);
-       if (write(fd, v, 3) < 3) {
-               perror("Can't clear soft-dirty bit");
-               exit(1);
-       }
-       close(fd);
-}
-
-int main(void)
-{
-       char *mem, x;
-       u64 map[PAGES_TO_TEST];
-
-       mem = mmap(NULL, PAGES_TO_TEST * PAGE_SIZE,
-                       PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
-
-       x = mem[0];
-       mem[2 * PAGE_SIZE] = 'c';
-       get_pagemap2(mem, map);
-
-       if (check_pte(1, 0, map, PME_PRESENT))
-               return 1;
-       if (check_pte(1, 1, map, 0))
-               return 1;
-       if (check_pte(1, 2, map, PME_PRESENT | PME_SOFT_DIRTY))
-               return 1;
-
-       clear_refs();
-       get_pagemap2(mem, map);
-
-       if (check_pte(2, 0, map, PME_PRESENT))
-               return 1;
-       if (check_pte(2, 1, map, 0))
-               return 1;
-       if (check_pte(2, 2, map, PME_PRESENT))
-               return 1;
-
-       mem[0] = 'a';
-       mem[PAGE_SIZE] = 'b';
-       x = mem[2 * PAGE_SIZE];
-       get_pagemap2(mem, map);
-
-       if (check_pte(3, 0, map, PME_PRESENT | PME_SOFT_DIRTY))
-               return 1;
-       if (check_pte(3, 1, map, PME_PRESENT | PME_SOFT_DIRTY))
-               return 1;
-       if (check_pte(3, 2, map, PME_PRESENT))
-               return 1;
-
-       (void)x; /* gcc warn */
-
-       printf("PASS\n");
-       return 0;
-}
index 45f09362ee7be02df67171efa3508fa225129499..302681c4aa4465bb21b69524d7c0d3a5341f4a4e 100644 (file)
@@ -1978,7 +1978,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
        if (vcpu->kvm->mm != current->mm)
                return -EIO;
 
-#if defined(CONFIG_S390) || defined(CONFIG_PPC)
+#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
        /*
         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
         * so vcpu_load() would break it.
@@ -3105,13 +3105,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
        int r;
        int cpu;
 
-       r = kvm_irqfd_init();
-       if (r)
-               goto out_irqfd;
        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;
 
+       /*
+        * kvm_arch_init makes sure there's at most one caller
+        * for architectures that support multiple implementations,
+        * like intel and amd on x86.
+        * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
+        * conflicts in case kvm is already set up for another implementation.
+        */
+       r = kvm_irqfd_init();
+       if (r)
+               goto out_irqfd;
+
        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
                r = -ENOMEM;
                goto out_free_0;
@@ -3186,10 +3194,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 out_free_0a:
        free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
-       kvm_arch_exit();
-out_fail:
        kvm_irqfd_exit();
 out_irqfd:
+       kvm_arch_exit();
+out_fail:
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_init);
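
The kvm_init() reordering runs kvm_arch_init() before kvm_irqfd_init() and swaps the corresponding error labels, so teardown still happens in exact reverse order of setup. A compact standalone illustration of that goto-unwind pattern, with stub init/exit functions in place of the KVM ones.

#include <stdio.h>

/* Stubs standing in for the real init/exit pairs; each returns 0 on
 * success, a negative value on failure. */
static int arch_init(void)	{ puts("arch_init");	return 0; }
static void arch_exit(void)	{ puts("arch_exit"); }
static int irqfd_init(void)	{ puts("irqfd_init");	return 0; }
static void irqfd_exit(void)	{ puts("irqfd_exit"); }
static int cpumask_alloc(void)	{ puts("cpumask_alloc"); return -12; /* simulate -ENOMEM */ }

static int subsystem_init(void)
{
	int r;

	r = arch_init();
	if (r)
		goto out_fail;

	/* irqfd setup depends on the arch layer, so it runs second... */
	r = irqfd_init();
	if (r)
		goto out_irqfd;

	r = cpumask_alloc();
	if (r)
		goto out_free;

	return 0;

	/* ...and the error labels unwind in exactly the reverse order. */
out_free:
	irqfd_exit();
out_irqfd:
	arch_exit();
out_fail:
	return r;
}

int main(void)
{
	printf("subsystem_init() = %d\n", subsystem_init());
	return 0;
}
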