Merge tag 'drm-fixes-for-4.8-rc4' of git://people.freedesktop.org/~airlied/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 28 Aug 2016 21:31:36 +0000 (14:31 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 28 Aug 2016 21:31:36 +0000 (14:31 -0700)
Pull drm fixes from Dave Airlie:
 "A bunch of fixes covering i915, amdgpu, one tegra and some core DRM
  ones.  Nothing too strange at this point"

* tag 'drm-fixes-for-4.8-rc4' of git://people.freedesktop.org/~airlied/linux: (21 commits)
  drm/atomic: Don't potentially reset color_mgmt_changed on successive property updates.
  drm: Protect fb_defio in drivers with CONFIG_KMS_FBDEV_EMULATION
  drm/amdgpu: skip TV/CV in display parsing
  drm/amdgpu: avoid a possible array overflow
  drm/amdgpu: fix lru size grouping v2
  drm/tegra: dsi: Enhance runtime power management
  drm/i915: Fix botched merge that downgrades CSR versions.
  drm/i915/skl: Ensure pipes with changed wms get added to the state
  drm/i915/gen9: Only copy WM results for changed pipes to skl_hw
  drm/i915/skl: Add support for the SAGV, fix underrun hangs
  drm/i915/gen6+: Interpret mailbox error flags
  drm/i915: Reattach comment, complete type specification
  drm/i915: Unconditionally flush any chipset buffers before execbuf
  drm/i915/gen9: Drop invalid WARN() during data rate calculation
  drm/i915/gen9: Initialize intel_state->active_crtcs during WM sanitization (v2)
  drm: Reject page_flip for !DRIVER_MODESET
  drm/amdgpu: fix timeout value check in amd_sched_job_recovery
  drm/amdgpu: fix sdma_v2_4_ring_test_ib
  drm/amdgpu: fix amdgpu_move_blit on 32bit systems
  drm/radeon: fix radeon_move_blit on 32bit systems
  ...

185 files changed:
Documentation/PCI/MSI-HOWTO.txt
Documentation/arm64/silicon-errata.txt
Documentation/devicetree/bindings/sound/omap-mcpdm.txt
Documentation/devicetree/bindings/thermal/thermal.txt
Documentation/kernel-parameters.txt
MAINTAINERS
arch/arc/include/asm/entry.h
arch/arc/include/asm/irqflags-compact.h
arch/arc/include/asm/pgtable.h
arch/arc/include/uapi/asm/elf.h
arch/arc/kernel/arcksyms.c
arch/arc/kernel/process.c
arch/arc/kernel/setup.c
arch/arc/mm/cache.c
arch/arc/mm/highmem.c
arch/arm/kvm/mmu.c
arch/arm/xen/enlighten.c
arch/arm64/kernel/head.S
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/sys_regs.c
arch/mips/include/asm/page.h
arch/mips/kvm/mmu.c
arch/s390/kernel/setup.c
arch/um/include/asm/common.lds.S
arch/x86/crypto/sha256-mb/sha256_mb.c
arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
arch/x86/crypto/sha512-mb/sha512_mb.c
arch/x86/kernel/apic/apic.c
arch/x86/kvm/vmx.c
arch/x86/mm/kaslr.c
arch/x86/pci/vmd.c
arch/x86/xen/enlighten.c
block/bio.c
block/blk-core.c
block/blk-merge.c
block/blk-mq.c
block/elevator.c
drivers/block/floppy.c
drivers/block/xen-blkfront.c
drivers/clocksource/pxa_timer.c
drivers/clocksource/sun4i_timer.c
drivers/clocksource/time-pistachio.c
drivers/clocksource/timer-atmel-pit.c
drivers/dax/pmem.c
drivers/gpio/Kconfig
drivers/gpio/gpio-max730x.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/debugfs.c
drivers/infiniband/hw/hfi1/driver.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/mad.c
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/qsfp.c
drivers/infiniband/hw/hfi1/qsfp.h
drivers/infiniband/hw/i40iw/i40iw.h
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_utils.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib_debugfs.c
drivers/infiniband/hw/qib/qib_fs.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/hw/usnic/usnic_ib_main.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/input/keyboard/tegra-kbc.c
drivers/input/rmi4/rmi_driver.c
drivers/input/serio/i8042.c
drivers/input/touchscreen/ads7846.c
drivers/input/touchscreen/silead.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-mips-gic.c
drivers/md/bcache/super.c
drivers/md/dm-flakey.c
drivers/md/dm-log.c
drivers/mmc/card/block.c
drivers/mmc/card/queue.c
drivers/mmc/card/queue.h
drivers/nvme/host/core.c
drivers/pci/msi.c
drivers/thermal/cpu_cooling.c
drivers/thermal/imx_thermal.c
drivers/thermal/int340x_thermal/int3406_thermal.c
drivers/vhost/scsi.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/block_dev.c
fs/btrfs/backref.c
fs/btrfs/ctree.h
fs/btrfs/delayed-ref.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/qgroup.h
fs/btrfs/relocation.c
fs/btrfs/root-tree.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/tree-log.h
fs/btrfs/volumes.c
fs/dlm/debug_fs.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/node.c
fs/f2fs/super.c
fs/seq_file.c
fs/ubifs/tnc_commit.c
fs/ubifs/xattr.c
include/linux/bio.h
include/linux/blkdev.h
include/linux/compiler-gcc.h
include/linux/irqchip/arm-gic-v3.h
include/linux/pci.h
include/linux/sysctl.h
include/rdma/ib_verbs.h
include/xen/xen-ops.h
kernel/events/core.c
kernel/irq/affinity.c
kernel/irq/chip.c
kernel/irq/manage.c
kernel/printk/braille.c
kernel/sysctl.c
kernel/time/timekeeping.c
kernel/time/timekeeping_debug.c
kernel/trace/blktrace.c
mm/Kconfig
mm/huge_memory.c
mm/memcontrol.c
mm/readahead.c
mm/usercopy.c
scripts/get_maintainer.pl
sound/pci/hda/patch_realtek.c
sound/soc/atmel/atmel_ssc_dai.c
sound/soc/codecs/da7213.c
sound/soc/codecs/max98371.c
sound/soc/codecs/nau8825.c
sound/soc/codecs/wm2000.c
sound/soc/generic/Makefile
sound/soc/generic/simple-card-utils.c
sound/soc/intel/skylake/skl-sst-utils.c
sound/soc/intel/skylake/skl.c
sound/soc/omap/omap-abe-twl6040.c
sound/soc/omap/omap-mcpdm.c
sound/soc/samsung/s3c24xx_uda134x.c
sound/soc/sh/rcar/src.c
sound/soc/soc-compress.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/usb/line6/pcm.c
sound/usb/line6/pod.c
tools/gpio/gpio-event-mon.c
tools/include/linux/string.h
tools/perf/util/evsel.c
tools/perf/util/unwind-libdw.c
tools/perf/util/unwind-libunwind-local.c
virt/kvm/arm/arch_timer.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-mmio-v3.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.c
virt/kvm/arm/vgic/vgic.h

index c55df2911136c90d37944616cfeebaeca4954a96..cd9c9f6a7cd9e94de08cc7b4f97c232ff040cd38 100644 (file)
@@ -94,14 +94,11 @@ has a requirement for a minimum number of vectors, the driver can pass a
 min_vecs argument set to this limit, and the PCI core will return -ENOSPC
 if it can't meet the minimum number of vectors.
 
-The flags argument should normally be set to 0, but can be used to pass the
-PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flag in case a device claims to support
-MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
-case the device does not support legacy interrupt lines.
-
-By default this function will spread the interrupts around the available
-CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
-flag.
+The flags argument is used to specify which type of interrupt can be used
+by the device and the driver (PCI_IRQ_LEGACY, PCI_IRQ_MSI, PCI_IRQ_MSIX).
+A convenient short-hand (PCI_IRQ_ALL_TYPES) is also available to ask for
+any possible kind of interrupt.  If the PCI_IRQ_AFFINITY flag is set,
+pci_alloc_irq_vectors() will spread the interrupts around the available CPUs.
 
 To get the Linux IRQ numbers passed to request_irq() and free_irq() and the
 vectors, use the following function:
@@ -131,7 +128,7 @@ larger than the number supported by the device it will automatically be
 capped to the supported limit, so there is no need to query the number of
 vectors supported beforehand:
 
-       nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
+       nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_ALL_TYPES);
        if (nvec < 0)
                goto out_err;
 
@@ -140,7 +137,7 @@ interrupts it can request a particular number of interrupts by passing that
 number to pci_alloc_irq_vectors() function as both 'min_vecs' and
 'max_vecs' parameters:
 
-       ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
+       ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_ALL_TYPES);
        if (ret < 0)
                goto out_err;
 
@@ -148,15 +145,14 @@ The most notorious example of the request type described above is enabling
 the single MSI mode for a device.  It could be done by passing two 1s as
 'min_vecs' and 'max_vecs':
 
-       ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (ret < 0)
                goto out_err;
 
 Some devices might not support using legacy line interrupts, in which case
-the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
-can't provide MSI or MSI-X interrupts:
+the driver can specify that only MSI or MSI-X is acceptable:
 
-       nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
+       nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI | PCI_IRQ_MSIX);
        if (nvec < 0)
                goto out_err;
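
Putting the reworked flag API together: below is a minimal probe-path
sketch under the semantics documented above. Every foo_* name is
hypothetical; only the pci_* and request_irq()/free_irq() calls come
from the API itself.

       /* Hypothetical probe fragment: accept any interrupt type and
        * spread the vectors across CPUs.  The PCI core caps nvec at
        * what the device supports, so no need to query that first.
        */
       static irqreturn_t foo_irq_handler(int irq, void *data)
       {
               return IRQ_HANDLED;
       }

       static int foo_setup_irqs(struct pci_dev *pdev, unsigned int nr_queues)
       {
               int i, err, nvec;

               nvec = pci_alloc_irq_vectors(pdev, 1, nr_queues,
                                            PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
               if (nvec < 0)
                       return nvec;

               for (i = 0; i < nvec; i++) {
                       /* pci_irq_vector() maps a vector index to the
                        * Linux IRQ number expected by request_irq().
                        */
                       err = request_irq(pci_irq_vector(pdev, i),
                                         foo_irq_handler, 0, "foo", pdev);
                       if (err)
                               goto out_free;
               }
               return 0;

       out_free:
               while (--i >= 0)
                       free_irq(pci_irq_vector(pdev, i), pdev);
               pci_free_irq_vectors(pdev);
               return err;
       }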
 
index 4da60b463995464a71c04245e7f90e7ced4f51d2..ccc60324e7388eb5756de1782174246708715c36 100644 (file)
@@ -53,6 +53,7 @@ stable kernels.
 | ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
 | ARM            | Cortex-A57      | #852523         | N/A                     |
 | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
+| ARM            | Cortex-A72      | #853709         | N/A                     |
 | ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
index 6f6c2f8e908db15ff37567413c20b19a5ed68d17..0741dff048dd39ba3336f67c6186918c306a563f 100644 (file)
@@ -8,8 +8,6 @@ Required properties:
 - interrupts: Interrupt number for McPDM
 - interrupt-parent: The parent interrupt controller
 - ti,hwmods: Name of the hwmod associated to the McPDM
-- clocks:  phandle for the pdmclk provider, likely <&twl6040>
-- clock-names: Must be "pdmclk"
 
 Example:
 
@@ -21,11 +19,3 @@ mcpdm: mcpdm@40132000 {
        interrupt-parent = <&gic>;
        ti,hwmods = "mcpdm";
 };
-
-In board DTS file the pdmclk needs to be added:
-
-&mcpdm {
-       clocks = <&twl6040>;
-       clock-names = "pdmclk";
-       status = "okay";
-};
index 41b817f7b6706515025b9a6e4af05304a62d5c25..88b6ea1ad2903cadd73650ff6dbfa0eb54aac39f 100644 (file)
@@ -62,7 +62,7 @@ For more examples of cooling devices, refer to the example sections below.
 Required properties:
 - #cooling-cells:      Used to provide cooling device specific information
   Type: unsigned       while referring to it. Must be at least 2, in order
-  Size: one cell       to specify minimum and maximum cooling state used
+  Size: one cell       to specify minimum and maximum cooling state used
                        in the reference. The first cell is the minimum
                        cooling state requested and the second cell is
                        the maximum cooling state requested in the reference.
@@ -119,7 +119,7 @@ Required properties:
 Optional property:
 - contribution:                The cooling contribution to the thermal zone of the
   Type: unsigned       referred cooling device at the referred trip point.
-  Size: one cell       The contribution is a ratio of the sum
+  Size: one cell       The contribution is a ratio of the sum
                        of all cooling contributions within a thermal zone.
 
 Note: Using the THERMAL_NO_LIMIT (-1UL) constant in the cooling-device phandle
@@ -145,7 +145,7 @@ Required properties:
   Size: one cell
 
 - thermal-sensors:     A list of thermal sensor phandles and sensor specifier
-  Type: list of        used while monitoring the thermal zone.
+  Type: list of                used while monitoring the thermal zone.
   phandles + sensor
   specifier
 
@@ -473,7 +473,7 @@ thermal-zones {
                                  <&adc>;       /* pcb north */
 
                /* hotspot = 100 * bandgap - 120 * adc + 484 */
-               coefficients =          <100    -120    484>;
+               coefficients =          <100    -120    484>;
 
                trips {
                        ...
@@ -502,7 +502,7 @@ from the ADC sensor. The binding would be then:
         thermal-sensors =  <&adc>;
 
                /* hotspot = 1 * adc + 6000 */
-       coefficients =          <1      6000>;
+       coefficients =          <1      6000>;
 
 (d) - Board thermal
 
index 46c030a49186faabaa588afd2d284975c72c2017..a4f4d693e2c1287d533c0bc05d9f292f9b529720 100644 (file)
@@ -3032,6 +3032,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                PAGE_SIZE is used as alignment.
                                PCI-PCI bridge can be specified, if resource
                                windows need to be expanded.
+                               To specify the alignment for several
+                               instances of a device, the PCI vendor,
+                               device, subvendor, and subdevice may be
+                               specified, e.g., 4096@pci:8086:9c22:103c:198f
                ecrc=           Enable/disable PCIe ECRC (transaction layer
                                end-to-end CRC checking).
                                bios: Use BIOS/firmware settings. This is the
index 0bbe4b105c346893db530be25031ab8ce0350b56..71aa5daeae8f43f616ce5680320889fdf0666fa7 100644 (file)
@@ -881,6 +881,15 @@ S: Supported
 F:     drivers/gpu/drm/arc/
 F:     Documentation/devicetree/bindings/display/snps,arcpgu.txt
 
+ARM ARCHITECTED TIMER DRIVER
+M:     Mark Rutland <mark.rutland@arm.com>
+M:     Marc Zyngier <marc.zyngier@arm.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/include/asm/arch_timer.h
+F:     arch/arm64/include/asm/arch_timer.h
+F:     drivers/clocksource/arm_arch_timer.c
+
 ARM HDLCD DRM DRIVER
 M:     Liviu Dudau <liviu.dudau@arm.com>
 S:     Supported
@@ -7661,7 +7670,7 @@ L:        linux-rdma@vger.kernel.org
 S:     Supported
 W:     https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
 Q:     http://patchwork.kernel.org/project/linux-rdma/list/
-F:     drivers/infiniband/hw/rxe/
+F:     drivers/infiniband/sw/rxe/
 F:     include/uapi/rdma/rdma_user_rxe.h
 
 MEMBARRIER SUPPORT
index ad7860c5ce153c731264f770067e653b70568fd5..51597f344a62aced8c98cb2035c9ec40eff55e6e 100644 (file)
 
 #ifdef CONFIG_ARC_CURR_IN_REG
        ; Retrieve orig r25 and save it with rest of callee_regs
-       ld.as   r12, [r12, PT_user_r25]
+       ld      r12, [r12, PT_user_r25]
        PUSH    r12
 #else
        PUSH    r25
 
        ; SP is back to start of pt_regs
 #ifdef CONFIG_ARC_CURR_IN_REG
-       st.as   r12, [sp, PT_user_r25]
+       st      r12, [sp, PT_user_r25]
 #endif
 .endm
 
index c1d36458bfb7aa665acb30a2234660034a19aecd..4c6eed80cd8ba3bd935e60b30ffadf147326aeca 100644 (file)
@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
 .endm
 
 .macro IRQ_ENABLE  scratch
+       TRACE_ASM_IRQ_ENABLE
        lr      \scratch, [status32]
        or      \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
        flag    \scratch
-       TRACE_ASM_IRQ_ENABLE
 .endm
 
 #endif /* __ASSEMBLY__ */
index 0f92d97432a21a705af12efdff99597046b80cd9..89eeb372005180802181d860be8f215768701659 100644 (file)
@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 
 #define pte_page(pte)          pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot)     pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot)     (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
 #define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
index 0f99ac8fcbb23f051753c4fb8e551a0e13541394..0037a587320d51c4262e7c3b7b504a8dafb34ddc 100644 (file)
 
 /* Machine specific ELF Hdr flags */
 #define EF_ARC_OSABI_MSK       0x00000f00
-#define EF_ARC_OSABI_ORIG      0x00000000   /* MUST be zero for back-compat */
-#define EF_ARC_OSABI_CURRENT   0x00000300   /* v3 (no legacy syscalls) */
+
+#define EF_ARC_OSABI_V3                0x00000300   /* v3 (no legacy syscalls) */
+#define EF_ARC_OSABI_V4                0x00000400   /* v4 (64bit data any reg align) */
+
+#if __GNUC__ < 6
+#define EF_ARC_OSABI_CURRENT   EF_ARC_OSABI_V3
+#else
+#define EF_ARC_OSABI_CURRENT   EF_ARC_OSABI_V4
+#endif
 
 typedef unsigned long elf_greg_t;
 typedef unsigned long elf_fpregset_t;
index 4d9e77724bedef063bb5a9e5ee019bb271fbee6a..000dd041ab42e587620710e045f7f4b31cb0e537 100644 (file)
@@ -28,6 +28,7 @@ extern void __muldf3(void);
 extern void __divdf3(void);
 extern void __floatunsidf(void);
 extern void __floatunsisf(void);
+extern void __udivdi3(void);
 
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
@@ -45,6 +46,7 @@ EXPORT_SYMBOL(__muldf3);
 EXPORT_SYMBOL(__divdf3);
 EXPORT_SYMBOL(__floatunsidf);
 EXPORT_SYMBOL(__floatunsisf);
+EXPORT_SYMBOL(__udivdi3);
 
 /* ARC optimised assembler routines */
 EXPORT_SYMBOL(memset);
index b5db9e7fd649204d1e0da5d772274544b8483c2a..be1972bd2729e7a41013d521e9349cd4ac028499 100644 (file)
@@ -199,7 +199,7 @@ int elf_check_arch(const struct elf32_hdr *x)
        }
 
        eflags = x->e_flags;
-       if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+       if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
                pr_err("ABI mismatch - you need newer toolchain\n");
                force_sigsegv(SIGSEGV, current);
                return 0;
index a946400a86d0dc509858ed441edc2de367650dce..f52a0d0dc462213c9070c229a680f342897dafd5 100644 (file)
@@ -291,8 +291,10 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
                               cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
                               cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
 
-       n += scnprintf(buf + n, len - n,
-                      "OS ABI [v3]\t: no-legacy-syscalls\n");
+       n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
+                       EF_ARC_OSABI_CURRENT >> 8,
+                       EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
+                       "no-legacy-syscalls" : "64-bit data any register aligned");
 
        return buf;
 }
index 5a294b2c3cb307ac591293afae8c8a72409c97c5..0b10efe3a6a752c59d3a3d40a74759c30e380eaa 100644 (file)
@@ -921,6 +921,15 @@ void arc_cache_init(void)
 
        printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
 
+       /*
+        * Only master CPU needs to execute rest of function:
+        *  - Assume SMP so all cores will have the same cache config, so
+        *    any geometry checks will be the same for all
+        *  - IOC setup / dma callbacks only need to be setup once
+        */
+       if (cpu)
+               return;
+
        if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
                struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
 
index 04f83322c9fdaf09444bb55eb4d095a68190cf9f..77ff64a874a1b4649d79e579054febd4dfdbb605 100644 (file)
@@ -61,6 +61,7 @@ void *kmap(struct page *page)
 
        return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void *kmap_atomic(struct page *page)
 {
index bda27b6b1aa2b5dc5d2e20e18f24ae41d3958ed4..29d0b23af2a9dec3b2199ac3851c9ae74b0731ee 100644 (file)
@@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        smp_rmb();
 
        pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
-       if (is_error_pfn(pfn))
+       if (is_error_noslot_pfn(pfn))
                return -EFAULT;
 
        if (kvm_is_device_pfn(pfn)) {
index b0b82f5ea33825943fb75066cf1af0d52f17b3c2..3d2cef6488ea292155b2f2a2cee43046c70873ae 100644 (file)
@@ -50,7 +50,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 static struct vcpu_info __percpu *xen_vcpu_info;
 
 /* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
 /* These are unused until we support booting "pre-ballooned" */
index b77f58355da11c43ab872f24dbcec134305191ed..3e7b050e99dcee0fecfac38c628eaa5c38991db3 100644 (file)
@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
        isb
        bl      __create_page_tables            // recreate kernel mapping
 
+       tlbi    vmalle1                         // Remove any stale TLB entries
+       dsb     nsh
+
        msr     sctlr_el1, x19                  // re-enable the MMU
        isb
        ic      iallu                           // flush instructions fetched
index ae7855f16ec2966f6cde3c2639fe3af360e9a676..5a84b45626032ea1260eea2536da42231dcbfa4f 100644 (file)
@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
        /*
         * We must restore the 32-bit state before the sysregs, thanks
-        * to Cortex-A57 erratum #852523.
+        * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_guest_state(guest_ctxt);
index b0b225ceca18f956a805c2862f299d0ee9575749..e51367d159d0250aaa7e53d33803caddfbae9de8 100644 (file)
@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  *
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters.  Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
  * Debug handling: We do trap most, if not all debug related system
  * registers. The implementation is good enough to ensure that a guest
  * can use these with minimal performance degradation. The drawback is
@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
 
        /* ICC_SRE */
-       { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+       { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
 
        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 
index ea0cd9773914cc06c96fa4dbc783cd84f441c856..5f987598054f906dea71ad364b2be9cc8c74355e 100644 (file)
@@ -164,7 +164,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  */
 static inline unsigned long ___pa(unsigned long x)
 {
-       if (config_enabled(CONFIG_64BIT)) {
+       if (IS_ENABLED(CONFIG_64BIT)) {
                /*
                 * For MIPS64 the virtual address may either be in one of
                 * the compatibility segements ckseg0 or ckseg1, or it may
@@ -173,7 +173,7 @@ static inline unsigned long ___pa(unsigned long x)
                return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
        }
 
-       if (!config_enabled(CONFIG_EVA)) {
+       if (!IS_ENABLED(CONFIG_EVA)) {
                /*
                 * We're using the standard MIPS32 legacy memory map, ie.
                 * the address x is going to be in kseg0 or kseg1. We can
index 6cfdcf55572d6aaa747998683f06bbf7f5a3e5ab..121008c0fcc92e81847176b0fa016947f8ef8cd0 100644 (file)
@@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = gfn_to_pfn(kvm, gfn);
 
-       if (is_error_pfn(pfn)) {
+       if (is_error_noslot_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
                err = -EFAULT;
                goto out;
index ba5f456edaa9078261d3ce0b2572aa7eda5de843..7f7ba5f23f130fae0639b44af7e93c84ed85a654 100644 (file)
@@ -204,11 +204,9 @@ static void __init conmode_default(void)
 #endif
                }
        } else if (MACHINE_IS_KVM) {
-               if (sclp.has_vt220 &&
-                   config_enabled(CONFIG_SCLP_VT220_CONSOLE))
+               if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
                        SET_CONSOLE_VT220;
-               else if (sclp.has_linemode &&
-                        config_enabled(CONFIG_SCLP_CONSOLE))
+               else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
                        SET_CONSOLE_SCLP;
                else
                        SET_CONSOLE_HVC;
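
The config_enabled() -> IS_ENABLED() conversions in the MIPS and s390
hunks above (and in the x86 KASLR hunk further down) move callers onto
the documented <linux/kconfig.h> helper. A minimal sketch of the
semantics, with a hypothetical CONFIG_FOO and hypothetical helpers:

       #include <linux/kconfig.h>

       static void foo_fast(void) { /* hypothetical */ }
       static void foo_slow(void) { /* hypothetical */ }

       /* IS_ENABLED(CONFIG_FOO) is a constant 1 when CONFIG_FOO=y or =m,
        * and 0 otherwise.  Unlike an #ifdef, both branches are parsed
        * and type-checked in every configuration; the dead branch is
        * then optimized away.
        */
       static void foo_configure(void)
       {
               if (IS_ENABLED(CONFIG_FOO))
                       foo_fast();
               else
                       foo_slow();
       }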
index 1dd5bd8a8c599360ae1626b66010c1a18b12145b..133055311dce1c1ea0d4e2ae26195cc7da4c5647 100644 (file)
@@ -81,7 +81,7 @@
   .altinstr_replacement : { *(.altinstr_replacement) }
   /* .exit.text is discard at runtime, not link time, to deal with references
      from .altinstructions and .eh_frame */
-  .exit.text : { *(.exit.text) }
+  .exit.text : { EXIT_TEXT }
   .exit.data : { *(.exit.data) }
 
   .preinit_array : {
index 89fa85e8b10ca3331f48110393747bf44f750c2b..6f97fb33ae216d9129d42261b5a10226868caab0 100644 (file)
@@ -485,10 +485,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
                        req = cast_mcryptd_ctx_to_req(req_ctx);
                        if (irqs_disabled())
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                        else {
                                local_bh_disable();
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                                local_bh_enable();
                        }
                }
index b691da981cd9cf08e718a4ff81ad3d5dc5d7199d..a78a0694ddef37be737599dbf6ba305eb88b3cba 100644 (file)
@@ -265,13 +265,14 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
        vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
        vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
        vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
-       movl    _args_digest+4*32(state, idx, 4), tmp2_w
+       vmovd   _args_digest(state , idx, 4) , %xmm0
        vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
        vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
        vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
 
-       vmovdqu %xmm0, _result_digest(job_rax)
-       movl    tmp2_w, _result_digest+1*16(job_rax)
+        vmovdqu %xmm0, _result_digest(job_rax)
+        offset =  (_result_digest + 1*16)
+        vmovdqu %xmm1, offset(job_rax)
 
        pop     %rbx
 
index f4cf5b78fd360ee0593d9aefa1266e9ffc687325..d210174a52b0e933983f8b253e679eb6b4336632 100644 (file)
@@ -497,10 +497,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
                        req = cast_mcryptd_ctx_to_req(req_ctx);
                        if (irqs_disabled())
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                        else {
                                local_bh_disable();
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                                local_bh_enable();
                        }
                }
index cea4fc19e8447d14fd6dcba5df94aa91f7a2332d..50c95af0f017d7ab73a5cad5dc226b1bf5d9cf60 100644 (file)
@@ -1623,6 +1623,9 @@ void __init enable_IR_x2apic(void)
        unsigned long flags;
        int ret, ir_stat;
 
+       if (skip_ioapic_setup)
+               return;
+
        ir_stat = irq_remapping_prepare();
        if (ir_stat < 0 && !x2apic_supported())
                return;
index a45d8580f91e7e8459666d7c1f0076ab1803b821..5cede40e255241a1d8db2cc52f24505a5ced65ca 100644 (file)
@@ -422,6 +422,7 @@ struct nested_vmx {
        struct list_head vmcs02_pool;
        int vmcs02_num;
        u64 vmcs01_tsc_offset;
+       bool change_vmcs01_virtual_x2apic_mode;
        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;
        /*
@@ -435,6 +436,8 @@ struct nested_vmx {
        bool pi_pending;
        u16 posted_intr_nv;
 
+       unsigned long *msr_bitmap;
+
        struct hrtimer preemption_timer;
        bool preemption_timer_expired;
 
@@ -924,7 +927,6 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 static unsigned long *vmx_msr_bitmap_legacy_x2apic;
 static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_nested;
 static unsigned long *vmx_vmread_bitmap;
 static unsigned long *vmx_vmwrite_bitmap;
 
@@ -2198,6 +2200,12 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
                        new.control) != old.control);
 }
 
+static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+{
+       vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
+       vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -2256,10 +2264,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        /* Setup TSC multiplier */
        if (kvm_has_tsc_control &&
-           vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
-               vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
-               vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
-       }
+           vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
+               decache_tsc_multiplier(vmx);
 
        vmx_vcpu_pi_load(vcpu, cpu);
        vmx->host_pkru = read_pkru();
@@ -2508,7 +2514,7 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
        unsigned long *msr_bitmap;
 
        if (is_guest_mode(vcpu))
-               msr_bitmap = vmx_msr_bitmap_nested;
+               msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
        else if (cpu_has_secondary_exec_ctrls() &&
                 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
                  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
@@ -6363,13 +6369,6 @@ static __init int hardware_setup(void)
        if (!vmx_msr_bitmap_longmode_x2apic)
                goto out4;
 
-       if (nested) {
-               vmx_msr_bitmap_nested =
-                       (unsigned long *)__get_free_page(GFP_KERNEL);
-               if (!vmx_msr_bitmap_nested)
-                       goto out5;
-       }
-
        vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
        if (!vmx_vmread_bitmap)
                goto out6;
@@ -6392,8 +6391,6 @@ static __init int hardware_setup(void)
 
        memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
        memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
-       if (nested)
-               memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
 
        if (setup_vmcs_config(&vmcs_config) < 0) {
                r = -EIO;
@@ -6529,9 +6526,6 @@ static __init int hardware_setup(void)
 out7:
        free_page((unsigned long)vmx_vmread_bitmap);
 out6:
-       if (nested)
-               free_page((unsigned long)vmx_msr_bitmap_nested);
-out5:
        free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
 out4:
        free_page((unsigned long)vmx_msr_bitmap_longmode);
@@ -6557,8 +6551,6 @@ static __exit void hardware_unsetup(void)
        free_page((unsigned long)vmx_io_bitmap_a);
        free_page((unsigned long)vmx_vmwrite_bitmap);
        free_page((unsigned long)vmx_vmread_bitmap);
-       if (nested)
-               free_page((unsigned long)vmx_msr_bitmap_nested);
 
        free_kvm_area();
 }
@@ -6995,16 +6987,21 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return 1;
        }
 
+       if (cpu_has_vmx_msr_bitmap()) {
+               vmx->nested.msr_bitmap =
+                               (unsigned long *)__get_free_page(GFP_KERNEL);
+               if (!vmx->nested.msr_bitmap)
+                       goto out_msr_bitmap;
+       }
+
        vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
        if (!vmx->nested.cached_vmcs12)
-               return -ENOMEM;
+               goto out_cached_vmcs12;
 
        if (enable_shadow_vmcs) {
                shadow_vmcs = alloc_vmcs();
-               if (!shadow_vmcs) {
-                       kfree(vmx->nested.cached_vmcs12);
-                       return -ENOMEM;
-               }
+               if (!shadow_vmcs)
+                       goto out_shadow_vmcs;
                /* mark vmcs as shadow */
                shadow_vmcs->revision_id |= (1u << 31);
                /* init shadow vmcs */
@@ -7024,6 +7021,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
        skip_emulated_instruction(vcpu);
        nested_vmx_succeed(vcpu);
        return 1;
+
+out_shadow_vmcs:
+       kfree(vmx->nested.cached_vmcs12);
+
+out_cached_vmcs12:
+       free_page((unsigned long)vmx->nested.msr_bitmap);
+
+out_msr_bitmap:
+       return -ENOMEM;
 }
 
 /*
@@ -7098,6 +7104,10 @@ static void free_nested(struct vcpu_vmx *vmx)
        vmx->nested.vmxon = false;
        free_vpid(vmx->nested.vpid02);
        nested_release_vmcs12(vmx);
+       if (vmx->nested.msr_bitmap) {
+               free_page((unsigned long)vmx->nested.msr_bitmap);
+               vmx->nested.msr_bitmap = NULL;
+       }
        if (enable_shadow_vmcs)
                free_vmcs(vmx->nested.current_shadow_vmcs);
        kfree(vmx->nested.cached_vmcs12);
@@ -8419,6 +8429,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 {
        u32 sec_exec_control;
 
+       /* Postpone execution until vmcs01 is the current VMCS. */
+       if (is_guest_mode(vcpu)) {
+               to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
+               return;
+       }
+
        /*
         * There is no point in enabling virtualized x2APIC without
         * enabling APICv
@@ -9472,8 +9488,10 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 {
        int msr;
        struct page *page;
-       unsigned long *msr_bitmap;
+       unsigned long *msr_bitmap_l1;
+       unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;
 
+       /* This shortcut is ok because we support only x2APIC MSRs so far. */
        if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
                return false;
 
@@ -9482,63 +9500,37 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
                WARN_ON(1);
                return false;
        }
-       msr_bitmap = (unsigned long *)kmap(page);
-       if (!msr_bitmap) {
+       msr_bitmap_l1 = (unsigned long *)kmap(page);
+       if (!msr_bitmap_l1) {
                nested_release_page_clean(page);
                WARN_ON(1);
                return false;
        }
 
+       memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
+
        if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
                if (nested_cpu_has_apic_reg_virt(vmcs12))
                        for (msr = 0x800; msr <= 0x8ff; msr++)
                                nested_vmx_disable_intercept_for_msr(
-                                       msr_bitmap,
-                                       vmx_msr_bitmap_nested,
+                                       msr_bitmap_l1, msr_bitmap_l0,
                                        msr, MSR_TYPE_R);
-               /* TPR is allowed */
-               nested_vmx_disable_intercept_for_msr(msr_bitmap,
-                               vmx_msr_bitmap_nested,
+
+               nested_vmx_disable_intercept_for_msr(
+                               msr_bitmap_l1, msr_bitmap_l0,
                                APIC_BASE_MSR + (APIC_TASKPRI >> 4),
                                MSR_TYPE_R | MSR_TYPE_W);
+
                if (nested_cpu_has_vid(vmcs12)) {
-                       /* EOI and self-IPI are allowed */
                        nested_vmx_disable_intercept_for_msr(
-                               msr_bitmap,
-                               vmx_msr_bitmap_nested,
+                               msr_bitmap_l1, msr_bitmap_l0,
                                APIC_BASE_MSR + (APIC_EOI >> 4),
                                MSR_TYPE_W);
                        nested_vmx_disable_intercept_for_msr(
-                               msr_bitmap,
-                               vmx_msr_bitmap_nested,
+                               msr_bitmap_l1, msr_bitmap_l0,
                                APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
                                MSR_TYPE_W);
                }
-       } else {
-               /*
-                * Enable reading intercept of all the x2apic
-                * MSRs. We should not rely on vmcs12 to do any
-                * optimizations here, it may have been modified
-                * by L1.
-                */
-               for (msr = 0x800; msr <= 0x8ff; msr++)
-                       __vmx_enable_intercept_for_msr(
-                               vmx_msr_bitmap_nested,
-                               msr,
-                               MSR_TYPE_R);
-
-               __vmx_enable_intercept_for_msr(
-                               vmx_msr_bitmap_nested,
-                               APIC_BASE_MSR + (APIC_TASKPRI >> 4),
-                               MSR_TYPE_W);
-               __vmx_enable_intercept_for_msr(
-                               vmx_msr_bitmap_nested,
-                               APIC_BASE_MSR + (APIC_EOI >> 4),
-                               MSR_TYPE_W);
-               __vmx_enable_intercept_for_msr(
-                               vmx_msr_bitmap_nested,
-                               APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
-                               MSR_TYPE_W);
        }
        kunmap(page);
        nested_release_page_clean(page);
@@ -9957,10 +9949,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        }
 
        if (cpu_has_vmx_msr_bitmap() &&
-           exec_control & CPU_BASED_USE_MSR_BITMAPS) {
-               nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
-               /* MSR_BITMAP will be set by following vmx_set_efer. */
-       } else
+           exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+           nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
+               ; /* MSR_BITMAP will be set by following vmx_set_efer. */
+       else
                exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
 
        /*
@@ -10011,6 +10003,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                        vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
        else
                vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+       if (kvm_has_tsc_control)
+               decache_tsc_multiplier(vmx);
 
        if (enable_vpid) {
                /*
@@ -10767,6 +10761,14 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
        else
                vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
                              PIN_BASED_VMX_PREEMPTION_TIMER);
+       if (kvm_has_tsc_control)
+               decache_tsc_multiplier(vmx);
+
+       if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
+               vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
+               vmx_set_virtual_x2apic_mode(vcpu,
+                               vcpu->arch.apic_base & X2APIC_ENABLE);
+       }
 
        /* This is needed for same reason as it was needed in prepare_vmcs02 */
        vmx->host_rsp = 0;
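
The handle_vmon() hunk above converts its cleanup to the kernel's
standard goto-unwind idiom: each label undoes exactly the allocations
that succeeded before the failing one. A standalone sketch of the
pattern, all names and sizes hypothetical:

       #include <linux/gfp.h>
       #include <linux/slab.h>

       struct foo {
               unsigned long *bitmap;
               void *cache;
       };

       static int foo_init(struct foo *f)
       {
               f->bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
               if (!f->bitmap)
                       goto out_bitmap;

               f->cache = kmalloc(1024, GFP_KERNEL);
               if (!f->cache)
                       goto out_cache;

               return 0;

       out_cache:
               /* free only what was already allocated, then fall through */
               free_page((unsigned long)f->bitmap);
       out_bitmap:
               return -ENOMEM;
       }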
index ec8654f117d8e5152f359c66a23740f9d1a85160..bda8d5eef04d5594630114541d4b1ce043ff890a 100644 (file)
@@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
  */
 static inline bool kaslr_memory_enabled(void)
 {
-       return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+       return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
 }
 
 /* Initialize base and padding for each memory region randomized with KASLR */
index b814ca675131ec1bf647a91290cff0a76a308814..7948be342ee990fe0e7c9358a2b639483be4eac7 100644 (file)
@@ -41,6 +41,7 @@ static DEFINE_RAW_SPINLOCK(list_lock);
  * @node:      list item for parent traversal.
  * @rcu:       RCU callback item for freeing.
  * @irq:       back pointer to parent.
+ * @enabled:   true if driver enabled IRQ
  * @virq:      the virtual IRQ value provided to the requesting driver.
  *
  * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
@@ -50,6 +51,7 @@ struct vmd_irq {
        struct list_head        node;
        struct rcu_head         rcu;
        struct vmd_irq_list     *irq;
+       bool                    enabled;
        unsigned int            virq;
 };
 
@@ -122,7 +124,9 @@ static void vmd_irq_enable(struct irq_data *data)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&list_lock, flags);
+       WARN_ON(vmdirq->enabled);
        list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+       vmdirq->enabled = true;
        raw_spin_unlock_irqrestore(&list_lock, flags);
 
        data->chip->irq_unmask(data);
@@ -136,8 +140,10 @@ static void vmd_irq_disable(struct irq_data *data)
        data->chip->irq_mask(data);
 
        raw_spin_lock_irqsave(&list_lock, flags);
-       list_del_rcu(&vmdirq->node);
-       INIT_LIST_HEAD_RCU(&vmdirq->node);
+       if (vmdirq->enabled) {
+               list_del_rcu(&vmdirq->node);
+               vmdirq->enabled = false;
+       }
        raw_spin_unlock_irqrestore(&list_lock, flags);
 }
 
index 8ffb089b19a5690596637d4eb9d0d8026eb88255..b86ebb1a9a7f45cff133b5e99b90e34734252d16 100644 (file)
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 
 /* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
index f39477538fef0a8759ce966b40e5034c0cb43bba..aa7354088008ba5f73d3ba1af83eb902b0629ec6 100644 (file)
@@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
 
-       if (bio_op(bio) == REQ_OP_DISCARD)
-               goto integrity_clone;
-
-       if (bio_op(bio) == REQ_OP_WRITE_SAME) {
+       switch (bio_op(bio)) {
+       case REQ_OP_DISCARD:
+       case REQ_OP_SECURE_ERASE:
+               break;
+       case REQ_OP_WRITE_SAME:
                bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-               goto integrity_clone;
+               break;
+       default:
+               bio_for_each_segment(bv, bio_src, iter)
+                       bio->bi_io_vec[bio->bi_vcnt++] = bv;
+               break;
        }
 
-       bio_for_each_segment(bv, bio_src, iter)
-               bio->bi_io_vec[bio->bi_vcnt++] = bv;
-
-integrity_clone:
        if (bio_integrity(bio_src)) {
                int ret;
 
@@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
         * Discards need a mutable bio_vec to accommodate the payload
         * required by the DSM TRIM and UNMAP commands.
         */
-       if (bio_op(bio) == REQ_OP_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
                split = bio_clone_bioset(bio, gfp, bs);
        else
                split = bio_clone_fast(bio, gfp, bs);
index 999442ec4601487a35c76dad156dfa438a2a35b3..36c7ac328d8c17bd1967f898c91ff49f7a9b050d 100644 (file)
@@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+       spin_lock_irq(q->queue_lock);
+       queue_flag_set(QUEUE_FLAG_DYING, q);
+       spin_unlock_irq(q->queue_lock);
 
        if (q->mq_ops)
                blk_mq_wake_waiters(q);
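
Context for the hunk above: queue_flag_set() is a plain, non-atomic
__set_bit()-style read-modify-write of q->queue_flags, so concurrent
updaters must hold q->queue_lock or a bit can be lost; hence the switch
away from queue_flag_set_unlocked(). A sketch of the locked pattern and
the lost-update interleaving it prevents (q is a struct request_queue *;
QUEUE_FLAG_NOMERGES stands in for any second flag):

       spin_lock_irq(q->queue_lock);
       queue_flag_set(QUEUE_FLAG_DYING, q);    /* non-atomic RMW, needs lock */
       spin_unlock_irq(q->queue_lock);

       /*
        * Without the lock, a classic lost update is possible:
        *
        *   CPU0                              CPU1
        *   tmp0 = q->queue_flags;
        *                                     tmp1 = q->queue_flags;
        *   tmp0 |= 1UL << QUEUE_FLAG_DYING;
        *                                     tmp1 |= 1UL << QUEUE_FLAG_NOMERGES;
        *   q->queue_flags = tmp0;
        *                                     q->queue_flags = tmp1;  // DYING lost
        */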
index 3eec75a9e91d8e172cfc0a413f74e52a8004aee3..2642e5fc8b69a03494b62638d4eca98ee07b7edc 100644 (file)
@@ -94,8 +94,30 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
        bool do_split = true;
        struct bio *new = NULL;
        const unsigned max_sectors = get_max_io_size(q, bio);
+       unsigned bvecs = 0;
 
        bio_for_each_segment(bv, bio, iter) {
+               /*
+                * With arbitrary bio size, the incoming bio may be very
+                * big. We have to split the bio into small bios so that
+                * each holds at most BIO_MAX_PAGES bvecs because
+                * bio_clone() can fail to allocate big bvecs.
+                *
+                * It would have been better to apply the limit per
+                * request queue in which bio_clone() is involved,
+                * instead of globally. The biggest blocker is the
+                * bio_clone() in bio bounce.
+                *
+                * If bio is splitted by this reason, we should have
+                * allowed to continue bios merging, but don't do
+                * that now for making the change simple.
+                *
+                * TODO: deal with bio bounce's bio_clone() gracefully
+                * and convert the global limit into per-queue limit.
+                */
+               if (bvecs++ >= BIO_MAX_PAGES)
+                       goto split;
+
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
@@ -172,12 +194,18 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
        struct bio *split, *res;
        unsigned nsegs;
 
-       if (bio_op(*bio) == REQ_OP_DISCARD)
+       switch (bio_op(*bio)) {
+       case REQ_OP_DISCARD:
+       case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, bs, &nsegs);
-       else if (bio_op(*bio) == REQ_OP_WRITE_SAME)
+               break;
+       case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
-       else
+               break;
+       default:
                split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+               break;
+       }
 
        /* physical segments can be figured out during splitting */
        res = split ? split : *bio;
@@ -213,7 +241,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
         * This should probably be returning 0, but blk_add_request_payload()
         * (Christoph!!!!)
         */
-       if (bio_op(bio) == REQ_OP_DISCARD)
+       if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
                return 1;
 
        if (bio_op(bio) == REQ_OP_WRITE_SAME)
@@ -385,7 +413,9 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
        nsegs = 0;
        cluster = blk_queue_cluster(q);
 
-       if (bio_op(bio) == REQ_OP_DISCARD) {
+       switch (bio_op(bio)) {
+       case REQ_OP_DISCARD:
+       case REQ_OP_SECURE_ERASE:
                /*
                 * This is a hack - drivers should be neither modifying the
                 * biovec, nor relying on bi_vcnt - but because of
@@ -393,19 +423,16 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                 * a payload we need to set up here (thank you Christoph) and
                 * bi_vcnt is really the only way of telling if we need to.
                 */
-
-               if (bio->bi_vcnt)
-                       goto single_segment;
-
-               return 0;
-       }
-
-       if (bio_op(bio) == REQ_OP_WRITE_SAME) {
-single_segment:
+               if (!bio->bi_vcnt)
+                       return 0;
+               /* Fall through */
+       case REQ_OP_WRITE_SAME:
                *sg = sglist;
                bvec = bio_iovec(bio);
                sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                return 1;
+       default:
+               break;
        }
 
        for_each_bio(bio)
index e931a0e8e73dfb23761a0fa2f2819e43207dc07c..13f5a6c1de76827c3aa2eaab28061581971e0c5c 100644 (file)
@@ -793,11 +793,12 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
        struct list_head *dptr;
        int queued;
 
-       WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
-
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return;
 
+       WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+               cpu_online(hctx->next_cpu));
+
        hctx->run++;
 
        /*
@@ -1036,10 +1037,11 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
-                                           struct blk_mq_ctx *ctx,
                                            struct request *rq,
                                            bool at_head)
 {
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+
        trace_block_rq_insert(hctx->queue, rq);
 
        if (at_head)
@@ -1053,20 +1055,16 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
 
-       __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
+       __blk_mq_insert_req_list(hctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
-               bool async)
+                          bool async)
 {
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
-       struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
-
-       current_ctx = blk_mq_get_ctx(q);
-       if (!cpu_online(ctx->cpu))
-               rq->mq_ctx = ctx = current_ctx;
 
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -1076,8 +1074,6 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
-
-       blk_mq_put_ctx(current_ctx);
 }
 
 static void blk_mq_insert_requests(struct request_queue *q,
@@ -1088,14 +1084,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
 
 {
        struct blk_mq_hw_ctx *hctx;
-       struct blk_mq_ctx *current_ctx;
 
        trace_block_unplug(q, depth, !from_schedule);
 
-       current_ctx = blk_mq_get_ctx(q);
-
-       if (!cpu_online(ctx->cpu))
-               ctx = current_ctx;
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
        /*
@@ -1107,15 +1098,14 @@ static void blk_mq_insert_requests(struct request_queue *q,
                struct request *rq;
 
                rq = list_first_entry(list, struct request, queuelist);
+               BUG_ON(rq->mq_ctx != ctx);
                list_del_init(&rq->queuelist);
-               rq->mq_ctx = ctx;
-               __blk_mq_insert_req_list(hctx, ctx, rq, false);
+               __blk_mq_insert_req_list(hctx, rq, false);
        }
        blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
 
        blk_mq_run_hw_queue(hctx, from_schedule);
-       blk_mq_put_ctx(current_ctx);
 }
 
 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -1630,16 +1620,17 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
        return 0;
 }
 
+/*
+ * 'cpu' is going away. Splice any existing rq_list entries from this
+ * software queue to the hw queue dispatch list, and ensure that it
+ * gets run.
+ */
 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 {
-       struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        LIST_HEAD(tmp);
 
-       /*
-        * Move ctx entries to new CPU, if this one is going away.
-        */
-       ctx = __blk_mq_get_ctx(q, cpu);
+       ctx = __blk_mq_get_ctx(hctx->queue, cpu);
 
        spin_lock(&ctx->lock);
        if (!list_empty(&ctx->rq_list)) {
@@ -1651,24 +1642,11 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
        if (list_empty(&tmp))
                return NOTIFY_OK;
 
-       ctx = blk_mq_get_ctx(q);
-       spin_lock(&ctx->lock);
-
-       while (!list_empty(&tmp)) {
-               struct request *rq;
-
-               rq = list_first_entry(&tmp, struct request, queuelist);
-               rq->mq_ctx = ctx;
-               list_move_tail(&rq->queuelist, &ctx->rq_list);
-       }
-
-       hctx = q->mq_ops->map_queue(q, ctx->cpu);
-       blk_mq_hctx_mark_pending(hctx, ctx);
-
-       spin_unlock(&ctx->lock);
+       spin_lock(&hctx->lock);
+       list_splice_tail_init(&tmp, &hctx->dispatch);
+       spin_unlock(&hctx->lock);
 
        blk_mq_run_hw_queue(hctx, true);
-       blk_mq_put_ctx(ctx);
        return NOTIFY_OK;
 }
 
index 7096c22041e7e6680b320ca08e4be831591b6477..f7d973a56fd75076d816d8a52bc1169b7dc2ae36 100644 (file)
@@ -366,7 +366,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);
 
-               if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD))
+               if (req_op(rq) != req_op(pos))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
index b71a9c76700956e1ddfda4e742a2053b0f0769fe..e3d8e4ced4a23c57d1f36da593e80a64d8b1d142 100644 (file)
@@ -3706,22 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
        if (UFDCS->rawcmd == 1)
                UFDCS->rawcmd = 2;
 
-       if (mode & (FMODE_READ|FMODE_WRITE)) {
-               UDRS->last_checked = 0;
-               clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
-               check_disk_change(bdev);
-               if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
-                       goto out;
-               if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+       if (!(mode & FMODE_NDELAY)) {
+               if (mode & (FMODE_READ|FMODE_WRITE)) {
+                       UDRS->last_checked = 0;
+                       clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+                       check_disk_change(bdev);
+                       if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+                               goto out;
+                       if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+                               goto out;
+               }
+               res = -EROFS;
+               if ((mode & FMODE_WRITE) &&
+                   !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
                        goto out;
        }
-
-       res = -EROFS;
-
-       if ((mode & FMODE_WRITE) &&
-                       !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
-               goto out;
-
        mutex_unlock(&open_lock);
        mutex_unlock(&floppy_mutex);
        return 0;
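
The floppy hunk above skips the media-change and write-protect checks
when the device is opened with FMODE_NDELAY, i.e. O_NDELAY/O_NONBLOCK
from userspace, so such opens succeed without a readable disk and the
checks are deferred to actual I/O. A small userspace sketch of the
behaviour this restores (device path illustrative):

       #include <err.h>
       #include <fcntl.h>
       #include <unistd.h>

       int main(void)
       {
               /* Open the drive for status/format ioctls without
                * requiring a readable medium in it.
                */
               int fd = open("/dev/fd0", O_RDONLY | O_NONBLOCK);
               if (fd < 0)
                       err(1, "open /dev/fd0");
               close(fd);
               return 0;
       }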
index be4fea6a5dd33695df30f87a1fea5341eadbd709..88ef6d4729b46594b8542fa81467a45763f2f343 100644 (file)
@@ -189,6 +189,8 @@ struct blkfront_info
        struct mutex mutex;
        struct xenbus_device *xbdev;
        struct gendisk *gd;
+       u16 sector_size;
+       unsigned int physical_sector_size;
        int vdevice;
        blkif_vdev_t handle;
        enum blkif_state connected;
@@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
        .map_queue = blk_mq_map_queue,
 };
 
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+       struct request_queue *rq = info->rq;
+       struct gendisk *gd = info->gd;
+       unsigned int segments = info->max_indirect_segments ? :
+                               BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+       queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+       if (info->feature_discard) {
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+               blk_queue_max_discard_sectors(rq, get_capacity(gd));
+               rq->limits.discard_granularity = info->discard_granularity;
+               rq->limits.discard_alignment = info->discard_alignment;
+               if (info->feature_secdiscard)
+                       queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+       }
+
+       /* Hard sector size and max sectors impersonate the equiv. hardware. */
+       blk_queue_logical_block_size(rq, info->sector_size);
+       blk_queue_physical_block_size(rq, info->physical_sector_size);
+       blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+       /* Each segment in a request is up to an aligned page in size. */
+       blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+       blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+       /* Ensure a merged request will fit in a single I/O ring slot. */
+       blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+       /* Make sure buffer addresses are sector-aligned. */
+       blk_queue_dma_alignment(rq, 511);
+
+       /* Make sure we don't use bounce buffers. */
+       blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
-                               unsigned int physical_sector_size,
-                               unsigned int segments)
+                               unsigned int physical_sector_size)
 {
        struct request_queue *rq;
        struct blkfront_info *info = gd->private_data;
@@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
        }
 
        rq->queuedata = info;
-       queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
-       if (info->feature_discard) {
-               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
-               blk_queue_max_discard_sectors(rq, get_capacity(gd));
-               rq->limits.discard_granularity = info->discard_granularity;
-               rq->limits.discard_alignment = info->discard_alignment;
-               if (info->feature_secdiscard)
-                       queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
-       }
-
-       /* Hard sector size and max sectors impersonate the equiv. hardware. */
-       blk_queue_logical_block_size(rq, sector_size);
-       blk_queue_physical_block_size(rq, physical_sector_size);
-       blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
-       /* Each segment in a request is up to an aligned page in size. */
-       blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-       blk_queue_max_segment_size(rq, PAGE_SIZE);
-
-       /* Ensure a merged request will fit in a single I/O ring slot. */
-       blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
-       /* Make sure buffer addresses are sector-aligned. */
-       blk_queue_dma_alignment(rq, 511);
-
-       /* Make sure we don't use bounce buffers. */
-       blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
-       gd->queue = rq;
+       info->rq = gd->queue = rq;
+       info->gd = gd;
+       info->sector_size = sector_size;
+       info->physical_sector_size = physical_sector_size;
+       blkif_set_queue_limits(info);
 
        return 0;
 }
@@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
        gd->private_data = info;
        set_capacity(gd, capacity);
 
-       if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
-                                info->max_indirect_segments ? :
-                                BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+       if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
                del_gendisk(gd);
                goto release;
        }
 
-       info->rq = gd->queue;
-       info->gd = gd;
-
        xlvbd_flush(info);
 
        if (vdisk_info & VDISK_READONLY)
@@ -1315,7 +1323,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
                        rinfo->ring_ref[i] = GRANT_INVALID_REF;
                }
        }
-       free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
+       free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
        rinfo->ring.sring = NULL;
 
        if (rinfo->irq)
@@ -2007,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info)
        struct split_bio *split_bio;
 
        blkfront_gather_backend_features(info);
+       /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+       blkif_set_queue_limits(info);
        segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
-       blk_queue_max_segments(info->rq, segs);
+       blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 
        for (r_index = 0; r_index < info->nr_rings; r_index++) {
                struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
@@ -2432,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
        if (err) {
                xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
                                 info->xbdev->otherend);
-               return;
+               goto fail;
        }
 
        xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2445,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info)
        device_add_disk(&info->xbdev->dev, info->gd);
 
        info->is_ready = 1;
+       return;
+
+fail:
+       blkif_free(info, 0);
+       return;
 }
 
 /**
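
The larger xen-blkfront change is about re-applicability: blk_mq_update_nr_hw_queues(), called when the frontend reconnects after migration, re-initializes the request queue and clobbers previously configured limits, so every limit now lives in blkif_set_queue_limits() and blkif_recover() simply calls it again. The shape of the pattern, sketched with hypothetical mydev fields standing in for the blkfront ones:

        /* One helper owns all limits, so init and recovery cannot drift. */
        static void mydev_set_queue_limits(struct mydev_info *info)
        {
                struct request_queue *q = info->rq;

                blk_queue_logical_block_size(q, info->sector_size);
                blk_queue_physical_block_size(q, info->physical_sector_size);
                blk_queue_max_segments(q, info->max_segments);
        }

        /* At init time ... */
        info->rq = rq;
        mydev_set_queue_limits(info);

        /* ... and again on recovery, after the queue was re-initialized. */
        blk_mq_update_nr_hw_queues(&info->tag_set, nr_queues);
        mydev_set_queue_limits(info);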
index 937e10b84d5850311d878988780fb7aefee069e6..3e1cb512f3ce962a81b32b5c5ea7ddc1ea5d1b0b 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/of_irq.h>
 #include <linux/sched_clock.h>
 
+#include <clocksource/pxa.h>
+
 #include <asm/div64.h>
 
 #define OSMR0          0x00    /* OS Timer 0 Match Register */
index 97669ee4df2a6625f9e69b83d6d479898aee8515..c83452cacb41223638e6075e206345c11f655240 100644 (file)
@@ -123,12 +123,16 @@ static struct clock_event_device sun4i_clockevent = {
        .set_next_event = sun4i_clkevt_next_event,
 };
 
+static void sun4i_timer_clear_interrupt(void)
+{
+       writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
+}
 
 static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = (struct clock_event_device *)dev_id;
 
-       writel(0x1, timer_base + TIMER_IRQ_ST_REG);
+       sun4i_timer_clear_interrupt();
        evt->event_handler(evt);
 
        return IRQ_HANDLED;
@@ -208,6 +212,9 @@ static int __init sun4i_timer_init(struct device_node *node)
        /* Make sure timer is stopped before playing with interrupts */
        sun4i_clkevt_time_stop(0);
 
+       /* clear timer0 interrupt */
+       sun4i_timer_clear_interrupt();
+
        sun4i_clockevent.cpumask = cpu_possible_mask;
        sun4i_clockevent.irq = irq;
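
Besides wrapping the ack in sun4i_timer_clear_interrupt(), the patch clears the pending bit once at probe time, before the handler is registered: a stale interrupt left latched by the bootloader would otherwise fire the moment the IRQ is enabled, with the clockevent not yet fully set up. The general ordering, sketched with placeholder register names (MYDEV_* is not a real device):

        /* Quiesce and ack the device *before* the handler can run. */
        mydev_timer_stop(base);
        writel(MYDEV_IRQ_PEND, base + MYDEV_IRQ_STATUS); /* write-1-to-clear */

        ret = request_irq(irq, mydev_timer_interrupt, IRQF_TIMER,
                          "mydev-timer", &mydev_clockevent);
        if (ret)
                return ret;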
 
index a7d9a08e4b0e31b24640d4e188746aa25139bc1b..a8e6c7df853d2c64914c04af5785f61a51d56880 100644 (file)
@@ -202,10 +202,10 @@ static int __init pistachio_clksrc_of_init(struct device_node *node)
        rate = clk_get_rate(fast_clk);
 
        /* Disable irq's for clocksource usage */
-       gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
-       gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
-       gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
-       gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+       gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+       gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+       gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+       gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
 
        /* Enable timer block */
        writel(TIMER_ME_GLOBAL, pcs_gpt.base);
index 1ffac0cb0cb78496d684e987d7b17f3b99a28775..3494bc5a21d556bef082b1b7e44297db93ed98fb 100644 (file)
@@ -261,6 +261,12 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
                return PTR_ERR(data->mck);
        }
 
+       ret = clk_prepare_enable(data->mck);
+       if (ret) {
+               pr_err("Unable to enable mck\n");
+               return ret;
+       }
+
        /* Get the interrupts property */
        data->irq = irq_of_parse_and_map(node, 0);
        if (!data->irq) {
index dfb168568af1a6d2ba163a7010d611ec0ad3a3f0..1f01e98c83c7ddcccecd50e11671e3b519b2c136 100644 (file)
@@ -116,6 +116,9 @@ static int dax_pmem_probe(struct device *dev)
        if (rc)
                return rc;
 
+       /* adjust the dax_region resource to the start of data */
+       res.start += le64_to_cpu(pfn_sb->dataoff);
+
        nd_region = to_nd_region(dev->parent);
        dax_region = alloc_dax_region(dev, nd_region->id, &res,
                        le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
index 98dd47a30fc754af1aede05d31dc2c6179d6d2b1..66a94103798bbed169fd9632692fa074549c8006 100644 (file)
@@ -50,6 +50,7 @@ config GPIO_DEVRES
 config OF_GPIO
        def_bool y
        depends on OF
+       depends on HAS_IOMEM
 
 config GPIO_ACPI
        def_bool y
@@ -188,7 +189,7 @@ config GPIO_EP93XX
 config GPIO_ETRAXFS
        bool "Axis ETRAX FS General I/O"
        depends on CRIS || COMPILE_TEST
-       depends on OF
+       depends on OF_GPIO
        select GPIO_GENERIC
        select GPIOLIB_IRQCHIP
        help
@@ -214,7 +215,7 @@ config GPIO_GENERIC_PLATFORM
 
 config GPIO_GRGPIO
        tristate "Aeroflex Gaisler GRGPIO support"
-       depends on OF
+       depends on OF_GPIO
        select GPIO_GENERIC
        select IRQ_DOMAIN
        help
@@ -312,7 +313,7 @@ config GPIO_MPC8XXX
 config GPIO_MVEBU
        def_bool y
        depends on PLAT_ORION
-       depends on OF
+       depends on OF_GPIO
        select GENERIC_IRQ_CHIP
 
 config GPIO_MXC
@@ -405,7 +406,7 @@ config GPIO_TEGRA
        bool "NVIDIA Tegra GPIO support"
        default ARCH_TEGRA
        depends on ARCH_TEGRA || COMPILE_TEST
-       depends on OF
+       depends on OF_GPIO
        help
          Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
 
@@ -1099,7 +1100,7 @@ menu "SPI GPIO expanders"
 
 config GPIO_74X164
        tristate "74x164 serial-in/parallel-out 8-bits shift register"
-       depends on OF
+       depends on OF_GPIO
        help
          Driver for 74x164 compatible serial-in/parallel-out 8-outputs
          shift registers. This driver can be used to provide access
index 08807368f0078337bff7ba6c63bebadf72f5e018..946d09195598f4dacde27162193e1ee99490481f 100644 (file)
@@ -192,6 +192,10 @@ int __max730x_probe(struct max7301 *ts)
        ts->chip.parent = dev;
        ts->chip.owner = THIS_MODULE;
 
+       ret = gpiochip_add_data(&ts->chip, ts);
+       if (ret)
+               goto exit_destroy;
+
        /*
         * initialize pullups according to platform data and cache the
         * register values for later use.
@@ -213,10 +217,6 @@ int __max730x_probe(struct max7301 *ts)
                }
        }
 
-       ret = gpiochip_add_data(&ts->chip, ts);
-       if (ret)
-               goto exit_destroy;
-
        return ret;
 
 exit_destroy:
index e6dfa1bd3defae37e1ee549e10886c2edeece697..5f65a78b27c9c56c52414dddbe2d16e12f123905 100644 (file)
@@ -2462,18 +2462,24 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 
        if (addr->dev_addr.bound_dev_if) {
                ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
-               if (!ndev)
-                       return -ENODEV;
+               if (!ndev) {
+                       ret = -ENODEV;
+                       goto err2;
+               }
 
                if (ndev->flags & IFF_LOOPBACK) {
                        dev_put(ndev);
-                       if (!id_priv->id.device->get_netdev)
-                               return -EOPNOTSUPP;
+                       if (!id_priv->id.device->get_netdev) {
+                               ret = -EOPNOTSUPP;
+                               goto err2;
+                       }
 
                        ndev = id_priv->id.device->get_netdev(id_priv->id.device,
                                                              id_priv->id.port_num);
-                       if (!ndev)
-                               return -ENODEV;
+                       if (!ndev) {
+                               ret = -ENODEV;
+                               goto err2;
+                       }
                }
 
                route->path_rec->net = &init_net;
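
The cma.c hunk is the standard conversion of bare error returns into goto unwinding: work allocated earlier in cma_resolve_iboe_route() was leaked on the early returns, and routing every failure through err2 guarantees it is freed. The canonical shape of the idiom, as a self-contained sketch (ctx and its helpers are illustrative):

        static int setup(struct ctx *c)
        {
                int ret;

                c->a = kzalloc(sizeof(*c->a), GFP_KERNEL);
                if (!c->a)
                        return -ENOMEM; /* nothing to unwind yet */

                c->b = kzalloc(sizeof(*c->b), GFP_KERNEL);
                if (!c->b) {
                        ret = -ENOMEM;
                        goto err_free_a; /* never a bare return past here */
                }

                ret = start_hw(c);
                if (ret)
                        goto err_free_b;
                return 0;

        err_free_b:
                kfree(c->b);
        err_free_a:
                kfree(c->a);
                return ret;
        }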
index 3aca7f6171b428eac865567978ff21d59ab39dbf..b6a953aed7e83d6058164ca66af2516ab0981e23 100644 (file)
@@ -1827,8 +1827,12 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
                                (ep->mpa_pkt + sizeof(*mpa));
                        ep->ird = ntohs(mpa_v2_params->ird) &
                                MPA_V2_IRD_ORD_MASK;
+                       ep->ird = min_t(u32, ep->ird,
+                                       cur_max_read_depth(ep->com.dev));
                        ep->ord = ntohs(mpa_v2_params->ord) &
                                MPA_V2_IRD_ORD_MASK;
+                       ep->ord = min_t(u32, ep->ord,
+                                       cur_max_read_depth(ep->com.dev));
                        PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
                             ep->ord);
                        if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
@@ -3136,7 +3140,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
                if (conn_param->ord > ep->ird) {
                        if (RELAXED_IRD_NEGOTIATION) {
-                               ep->ord = ep->ird;
+                               conn_param->ord = ep->ird;
                        } else {
                                ep->ird = conn_param->ird;
                                ep->ord = conn_param->ord;
index 812ab7278b8eec477183f451100b7c900adbeaec..ac926c942fee7bc72036e13f269e84156ded95d4 100644 (file)
@@ -1016,15 +1016,15 @@ int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
        struct c4iw_cq *chp;
-       int ret;
+       int ret = 0;
        unsigned long flag;
 
        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
-       ret = t4_arm_cq(&chp->cq,
-                       (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+       t4_arm_cq(&chp->cq,
+                 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+       if (flags & IB_CQ_REPORT_MISSED_EVENTS)
+               ret = t4_cq_notempty(&chp->cq);
        spin_unlock_irqrestore(&chp->lock, flag);
-       if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
-               ret = 0;
        return ret;
 }
index 6126bbe36095c21021f9cc5cdca3b75da299dc85..02173f4315fa62bec5e67171a45ab864a1e096da 100644 (file)
@@ -634,6 +634,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
        return (CQE_GENBIT(cqe) == cq->gen);
 }
 
+static inline int t4_cq_notempty(struct t4_cq *cq)
+{
+       return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
+}
+
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
        int ret;
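
t4_cq_notempty() exists to honour IB_CQ_REPORT_MISSED_EVENTS: with that flag, ib_req_notify_cq() may return a positive value meaning a completion may have arrived while the CQ was being re-armed, and the caller must poll again rather than sleep. The standard consumer loop against the verbs API, with a hypothetical process_completion():

        struct ib_wc wc;
        int ret;

        do {
                while (ib_poll_cq(cq, 1, &wc) > 0)
                        process_completion(&wc);

                /* > 0: an event may have been missed; re-poll, don't sleep. */
                ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                           IB_CQ_REPORT_MISSED_EVENTS);
        } while (ret > 0);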
index 79575ee873f21af3361264b711419c267b6a23d0..0566393e5aba689c8ea73563a7e29bf54003da07 100644 (file)
@@ -47,7 +47,6 @@
 #include <linux/topology.h>
 #include <linux/cpumask.h>
 #include <linux/module.h>
-#include <linux/cpumask.h>
 
 #include "hfi.h"
 #include "affinity.h"
@@ -682,7 +681,7 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
                           size_t count)
 {
        struct hfi1_affinity_node *entry;
-       struct cpumask mask;
+       cpumask_var_t mask;
        int ret, i;
 
        spin_lock(&node_affinity.lock);
@@ -692,19 +691,24 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
        if (!entry)
                return -EINVAL;
 
-       ret = cpulist_parse(buf, &mask);
+       ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+       if (!ret)
+               return -ENOMEM;
+
+       ret = cpulist_parse(buf, mask);
        if (ret)
-               return ret;
+               goto out;
 
-       if (!cpumask_subset(&mask, cpu_online_mask) || cpumask_empty(&mask)) {
+       if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
                dd_dev_warn(dd, "Invalid CPU mask\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        mutex_lock(&sdma_affinity_mutex);
        /* reset the SDMA interrupt affinity details */
        init_cpu_mask_set(&entry->def_intr);
-       cpumask_copy(&entry->def_intr.mask, &mask);
+       cpumask_copy(&entry->def_intr.mask, mask);
        /*
         * Reassign the affinity for each SDMA interrupt.
         */
@@ -720,8 +724,9 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
                if (ret)
                        break;
        }
-
        mutex_unlock(&sdma_affinity_mutex);
+out:
+       free_cpumask_var(mask);
        return ret ? ret : strnlen(buf, PAGE_SIZE);
 }
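
A struct cpumask on the stack occupies NR_CPUS/8 bytes (1 KiB at NR_CPUS=8192), which is the motivation for cpumask_var_t here: under CONFIG_CPUMASK_OFFSTACK it becomes a pointer backed by a heap allocation, and a plain zero-cost array otherwise. The idiom in isolation, with a hypothetical apply_mask() consumer:

        cpumask_var_t mask;
        int ret;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        ret = cpulist_parse(buf, mask);
        if (!ret && cpumask_subset(mask, cpu_online_mask))
                apply_mask(mask);

        free_cpumask_var(mask); /* required on every exit path */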
 
index dbab9d9cc288c7edb296ab25d890d3d79c93ef22..a49cc88f08a2aa6b063903fa88291c070849143e 100644 (file)
@@ -223,28 +223,32 @@ DEBUGFS_SEQ_FILE_OPEN(ctx_stats)
 DEBUGFS_FILE_OPS(ctx_stats);
 
 static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
+       __acquires(RCU)
 {
        struct qp_iter *iter;
        loff_t n = *pos;
 
-       rcu_read_lock();
        iter = qp_iter_init(s->private);
+
+       /* stop calls rcu_read_unlock */
+       rcu_read_lock();
+
        if (!iter)
                return NULL;
 
-       while (n--) {
+       do {
                if (qp_iter_next(iter)) {
                        kfree(iter);
                        return NULL;
                }
-       }
+       } while (n--);
 
        return iter;
 }
 
 static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
                                loff_t *pos)
+       __must_hold(RCU)
 {
        struct qp_iter *iter = iter_ptr;
 
@@ -259,7 +263,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 }
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
-__releases(RCU)
+       __releases(RCU)
 {
        rcu_read_unlock();
 }
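
Both this fix and the matching qib one further down rely on a seq_file guarantee: ->stop() is always called after ->start(), even when ->start() returns NULL. Taking rcu_read_lock() in start and dropping it in stop therefore always balances, but anything that can sleep, like the allocation inside qp_iter_init(), has to happen before the lock is taken. Reduced to a skeleton (foo_* names are illustrative):

        static void *foo_seq_start(struct seq_file *s, loff_t *pos)
                __acquires(RCU)
        {
                struct foo_iter *it = foo_iter_alloc(s->private); /* may sleep */

                rcu_read_lock();        /* released unconditionally in stop */
                if (!it)
                        return NULL;
                return foo_iter_seek(it, *pos);
        }

        static void foo_seq_stop(struct seq_file *s, void *v)
                __releases(RCU)
        {
                rcu_read_unlock();      /* runs even if start returned NULL */
        }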
index 8246dc7d0573a1be768fb7d65fab2fba01c7c686..303f10555729715a9a21fd5b240eb8daba39efc0 100644 (file)
@@ -888,14 +888,15 @@ void set_all_slowpath(struct hfi1_devdata *dd)
 }
 
 static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
-                                     struct hfi1_packet packet,
+                                     struct hfi1_packet *packet,
                                      struct hfi1_devdata *dd)
 {
        struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
-       struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
-                                                            packet.rhf_addr);
+       struct hfi1_message_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
+                                                            packet->rhf_addr);
+       u8 etype = rhf_rcv_type(packet->rhf);
 
-       if (hdr2sc(hdr, packet.rhf) != 0xf) {
+       if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) {
                int hwstate = read_logical_state(dd);
 
                if (hwstate != LSTATE_ACTIVE) {
@@ -979,7 +980,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
                        /* Auto activate link on non-SC15 packet receive */
                        if (unlikely(rcd->ppd->host_link_state ==
                                     HLS_UP_ARMED) &&
-                           set_armed_to_active(rcd, packet, dd))
+                           set_armed_to_active(rcd, &packet, dd))
                                goto bail;
                        last = process_rcv_packet(&packet, thread);
                }
index 1ecbec1923589c3ec96d0958ec45541aa2b5724b..7e03ccd2554db3a63f5f79966f0a21c1617e45c7 100644 (file)
@@ -183,6 +183,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
        if (fd) {
                fd->rec_cpu_num = -1; /* no cpu affinity by default */
                fd->mm = current->mm;
+               atomic_inc(&fd->mm->mm_count);
        }
 
        fp->private_data = fd;
@@ -222,7 +223,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
                ret = assign_ctxt(fp, &uinfo);
                if (ret < 0)
                        return ret;
-               setup_ctxt(fp);
+               ret = setup_ctxt(fp);
                if (ret)
                        return ret;
                ret = user_init(fp);
@@ -779,6 +780,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
        mutex_unlock(&hfi1_mutex);
        hfi1_free_ctxtdata(dd, uctxt);
 done:
+       mmdrop(fdata->mm);
        kobject_put(&dd->kobj);
        kfree(fdata);
        return 0;
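
Caching current->mm across file operations is only safe with a reference held: incrementing mm_count pins the struct mm_struct itself (mm_users, by contrast, pins the address space), and mmdrop() releases it on close. In outline, with a hypothetical foo_data:

        struct foo_data {
                struct mm_struct *mm;
        };

        static int foo_open(struct inode *inode, struct file *fp)
        {
                struct foo_data *fd = kzalloc(sizeof(*fd), GFP_KERNEL);

                if (!fd)
                        return -ENOMEM;
                fd->mm = current->mm;
                atomic_inc(&fd->mm->mm_count); /* mmgrab() in later kernels */
                fp->private_data = fd;
                return 0;
        }

        static int foo_release(struct inode *inode, struct file *fp)
        {
                struct foo_data *fd = fp->private_data;

                mmdrop(fd->mm); /* pairs with the atomic_inc above */
                kfree(fd);
                return 0;
        }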
index 1000e0fd96d9b4972cec3327739f5ca7ff59b7b3..a021e660d482dd5e4cbd87ae1c010d25f2e1975f 100644 (file)
@@ -1272,9 +1272,26 @@ static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
               ((!!(rhf_dc_info(rhf))) << 4);
 }
 
+#define HFI1_JKEY_WIDTH       16
+#define HFI1_JKEY_MASK        (BIT(16) - 1)
+#define HFI1_ADMIN_JKEY_RANGE 32
+
+/*
+ * J_KEYs are split and allocated in the following groups:
+ *   0 - 31    - users with administrator privileges
+ *  32 - 63    - kernel protocols using KDETH packets
+ *  64 - 65535 - all other users using KDETH packets
+ */
 static inline u16 generate_jkey(kuid_t uid)
 {
-       return from_kuid(current_user_ns(), uid) & 0xffff;
+       u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;
+
+       if (capable(CAP_SYS_ADMIN))
+               jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
+       else if (jkey < 64)
+               jkey |= BIT(HFI1_JKEY_WIDTH - 1);
+
+       return jkey;
 }
 
 /*
@@ -1656,7 +1673,6 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *,
                                  const struct pci_device_id *);
 void hfi1_free_devdata(struct hfi1_devdata *);
-void cc_state_reclaim(struct rcu_head *rcu);
 struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
 
 /* LED beaconing functions */
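
The new generate_jkey() in this file enforces the comment's partition arithmetically: administrators are masked down into [0, 31], and any other uid that would land in the reserved [0, 63] range gets bit 15 set, pushing it to 32768 or above. Three worked cases:

  - uid 1000, no CAP_SYS_ADMIN: 1000 & 0xffff = 1000, which is >= 64, so it is used as-is;
  - uid 40, no CAP_SYS_ADMIN: 40 < 64, so 40 | BIT(15) = 32808;
  - uid 40, with CAP_SYS_ADMIN: 40 & (32 - 1) = 8, inside the admin range.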
index a358d23ecd54d563d05a14e10c0cd885f6546b8f..b7935451093c6d833112e985d13421b408268b32 100644 (file)
@@ -1333,7 +1333,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
                spin_unlock(&ppd->cc_state_lock);
 
                if (cc_state)
-                       call_rcu(&cc_state->rcu, cc_state_reclaim);
+                       kfree_rcu(cc_state, rcu);
        }
 
        free_credit_return(dd);
index 1263abe01999e3e84306b5594921bc4b11019fca..39e42c373a01c5e9afc276bb2073b214cde4515f 100644 (file)
@@ -1819,6 +1819,11 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
        u32 len = OPA_AM_CI_LEN(am) + 1;
        int ret;
 
+       if (dd->pport->port_type != PORT_TYPE_QSFP) {
+               smp->status |= IB_SMP_INVALID_FIELD;
+               return reply((struct ib_mad_hdr *)smp);
+       }
+
 #define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
 #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
 #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
@@ -3398,7 +3403,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
 
        spin_unlock(&ppd->cc_state_lock);
 
-       call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+       kfree_rcu(old_cc_state, rcu);
 }
 
 static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
@@ -3553,13 +3558,6 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
        return reply((struct ib_mad_hdr *)smp);
 }
 
-void cc_state_reclaim(struct rcu_head *rcu)
-{
-       struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
-
-       kfree(cc_state);
-}
-
 static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
                                   struct ib_device *ibdev, u8 port,
                                   u32 *resp_len)
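
cc_state_reclaim() did nothing but container_of() and kfree(), which is exactly the boilerplate kfree_rcu() generates internally: given the struct and the name of its rcu_head member, it frees the enclosing object after a grace period. Side by side, as a sketch:

        struct cc_state {
                /* ... payload ... */
                struct rcu_head rcu;
        };

        /* Open-coded form: a callback that only unwraps and frees. */
        static void cc_state_reclaim(struct rcu_head *rcu)
        {
                kfree(container_of(rcu, struct cc_state, rcu));
        }
        /* ...  call_rcu(&old->rcu, cc_state_reclaim);  ... */

        /* Equivalent one-liner; the callback disappears entirely. */
        /* ...  kfree_rcu(old, rcu);  ... */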
index a5aa3517e7d5c537d36786fd0d512cef6d60544c..4e4d8317c281cde45278c34104e29aca00b284f2 100644 (file)
@@ -656,10 +656,6 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
 
        iter->dev = dev;
        iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
-       if (qp_iter_next(iter)) {
-               kfree(iter);
-               return NULL;
-       }
 
        return iter;
 }
index a207717ade2aac0da5fbf34e8adb70a53efc2a42..4e95ad810847ab91b76829b00e305bba48dd2057 100644 (file)
@@ -706,8 +706,8 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
                   u8 *data)
 {
        struct hfi1_pportdata *ppd;
-       u32 excess_len = 0;
-       int ret = 0;
+       u32 excess_len = len;
+       int ret = 0, offset = 0;
 
        if (port_num > dd->num_pports || port_num < 1) {
                dd_dev_info(dd, "%s: Invalid port number %d\n",
@@ -740,6 +740,34 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
        }
 
        memcpy(data, &ppd->qsfp_info.cache[addr], len);
+
+       if (addr <= QSFP_MONITOR_VAL_END &&
+           (addr + len) >= QSFP_MONITOR_VAL_START) {
+               /* Overlap with the dynamic channel monitor range */
+               if (addr < QSFP_MONITOR_VAL_START) {
+                       if (addr + len <= QSFP_MONITOR_VAL_END)
+                               len = addr + len - QSFP_MONITOR_VAL_START;
+                       else
+                               len = QSFP_MONITOR_RANGE;
+                       offset = QSFP_MONITOR_VAL_START - addr;
+                       addr = QSFP_MONITOR_VAL_START;
+               } else if (addr == QSFP_MONITOR_VAL_START) {
+                       offset = 0;
+                       if (addr + len > QSFP_MONITOR_VAL_END)
+                               len = QSFP_MONITOR_RANGE;
+               } else {
+                       offset = 0;
+                       if (addr + len > QSFP_MONITOR_VAL_END)
+                               len = QSFP_MONITOR_VAL_END - addr + 1;
+               }
+               /* Refresh the values of the dynamic monitors from the cable */
+               ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
+               if (ret != len) {
+                       ret = -EAGAIN;
+                       goto set_zeroes;
+               }
+       }
+
        return 0;
 
 set_zeroes:
index 69275ebd9597322b599e97fce5804d17efa526f1..36cf52359848afdc5180d03602d587d3813f5b71 100644 (file)
@@ -74,6 +74,9 @@
 /* Defined fields that Intel requires of qualified cables */
 /* Byte 0 is Identifier, not checked */
 /* Byte 1 is reserved "status MSB" */
+#define QSFP_MONITOR_VAL_START 22
+#define QSFP_MONITOR_VAL_END 81
+#define QSFP_MONITOR_RANGE (QSFP_MONITOR_VAL_END - QSFP_MONITOR_VAL_START + 1)
 #define QSFP_TX_CTRL_BYTE_OFFS 86
 #define QSFP_PWR_CTRL_BYTE_OFFS 93
 #define QSFP_CDR_CTRL_BYTE_OFFS 98
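
The three branches added to get_cable_info() in qsfp.c compute the intersection of the caller's window [addr, addr + len) with the inclusive monitor range [QSFP_MONITOR_VAL_START, QSFP_MONITOR_VAL_END], then refresh only the overlapping bytes from the cable. The same arithmetic in compact generic form, an equivalent sketch (assuming addr, len, lo and hi are u32), not the driver's code:

        /* Intersect [addr, addr + len) with the inclusive window [lo, hi]. */
        u32 start = max(addr, lo);
        u32 end = min(addr + len - 1, hi);

        if (start <= end) {
                u32 offset = start - addr;      /* into the caller's buffer */
                u32 span = end - start + 1;     /* bytes to refresh */

                ret = one_qsfp_read(ppd, dd->hfi1_id, start,
                                    data + offset, span);
                if (ret != span)
                        return -EAGAIN;
        }

For example, addr = 20 and len = 10 overlap the window [22, 81] at [22, 29]: offset 2 into the buffer, 8 bytes refreshed, the same result the first branch of the patch produces.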
index b738acdb9b027705f11257c8ab25cbaeba652159..8ec09e470f848334e746b50326c6eadcf57785cd 100644 (file)
@@ -232,7 +232,7 @@ struct i40iw_device {
        struct i40e_client *client;
        struct i40iw_hw hw;
        struct i40iw_cm_core cm_core;
-       unsigned long *mem_resources;
+       u8 *mem_resources;
        unsigned long *allocated_qps;
        unsigned long *allocated_cqs;
        unsigned long *allocated_mrs;
@@ -435,8 +435,8 @@ static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
        *next = resource_num + 1;
        if (*next == max_resources)
                *next = 0;
-       spin_unlock_irqrestore(&iwdev->resource_lock, flags);
        *req_resource_num = resource_num;
+       spin_unlock_irqrestore(&iwdev->resource_lock, flags);
 
        return 0;
 }
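
The one-line move in i40iw_alloc_resource() is a publication-ordering fix: writing *req_resource_num after dropping the spinlock leaves a window in which the bitmap can change under a concurrent caller, so the result must be published while the lock that made it valid is still held. A generic sketch of the rule:

        static DEFINE_SPINLOCK(map_lock);

        static int alloc_id(unsigned long *map, u32 max, u32 *out)
        {
                unsigned long flags;
                u32 id;

                spin_lock_irqsave(&map_lock, flags);
                id = find_first_zero_bit(map, max);
                if (id >= max) {
                        spin_unlock_irqrestore(&map_lock, flags);
                        return -ENOMEM;
                }
                __set_bit(id, map);
                *out = id;      /* publish before unlock, under the same
                                 * lock that made the value valid */
                spin_unlock_irqrestore(&map_lock, flags);
                return 0;
        }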
index 5026dc79978a7c2ea2a1040861913a3aeaf8d7d9..7ca0638579c0b31d9cd0fdc71ea6858b77dba7ea 100644 (file)
@@ -535,8 +535,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
                buf += hdr_len;
        }
 
-       if (pd_len)
-               memcpy(buf, pdata->addr, pd_len);
+       if (pdata && pdata->addr)
+               memcpy(buf, pdata->addr, pdata->size);
 
        atomic_set(&sqbuf->refcount, 1);
 
@@ -3346,26 +3346,6 @@ int i40iw_cm_disconn(struct i40iw_qp *iwqp)
        return 0;
 }
 
-/**
- * i40iw_loopback_nop - Send a nop
- * @qp: associated hw qp
- */
-static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
-{
-       u64 *wqe;
-       u64 header;
-
-       wqe = qp->qp_uk.sq_base->elem;
-       set_64bit_val(wqe, 0, 0);
-       set_64bit_val(wqe, 8, 0);
-       set_64bit_val(wqe, 16, 0);
-
-       header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
-           LS_64(0, I40IWQPSQ_SIGCOMPL) |
-           LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
-       set_64bit_val(wqe, 24, header);
-}
-
 /**
  * i40iw_qp_disconnect - free qp and close cm
  * @iwqp: associate qp for the connection
@@ -3638,7 +3618,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        } else {
                if (iwqp->page)
                        iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
-               i40iw_loopback_nop(&iwqp->sc_qp);
+               dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
        }
 
        if (iwqp->page)
index 6e9081380a276cbb78da8687820b9c7092f684af..0cbbe40382982479e608aca0edbe5a9ab1c267d7 100644 (file)
@@ -1558,6 +1558,10 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
        enum i40iw_status_code status;
        struct i40iw_handler *hdl;
 
+       hdl = i40iw_find_netdev(ldev->netdev);
+       if (hdl)
+               return 0;
+
        hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
        if (!hdl)
                return -ENOMEM;
index 0e8db0a3514153d7659977ff31d4dbaf45a103d5..6fd043b1d71413fbcebe8a209121a9cee50a5586 100644 (file)
@@ -673,8 +673,11 @@ enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
 {
        if (!mem)
                return I40IW_ERR_PARAM;
+       /*
+        * mem->va points to the parent of mem, so both mem and mem->va
+        * cannot be touched once mem->va is freed
+        */
        kfree(mem->va);
-       mem->va = NULL;
        return 0;
 }
 
index 2360338877bf68ca4a809d153f83a64326fa467c..6329c971c22fc383330099fa07819b4666ee48b8 100644 (file)
@@ -794,7 +794,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
        return &iwqp->ibqp;
 error:
        i40iw_free_qp_resources(iwdev, iwqp, qp_num);
-       kfree(mem);
        return ERR_PTR(err_code);
 }
 
@@ -1926,8 +1925,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
                }
                if (iwpbl->pbl_allocated)
                        i40iw_free_pble(iwdev->pble_rsrc, palloc);
-               kfree(iwpbl->iwmr);
-               iwpbl->iwmr = NULL;
+               kfree(iwmr);
                return 0;
        }
 
index d6fc8a6e8c3324fcef4d9081167cce9cc998f13b..006db6436e3b22511bcbb4d990a55e56ac8adcce 100644 (file)
@@ -576,8 +576,8 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
                checksum == cpu_to_be16(0xffff);
 }
 
-static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
-                          unsigned tail, struct mlx4_cqe *cqe, int is_eth)
+static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
+                           unsigned tail, struct mlx4_cqe *cqe, int is_eth)
 {
        struct mlx4_ib_proxy_sqp_hdr *hdr;
 
@@ -600,8 +600,6 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
                wc->slid        = be16_to_cpu(hdr->tun.slid_mac_47_32);
                wc->sl          = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
        }
-
-       return 0;
 }
 
 static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
@@ -692,7 +690,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
                     is_send)) {
                pr_warn("Completion for NOP opcode detected!\n");
-               return -EINVAL;
+               return -EAGAIN;
        }
 
        /* Resize CQ in progress */
@@ -723,7 +721,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                if (unlikely(!mqp)) {
                        pr_warn("CQ %06x with entry for unknown QPN %06x\n",
                               cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
-                       return -EINVAL;
+                       return -EAGAIN;
                }
 
                *cur_qp = to_mibqp(mqp);
@@ -741,7 +739,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                if (unlikely(!msrq)) {
                        pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
                                cq->mcq.cqn, srq_num);
-                       return -EINVAL;
+                       return -EAGAIN;
                }
        }
 
@@ -852,9 +850,11 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
                        if ((*cur_qp)->mlx4_ib_qp_type &
                            (MLX4_IB_QPT_PROXY_SMI_OWNER |
-                            MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
-                               return use_tunnel_data(*cur_qp, cq, wc, tail,
-                                                      cqe, is_eth);
+                            MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
+                               use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
+                                               is_eth);
+                               return 0;
+                       }
                }
 
                wc->slid           = be16_to_cpu(cqe->rlid);
index a84bb766fc62874bc45303268c25b961e45cd4f1..1b4094baa2deec0622f430aa57a44a7fb94724e7 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
-#include <linux/io-mapping.h>
 #if defined(CONFIG_X86)
 #include <asm/pat.h>
 #endif
index 16740dcb876bd4f93e2c4954d61aa51ba8579343..67fc0b6857e185ca4b4a17631741c7cac8a69922 100644 (file)
@@ -1156,18 +1156,18 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_srq =
                (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
                OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
-       attr->max_send_sge = ((rsp->max_write_send_sge &
+       attr->max_send_sge = ((rsp->max_recv_send_sge &
                               OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
                              OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
-       attr->max_recv_sge = (rsp->max_write_send_sge &
-                             OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
-           OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+       attr->max_recv_sge = (rsp->max_recv_send_sge &
+                             OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
        attr->max_srq_sge = (rsp->max_srq_rqe_sge &
                              OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
-       attr->max_rdma_sge = (rsp->max_write_send_sge &
-                             OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
-           OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
+       attr->max_rdma_sge = (rsp->max_wr_rd_sge &
+                             OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
        attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
                                OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
index 0efc9662c6d8758df6b269a26acb2c4f51e2b23e..37df4481bb8fe6d403fde89327216668a258825a 100644 (file)
@@ -554,9 +554,9 @@ enum {
        OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK               = 0x18,
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT         = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK          = 0xFFFF,
-       OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT        = 16,
-       OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK         = 0xFFFF <<
-                               OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
+       OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16,
+       OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK          = 0xFFFF <<
+                               OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT,
 
        OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT       = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK        = 0xFFFF,
@@ -612,6 +612,8 @@ enum {
        OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET         = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK           = 0xFFFF <<
                                OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET,
+       OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT           = 0,
+       OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK            = 0xFFFF,
 };
 
 struct ocrdma_mbx_query_config {
@@ -619,7 +621,7 @@ struct ocrdma_mbx_query_config {
        struct ocrdma_mbx_rsp rsp;
        u32 qp_srq_cq_ird_ord;
        u32 max_pd_ca_ack_delay;
-       u32 max_write_send_sge;
+       u32 max_recv_send_sge;
        u32 max_ird_ord_per_qp;
        u32 max_shared_ird_ord;
        u32 max_mr;
@@ -639,6 +641,8 @@ struct ocrdma_mbx_query_config {
        u32 max_wqes_rqes_per_q;
        u32 max_cq_cqes_per_cq;
        u32 max_srq_rqe_sge;
+       u32 max_wr_rd_sge;
+       u32 ird_pgsz_num_pages;
 };
 
 struct ocrdma_fw_ver_rsp {
index b1a3d91fe8b94fe292fbe09a4c3820a8592e9f6e..0aa854737e74e98b871a2b714977c4d327f7cd17 100644 (file)
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY |
                                        IB_DEVICE_MEM_MGT_EXTENSIONS;
-       attr->max_sge = dev->attr.max_send_sge;
-       attr->max_sge_rd = attr->max_sge;
+       attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
+       attr->max_sge_rd = dev->attr.max_rdma_sge;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
index 5e75b43c596b608adfacfeb48ea955ffa914082d..5bad8e3b40bb34517948e7a18b0497f79d4b7e16 100644 (file)
@@ -189,27 +189,32 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
 DEBUGFS_FILE(ctx_stats)
 
 static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
+       __acquires(RCU)
 {
        struct qib_qp_iter *iter;
        loff_t n = *pos;
 
-       rcu_read_lock();
        iter = qib_qp_iter_init(s->private);
+
+       /* stop calls rcu_read_unlock */
+       rcu_read_lock();
+
        if (!iter)
                return NULL;
 
-       while (n--) {
+       do {
                if (qib_qp_iter_next(iter)) {
                        kfree(iter);
                        return NULL;
                }
-       }
+       } while (n--);
 
        return iter;
 }
 
 static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
                                   loff_t *pos)
+       __must_hold(RCU)
 {
        struct qib_qp_iter *iter = iter_ptr;
 
@@ -224,6 +229,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
 }
 
 static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
+       __releases(RCU)
 {
        rcu_read_unlock();
 }
index fcdf37913a264d52b5204f771523b37a2b0c4985..c3edc033f7c4ca674380930f958c3b6b60c0cb0f 100644 (file)
@@ -328,26 +328,12 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
 
        pos = *ppos;
 
-       if (pos != 0) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (count != sizeof(struct qib_flash)) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       tmp = kmalloc(count, GFP_KERNEL);
-       if (!tmp) {
-               ret = -ENOMEM;
-               goto bail;
-       }
+       if (pos != 0 || count != sizeof(struct qib_flash))
+               return -EINVAL;
 
-       if (copy_from_user(tmp, buf, count)) {
-               ret = -EFAULT;
-               goto bail_tmp;
-       }
+       tmp = memdup_user(buf, count);
+       if (IS_ERR(tmp))
+               return PTR_ERR(tmp);
 
        dd = private2dd(file);
        if (qib_eeprom_write(dd, pos, tmp, count)) {
@@ -361,8 +347,6 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
 
 bail_tmp:
        kfree(tmp);
-
-bail:
        return ret;
 }
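
memdup_user() collapses the kmalloc()/copy_from_user()/unwind triple into a single call that returns either the filled buffer or an ERR_PTR, which is what lets flash_write() drop the bail label and most of its ret bookkeeping. The pattern on its own, with a hypothetical consume():

        static ssize_t foo_write(struct file *file, const char __user *ubuf,
                                 size_t count, loff_t *ppos)
        {
                void *buf = memdup_user(ubuf, count);

                if (IS_ERR(buf))
                        return PTR_ERR(buf); /* -ENOMEM or -EFAULT, preselected */

                consume(buf, count);
                kfree(buf);
                return count;
        }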
 
index 9cc0aae1d78191735b31eaed33bca1ee21de7d01..f9b8cd2354d1ba6f6b077aea249f00b2f3a8e8bb 100644 (file)
@@ -573,10 +573,6 @@ struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
                return NULL;
 
        iter->dev = dev;
-       if (qib_qp_iter_next(iter)) {
-               kfree(iter);
-               return NULL;
-       }
 
        return iter;
 }
index c229b9f4a52da65d8559d832d930e0c73d7109d9..0a89a955550b29ce327751ce5c2274638b1eccb9 100644 (file)
@@ -664,7 +664,8 @@ static int __init usnic_ib_init(void)
                return err;
        }
 
-       if (pci_register_driver(&usnic_ib_pci_driver)) {
+       err = pci_register_driver(&usnic_ib_pci_driver);
+       if (err) {
                usnic_err("Unable to register with PCI\n");
                goto out_umem_fini;
        }
index bdb540f25a888dcc24b24cf96268c6d8f8a3c479..870b4f212fbcfde8e09031897105614532eee244 100644 (file)
@@ -873,7 +873,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
        free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
 
 bail_rq_wq:
-       vfree(qp->r_rq.wq);
+       if (!qp->ip)
+               vfree(qp->r_rq.wq);
 
 bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);
index ba6be060a476b19d608c7797bed4526b7df04332..7914c14478cd190e24d30151a5560f01980e12d2 100644 (file)
@@ -448,7 +448,7 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
 
        isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
        if (!isert_conn->login_rsp_buf) {
-               isert_err("Unable to allocate isert_conn->login_rspbuf\n");
+               ret = -ENOMEM;
                goto out_unmap_login_req_buf;
        }
 
index dfa23b075a88469b73c3f199f73c8b8ed571187f..883bbfe08e0efa64d87ab2652b0e773516d54450 100644 (file)
@@ -522,6 +522,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
        if (ret)
                goto err_query_port;
 
+       snprintf(sport->port_guid, sizeof(sport->port_guid),
+               "0x%016llx%016llx",
+               be64_to_cpu(sport->gid.global.subnet_prefix),
+               be64_to_cpu(sport->gid.global.interface_id));
+
        if (!sport->mad_agent) {
                memset(&reg_req, 0, sizeof(reg_req));
                reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
@@ -2548,10 +2553,6 @@ static void srpt_add_one(struct ib_device *device)
                               sdev->device->name, i);
                        goto err_ring;
                }
-               snprintf(sport->port_guid, sizeof(sport->port_guid),
-                       "0x%016llx%016llx",
-                       be64_to_cpu(sport->gid.global.subnet_prefix),
-                       be64_to_cpu(sport->gid.global.interface_id));
        }
 
        spin_lock(&srpt_dev_lock);
index 7d61439be5f27aadb3480cee68a17b21ef17a5db..0c07e1023a4693ac705acea1b26068c65c54b39f 100644 (file)
@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
        /* Reset the KBC controller to clear all previous status.*/
        reset_control_assert(kbc->rst);
        udelay(100);
-       reset_control_assert(kbc->rst);
+       reset_control_deassert(kbc->rst);
        udelay(100);
 
        tegra_kbc_config_pins(kbc);
index faa295ec4f313a0f629fa3f2fa9ce5267c929b10..c83bce89028b2a2d270ffa3aed232788b8b491b6 100644 (file)
@@ -553,7 +553,6 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
                goto free_struct_buff;
 
        reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
-       map_offset = 0;
        for (i = 0; i < rdesc->num_registers; i++) {
                struct rmi_register_desc_item *item = &rdesc->registers[i];
                int reg_size = struct_buf[offset];
@@ -576,6 +575,8 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
                item->reg = reg;
                item->reg_size = reg_size;
 
+               map_offset = 0;
+
                do {
                        for (b = 0; b < 7; b++) {
                                if (struct_buf[offset] & (0x1 << b))
index b4d34086e73f5aac0654e607028f13f59ef771be..405252a884dd41e233da4399ab109f213becb85a 100644 (file)
@@ -1305,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx)
        serio->write            = i8042_aux_write;
        serio->start            = i8042_start;
        serio->stop             = i8042_stop;
+       serio->ps2_cmd_mutex    = &i8042_mutex;
        serio->port_data        = port;
        serio->dev.parent       = &i8042_platform_device->dev;
        if (idx < 0) {
index a61b2153ab8c20fb685d4af7595dba125df6023f..1ce3ecbe37f89153e7b863fab92b7d8f05f6ef14 100644 (file)
@@ -1473,7 +1473,6 @@ static int ads7846_remove(struct spi_device *spi)
 
        ads784x_hwmon_unregister(spi, ts);
 
-       regulator_disable(ts->reg);
        regulator_put(ts->reg);
 
        if (!ts->get_pendown_state) {
index 7379fe153cf9946397203aadc2bd9767081589f9..b2744a64e9331e2bd938a9ea60f40074118e62aa 100644 (file)
@@ -464,7 +464,7 @@ static int silead_ts_probe(struct i2c_client *client,
                return -ENODEV;
 
        /* Power GPIO pin */
-       data->gpio_power = gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
+       data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
        if (IS_ERR(data->gpio_power)) {
                if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER)
                        dev_err(dev, "Shutdown GPIO request failed\n");
index ce801170d5f2342f1b2c750750bea720d9fc0ff2..641e887613193c76e7afb48a81c82b1fd732cc2d 100644 (file)
@@ -879,7 +879,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
         * We may have concurrent producers, so we need to be careful
         * not to touch any of the shadow cmdq state.
         */
-       queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
+       queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
        dev_err(smmu->dev, "skipping command in error state:\n");
        for (i = 0; i < ARRAY_SIZE(cmd); ++i)
                dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
@@ -890,7 +890,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
                return;
        }
 
-       queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
+       queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
 }
 
 static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -1034,6 +1034,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
                case STRTAB_STE_0_CFG_S2_TRANS:
                        ste_live = true;
                        break;
+               case STRTAB_STE_0_CFG_ABORT:
+                       if (disable_bypass)
+                               break;
                default:
                        BUG(); /* STE corruption */
                }
index 4f49fe29f2029ad0f1fdc4f7015ed0fa13e7c2bf..2db74ebc324060683aa39fc51a94b49c60b5daab 100644 (file)
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
-       int flags, ret;
-       u32 fsr, fsynr, resume;
+       u32 fsr, fsynr;
        unsigned long iova;
        struct iommu_domain *domain = dev;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
        if (!(fsr & FSR_FAULT))
                return IRQ_NONE;
 
-       if (fsr & FSR_IGN)
-               dev_err_ratelimited(smmu->dev,
-                                   "Unexpected context fault (fsr 0x%x)\n",
-                                   fsr);
-
        fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
-       flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
-
        iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
-       if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
-               ret = IRQ_HANDLED;
-               resume = RESUME_RETRY;
-       } else {
-               dev_err_ratelimited(smmu->dev,
-                   "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
-                   iova, fsynr, cfg->cbndx);
-               ret = IRQ_NONE;
-               resume = RESUME_TERMINATE;
-       }
-
-       /* Clear the faulting FSR */
-       writel(fsr, cb_base + ARM_SMMU_CB_FSR);
 
-       /* Retry or terminate any stalled transactions */
-       if (fsr & FSR_SS)
-               writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+       dev_err_ratelimited(smmu->dev,
+       "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+                           fsr, iova, fsynr, cfg->cbndx);
 
-       return ret;
+       writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+       return IRQ_HANDLED;
 }
 
 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
        }
 
        /* SCTLR */
-       reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+       reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
        if (stage1)
                reg |= SCTLR_S1_ASIDPNE;
 #ifdef __BIG_ENDIAN
index 8c6139986d7d38f8be2ff3984978d813db56af24..def8ca1c982d5869a358ca69467a6f13763c46fb 100644 (file)
@@ -286,12 +286,14 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
        int prot = IOMMU_READ;
        arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
 
-       if (attr & ARM_V7S_PTE_AP_RDONLY)
+       if (!(attr & ARM_V7S_PTE_AP_RDONLY))
                prot |= IOMMU_WRITE;
        if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
                prot |= IOMMU_MMIO;
        else if (pte & ARM_V7S_ATTR_C)
                prot |= IOMMU_CACHE;
+       if (pte & ARM_V7S_ATTR_XN(lvl))
+               prot |= IOMMU_NOEXEC;
 
        return prot;
 }
index 7ceaba81efb44964a286536da93165157832674a..36b9c28a5c917bcebed23bd896cc102ea2a754ed 100644 (file)
@@ -1545,7 +1545,12 @@ static int its_force_quiescent(void __iomem *base)
        u32 val;
 
        val = readl_relaxed(base + GITS_CTLR);
-       if (val & GITS_CTLR_QUIESCENT)
+       /*
+        * GIC architecture specification requires the ITS to be both
+        * disabled and quiescent for writes to GITS_BASER<n> or
+        * GITS_CBASER to not have UNPREDICTABLE results.
+        */
+       if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
                return 0;
 
        /* Disable the generation of all interrupts to this ITS */
index 6fc56c3466b0039d6e3a51448b39a3c97ef53751..ede5672ab34d45b544ef9d35c53e0ca1e990835e 100644 (file)
@@ -667,13 +667,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 #endif
 
 #ifdef CONFIG_CPU_PM
+/* Check whether it's single security state view */
+static bool gic_dist_security_disabled(void)
+{
+       return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
 static int gic_cpu_pm_notifier(struct notifier_block *self,
                               unsigned long cmd, void *v)
 {
        if (cmd == CPU_PM_EXIT) {
-               gic_enable_redist(true);
+               if (gic_dist_security_disabled())
+                       gic_enable_redist(true);
                gic_cpu_sys_reg_init();
-       } else if (cmd == CPU_PM_ENTER) {
+       } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
                gic_write_grpen1(0);
                gic_enable_redist(false);
        }
index c2cab572c5111c392c076d62232da4828cfa00a1..390fac59c6bca62fbb473e1fc7b091114b5371bf 100644 (file)
@@ -769,6 +769,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
        int cpu;
        unsigned long flags, map = 0;
 
+       if (unlikely(nr_cpu_ids == 1)) {
+               /* Only one CPU? let's do a self-IPI... */
+               writel_relaxed(2 << 24 | irq,
+                              gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+               return;
+       }
+
        raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
        /* Convert our logical CPU mask into a physical one. */
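
The 2 << 24 in the new single-CPU fast path is the GICD_SGIR TargetListFilter field: 0b00 routes the SGI by the CPU target list, 0b01 sends it to every CPU except the writer, and 0b10 to the writing CPU alone. With only one possible CPU there is no meaningful target mask to build, so the interrupt is raised as a self-IPI directly. Spelled out with illustrative macro names (field values per the GICv2 architecture specification):

        #define SGIR_TARGET_LIST        (0 << 24) /* use CPUTargetList field */
        #define SGIR_TARGET_OTHERS      (1 << 24) /* all CPUs but the writer */
        #define SGIR_TARGET_SELF        (2 << 24) /* the writing CPU only */

        writel_relaxed(SGIR_TARGET_SELF | irq,
                       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);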
index c5f33c3bd22851a2950bd7554e5e413110d0b019..83f498393a7f0b5d82264cfd3cd340f0cf6c3c1f 100644 (file)
@@ -713,9 +713,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
        unsigned long flags;
        int i;
 
-       irq_set_chip_and_handler(virq, &gic_level_irq_controller,
-                                handle_level_irq);
-
        spin_lock_irqsave(&gic_lock, flags);
        gic_map_to_pin(intr, gic_cpu_pin);
        gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
@@ -732,6 +729,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
 {
        if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
                return gic_local_irq_domain_map(d, virq, hw);
+
+       irq_set_chip_and_handler(virq, &gic_level_irq_controller,
+                                handle_level_irq);
+
        return gic_shared_irq_domain_map(d, virq, hw, 0);
 }
 
@@ -771,11 +772,13 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
                        hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
 
                        ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
-                                                           &gic_edge_irq_controller,
+                                                           &gic_level_irq_controller,
                                                            NULL);
                        if (ret)
                                goto error;
 
+                       irq_set_handler(virq + i, handle_level_irq);
+
                        ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
                        if (ret)
                                goto error;
@@ -890,10 +893,17 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
        return;
 }
 
+static void gic_dev_domain_activate(struct irq_domain *domain,
+                                   struct irq_data *d)
+{
+       gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+}
+
 static struct irq_domain_ops gic_dev_domain_ops = {
        .xlate = gic_dev_domain_xlate,
        .alloc = gic_dev_domain_alloc,
        .free = gic_dev_domain_free,
+       .activate = gic_dev_domain_activate,
 };
 
 static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
index 95a4ca6ce6fffa34da0551199a5acde29193da46..849ad441cd76ba6b04ba2fa2aaac51471acdfd67 100644 (file)
@@ -760,7 +760,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
        if (!d->nr_stripes ||
            d->nr_stripes > INT_MAX ||
            d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
-               pr_err("nr_stripes too large");
+               pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
+                       (unsigned)d->nr_stripes);
                return -ENOMEM;
        }
 
@@ -1820,7 +1821,7 @@ static int cache_alloc(struct cache *ca)
        free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
        if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
-           !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+           !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
            !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
            !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
            !init_fifo(&ca->free_inc,   free << 2, GFP_KERNEL) ||
@@ -1844,7 +1845,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
                                struct block_device *bdev, struct cache *ca)
 {
        char name[BDEVNAME_SIZE];
-       const char *err = NULL;
+       const char *err = NULL; /* must be set for any error case */
        int ret = 0;
 
        memcpy(&ca->sb, sb, sizeof(struct cache_sb));
@@ -1861,8 +1862,13 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
                ca->discard = CACHE_DISCARD(&ca->sb);
 
        ret = cache_alloc(ca);
-       if (ret != 0)
+       if (ret != 0) {
+               if (ret == -ENOMEM)
+                       err = "cache_alloc(): -ENOMEM";
+               else
+                       err = "cache_alloc(): unknown error";
                goto err;
+       }
 
        if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
                err = "error calling kobject_add";
index 97e446d54a151360fa1095311457ac149f426f38..6a2e8dd44a1b83b8fb45a4532458db3fb58db526 100644
@@ -289,15 +289,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
                pb->bio_submitted = true;
 
                /*
-                * Map reads as normal only if corrupt_bio_byte set.
+                * Error reads if neither corrupt_bio_byte nor drop_writes is set.
+                * Otherwise, flakey_end_io() will decide if the reads should be modified.
                 */
                if (bio_data_dir(bio) == READ) {
-                       /* If flags were specified, only corrupt those that match. */
-                       if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
-                           all_corrupt_bio_flags_match(bio, fc))
-                               goto map_bio;
-                       else
+                       if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
                                return -EIO;
+                       goto map_bio;
                }
 
                /*
@@ -334,14 +332,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
        struct flakey_c *fc = ti->private;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 
-       /*
-        * Corrupt successful READs while in down state.
-        */
        if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
-               if (fc->corrupt_bio_byte)
+               if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+                   all_corrupt_bio_flags_match(bio, fc)) {
+                       /*
+                        * Corrupt successful matching READs while in down state.
+                        */
                        corrupt_bio_data(bio, fc);
-               else
+
+               } else if (!test_bit(DROP_WRITES, &fc->flags)) {
+                       /*
+                        * Error reads during the down_interval if drop_writes
+                        * wasn't configured.
+                        */
                        return -EIO;
+               }
        }
 
        return error;
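
The map/end_io split matters because read data only exists after completion:
flakey_map() now fails a read up front only when no other behaviour is
configured, and flakey_end_io() corrupts reads that actually completed. A
simplified userspace model of the map-side decision (an assumption-laden
sketch, not the driver's code):

    #include <stdbool.h>

    enum { MAP_REMAPPED = 1, MAP_EIO = -1 };

    static int flakey_read_decision(bool corrupt_bio_byte, bool drop_writes)
    {
            /* Nothing will handle this read at completion: fail it now. */
            if (!corrupt_bio_byte && !drop_writes)
                    return MAP_EIO;
            /* Otherwise submit it and let the end_io hook decide. */
            return MAP_REMAPPED;
    }
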
index 4ca2d1df5b44c0811555d35651d59d62513a9584..07fc1ad42ec57c4c835c43990187410036e5e5bf 100644
@@ -291,9 +291,10 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
        core->nr_regions = le64_to_cpu(disk->nr_regions);
 }
 
-static int rw_header(struct log_c *lc, int rw)
+static int rw_header(struct log_c *lc, int op)
 {
-       lc->io_req.bi_op = rw;
+       lc->io_req.bi_op = op;
+       lc->io_req.bi_op_flags = 0;
 
        return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
 }
@@ -316,7 +317,7 @@ static int read_header(struct log_c *log)
 {
        int r;
 
-       r = rw_header(log, READ);
+       r = rw_header(log, REQ_OP_READ);
        if (r)
                return r;
 
@@ -630,7 +631,7 @@ static int disk_resume(struct dm_dirty_log *log)
        header_to_disk(&lc->header, lc->disk_header);
 
        /* write the new header */
-       r = rw_header(lc, WRITE);
+       r = rw_header(lc, REQ_OP_WRITE);
        if (!r) {
                r = flush_header(lc);
                if (r)
@@ -698,7 +699,7 @@ static int disk_flush(struct dm_dirty_log *log)
                        log_clear_bit(lc, lc->clean_bits, i);
        }
 
-       r = rw_header(lc, WRITE);
+       r = rw_header(lc, REQ_OP_WRITE);
        if (r)
                fail_log_device(lc);
        else {
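
This is part of the 4.8 block-layer conversion that splits the old
overloaded rw word into an operation (bi_op) and separate modifier flags
(bi_op_flags); passing the bare READ/WRITE constants into bi_op only worked
by accident. A sketch of the converted helper, mirroring the hunks above:

    static int write_header(struct log_c *lc)   /* sketch of rw_header() */
    {
            lc->io_req.bi_op       = REQ_OP_WRITE; /* was: rw = WRITE */
            lc->io_req.bi_op_flags = 0;            /* no REQ_* modifiers */

            return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
    }
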
index 48a5dd740f3ba38b854ac8bfbbdfa93de4820f6a..2206d4477dbbdb5190906e277124580b600d71e0 100644
@@ -1726,6 +1726,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
                        break;
 
                if (req_op(next) == REQ_OP_DISCARD ||
+                   req_op(next) == REQ_OP_SECURE_ERASE ||
                    req_op(next) == REQ_OP_FLUSH)
                        break;
 
@@ -2150,6 +2151,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_card *card = md->queue.card;
        struct mmc_host *host = card->host;
        unsigned long flags;
+       bool req_is_special = mmc_req_is_special(req);
 
        if (req && !mq->mqrq_prev->req)
                /* claim host only for the first request */
@@ -2190,8 +2192,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        }
 
 out:
-       if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
-           mmc_req_is_special(req))
+       if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
                /*
                 * Release host when there are no more requests
                 * and after special request(discard, flush) is done.
index bf14642a576a515a50b51de61daf8656b8cfc443..708057261b38982fffa9f42204b841260ff67432 100644
@@ -33,7 +33,8 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
        /*
         * We only like normal block requests and discards.
         */
-       if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) {
+       if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
+           req_op(req) != REQ_OP_SECURE_ERASE) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }
@@ -64,6 +65,8 @@ static int mmc_queue_thread(void *d)
                spin_unlock_irq(q->queue_lock);
 
                if (req || mq->mqrq_prev->req) {
+                       bool req_is_special = mmc_req_is_special(req);
+
                        set_current_state(TASK_RUNNING);
                        mq->issue_fn(mq, req);
                        cond_resched();
@@ -79,7 +82,7 @@ static int mmc_queue_thread(void *d)
                         * has been finished. Do not assign it to previous
                         * request.
                         */
-                       if (mmc_req_is_special(req))
+                       if (req_is_special)
                                mq->mqrq_cur->req = NULL;
 
                        mq->mqrq_prev->brq.mrq.data = NULL;
index d62531124d542c0ff82893ed04226e2a0c04016e..fee5e127146519efdd2b6b3ed35fc5ea1858667a 100644
@@ -4,7 +4,9 @@
 static inline bool mmc_req_is_special(struct request *req)
 {
        return req &&
-               (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD);
+               (req_op(req) == REQ_OP_FLUSH ||
+                req_op(req) == REQ_OP_DISCARD ||
+                req_op(req) == REQ_OP_SECURE_ERASE);
 }
 
 struct request;
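
Besides teaching the special-request predicate about REQ_OP_SECURE_ERASE,
the queue hunks above fix a use-after-free: issue_fn() can complete and free
req, so mmc_req_is_special(req) has to be evaluated before the request is
issued. The shape of the fix:

    bool req_is_special = mmc_req_is_special(req);  /* capture first... */

    mq->issue_fn(mq, req);                  /* ...req may be freed in here */

    if (req_is_special)                     /* safe: req is not dereferenced */
            mq->mqrq_cur->req = NULL;
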
index 7ff2e820bbf473e8f9d31a19c17cfd6ec1ccb833..2feacc70bf61f10892ee4b7bddd97bb93e4fcd7d 100644
@@ -81,10 +81,12 @@ EXPORT_SYMBOL_GPL(nvme_cancel_request);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                enum nvme_ctrl_state new_state)
 {
-       enum nvme_ctrl_state old_state = ctrl->state;
+       enum nvme_ctrl_state old_state;
        bool changed = false;
 
        spin_lock_irq(&ctrl->lock);
+
+       old_state = ctrl->state;
        switch (new_state) {
        case NVME_CTRL_LIVE:
                switch (old_state) {
@@ -140,11 +142,12 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
        default:
                break;
        }
-       spin_unlock_irq(&ctrl->lock);
 
        if (changed)
                ctrl->state = new_state;
 
+       spin_unlock_irq(&ctrl->lock);
+
        return changed;
 }
 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
@@ -608,7 +611,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 
        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
                        NVME_QID_ANY, 0, 0);
-       if (ret >= 0)
+       if (ret >= 0 && result)
                *result = le32_to_cpu(cqe.result);
        return ret;
 }
@@ -628,7 +631,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 
        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
                        NVME_QID_ANY, 0, 0);
-       if (ret >= 0)
+       if (ret >= 0 && result)
                *result = le32_to_cpu(cqe.result);
        return ret;
 }
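
Two defensive patterns in this file: ctrl->state is now read and written
entirely under ctrl->lock instead of being sampled before it is taken, and
the optional *result out-parameter is only dereferenced when the caller
supplied one. A sketch of the latter, with hypothetical names:

    static int get_feature(u32 *result /* may be NULL */)
    {
            int ret = query_hardware();     /* hypothetical helper */

            if (ret >= 0 && result)         /* only store when asked to */
                    *result = ret;
            return ret;
    }
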
index eafa6138a6b81866a3bc09739ab3398513746253..98f12223c734f15835edf438a94d7eccf15c991d 100644
@@ -1069,7 +1069,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
                nvec = maxvec;
 
        for (;;) {
-               if (!(flags & PCI_IRQ_NOAFFINITY)) {
+               if (flags & PCI_IRQ_AFFINITY) {
                        dev->irq_affinity = irq_create_affinity_mask(&nvec);
                        if (nvec < minvec)
                                return -ENOSPC;
@@ -1105,7 +1105,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
  **/
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
 {
-       return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY);
+       return __pci_enable_msi_range(dev, minvec, maxvec, 0);
 }
 EXPORT_SYMBOL(pci_enable_msi_range);
 
@@ -1120,7 +1120,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
                return -ERANGE;
 
        for (;;) {
-               if (!(flags & PCI_IRQ_NOAFFINITY)) {
+               if (flags & PCI_IRQ_AFFINITY) {
                        dev->irq_affinity = irq_create_affinity_mask(&nvec);
                        if (nvec < minvec)
                                return -ENOSPC;
@@ -1160,8 +1160,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
                int minvec, int maxvec)
 {
-       return __pci_enable_msix_range(dev, entries, minvec, maxvec,
-                       PCI_IRQ_NOAFFINITY);
+       return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
 }
 EXPORT_SYMBOL(pci_enable_msix_range);
 
@@ -1187,22 +1186,25 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
 {
        int vecs = -ENOSPC;
 
-       if (!(flags & PCI_IRQ_NOMSIX)) {
+       if (flags & PCI_IRQ_MSIX) {
                vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
                                flags);
                if (vecs > 0)
                        return vecs;
        }
 
-       if (!(flags & PCI_IRQ_NOMSI)) {
+       if (flags & PCI_IRQ_MSI) {
                vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
                if (vecs > 0)
                        return vecs;
        }
 
        /* use legacy irq if allowed */
-       if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1)
+       if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) {
+               pci_intx(dev, 1);
                return 1;
+       }
+
        return vecs;
 }
 EXPORT_SYMBOL(pci_alloc_irq_vectors);
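
The flag polarity flips from opt-out (PCI_IRQ_NOMSIX and friends) to opt-in,
and the legacy fallback now actually enables INTx via pci_intx(). Under the
new scheme a caller asks for every interrupt type it can handle; a usage
sketch against the 4.8-rc API (pdev being the caller's struct pci_dev):

    int nvec = pci_alloc_irq_vectors(pdev, 1, 32,
                            PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
    if (nvec < 0)
            return nvec;    /* no interrupt scheme could be enabled */
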
index 3788ed74c9abe59eaed8e8ce4621cac7ecaffe35..a32b41783b7778b7d32cd8fc44026fb8476ace0d 100644
@@ -740,12 +740,22 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
 }
 
 /* Bind cpufreq callbacks to thermal cooling device ops */
+
 static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
        .get_max_state = cpufreq_get_max_state,
        .get_cur_state = cpufreq_get_cur_state,
        .set_cur_state = cpufreq_set_cur_state,
 };
 
+static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
+       .get_max_state          = cpufreq_get_max_state,
+       .get_cur_state          = cpufreq_get_cur_state,
+       .set_cur_state          = cpufreq_set_cur_state,
+       .get_requested_power    = cpufreq_get_requested_power,
+       .state2power            = cpufreq_state2power,
+       .power2state            = cpufreq_power2state,
+};
+
 /* Notifier for cpufreq policy change */
 static struct notifier_block thermal_cpufreq_notifier_block = {
        .notifier_call = cpufreq_thermal_notifier,
@@ -795,6 +805,7 @@ __cpufreq_cooling_register(struct device_node *np,
        struct cpumask temp_mask;
        unsigned int freq, i, num_cpus;
        int ret;
+       struct thermal_cooling_device_ops *cooling_ops;
 
        cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
        policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
@@ -850,10 +861,6 @@ __cpufreq_cooling_register(struct device_node *np,
        cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
 
        if (capacitance) {
-               cpufreq_cooling_ops.get_requested_power =
-                       cpufreq_get_requested_power;
-               cpufreq_cooling_ops.state2power = cpufreq_state2power;
-               cpufreq_cooling_ops.power2state = cpufreq_power2state;
                cpufreq_dev->plat_get_static_power = plat_static_func;
 
                ret = build_dyn_power_table(cpufreq_dev, capacitance);
@@ -861,6 +868,10 @@ __cpufreq_cooling_register(struct device_node *np,
                        cool_dev = ERR_PTR(ret);
                        goto free_table;
                }
+
+               cooling_ops = &cpufreq_power_cooling_ops;
+       } else {
+               cooling_ops = &cpufreq_cooling_ops;
        }
 
        ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
@@ -885,7 +896,7 @@ __cpufreq_cooling_register(struct device_node *np,
                 cpufreq_dev->id);
 
        cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
-                                                     &cpufreq_cooling_ops);
+                                                     cooling_ops);
        if (IS_ERR(cool_dev))
                goto remove_idr;
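
Mutating the one shared cpufreq_cooling_ops at registration time was racy
and left the power callbacks installed for every later device, including
ones registered without a capacitance. Keeping two static ops tables and
picking one per device removes the mutation; the selection reduces to:

    struct thermal_cooling_device_ops *ops;

    ops = capacitance ? &cpufreq_power_cooling_ops : &cpufreq_cooling_ops;
    cool_dev = thermal_of_cooling_device_register(np, dev_name,
                                                  cpufreq_dev, ops);
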
 
index c5547bd711dbe2b4d681e54213e25e992afb51af..e473548b5d289d7dda47bdd8c423209fc1ab245f 100644
@@ -471,8 +471,6 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
 
 static int imx_thermal_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *of_id =
-               of_match_device(of_imx_thermal_match, &pdev->dev);
        struct imx_thermal_data *data;
        struct regmap *map;
        int measure_freq;
@@ -490,7 +488,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
        }
        data->tempmon = map;
 
-       data->socdata = of_id->data;
+       data->socdata = of_device_get_match_data(&pdev->dev);
 
        /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */
        if (data->socdata->version == TEMPMON_IMX6SX) {
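
of_device_get_match_data() folds the of_match_device() call and the ->data
dereference into one step; unlike the removed code it cannot oops when no
match is found, it just returns NULL. A probe would typically still check
for that (the check below is illustrative, not part of this hunk):

    data->socdata = of_device_get_match_data(&pdev->dev);
    if (!data->socdata) {
            dev_err(&pdev->dev, "no device match found\n");
            return -ENODEV;
    }
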
index a578cd257db4b57ef64751a2d85a5078355d5d1b..1891f34ab7fcd746405fb6f7b7696c36c54133f2 100644
@@ -225,7 +225,6 @@ static struct platform_driver int3406_thermal_driver = {
        .remove = int3406_thermal_remove,
        .driver = {
                   .name = "int3406 thermal",
-                  .owner = THIS_MODULE,
                   .acpi_match_table = int3406_thermal_match,
                   },
 };
index 9d6320e8ff3e07f55f9229ea9138624a09b6b488..6e29d053843d0b7f29a1aed672ac88ef8150b9d7 100644
@@ -88,7 +88,7 @@ struct vhost_scsi_cmd {
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
        /* Pointer to response header iovec */
-       struct iovec *tvc_resp_iov;
+       struct iovec tvc_resp_iov;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
@@ -547,7 +547,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       se_cmd->scsi_sense_length);
 
-               iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+               iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
                              cmd->tvc_in_iovs, sizeof(v_rsp));
                ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
                if (likely(ret == sizeof(v_rsp))) {
@@ -1044,7 +1044,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                }
                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
-               cmd->tvc_resp_iov = &vq->iov[out];
+               cmd->tvc_resp_iov = vq->iov[out];
                cmd->tvc_in_iovs = in;
 
                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
index 7487971f9f788b12a637216c29cc853bb933c1ca..c1010f018bd857985b5bf0ada1f5286f0f68b2bf 100644
@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
                        rc = -ENOMEM;
                        goto out;
                }
-       } else {
+       } else if (msg_type == XS_TRANSACTION_END) {
                list_for_each_entry(trans, &u->transactions, list)
                        if (trans->handle.id == u->u.msg.tx_id)
                                break;
index c3cdde87cc8c678542bab9c39f12477058986156..08ae99343d92f91586b49fbf56367906855415ff 100644
@@ -249,7 +249,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
                 * thaw_bdev drops it.
                 */
                sb = get_super(bdev);
-               drop_super(sb);
+               if (sb)
+                       drop_super(sb);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }
@@ -646,7 +647,7 @@ static struct dentry *bd_mount(struct file_system_type *fs_type,
 {
        struct dentry *dent;
        dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
-       if (dent)
+       if (!IS_ERR(dent))
                dent->d_sb->s_iflags |= SB_I_CGROUPWB;
        return dent;
 }
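
Both hunks are error-convention fixes: get_super() may return NULL, hence
the guarded drop_super(), while mount_pseudo() reports failure as an
ERR_PTR-encoded errno and never as NULL, so the old if (dent) test was
always true and could dereference an error pointer. An equivalent shape of
the corrected bd_mount():

    dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
    if (IS_ERR(dent))
            return dent;            /* propagate the encoded errno */
    dent->d_sb->s_iflags |= SB_I_CGROUPWB;
    return dent;
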
index 2b88439c2ee864ffcec7a1e43f6e620a729bdbff..455a6b2fd53957709ecfdf9aeff5386a4e60e2d0 100644
@@ -589,6 +589,7 @@ static void __merge_refs(struct list_head *head, int mode)
 
                        list_del(&ref2->list);
                        kmem_cache_free(btrfs_prelim_ref_cache, ref2);
+                       cond_resched();
                }
 
        }
index 2fe8f89091a3097aa8e55bcee4a20c7b43ab1ca2..eff3993c77b333f875ade96769e6f6dad5b47143 100644
@@ -1028,6 +1028,7 @@ struct btrfs_fs_info {
        struct btrfs_workqueue *qgroup_rescan_workers;
        struct completion qgroup_rescan_completion;
        struct btrfs_work qgroup_rescan_work;
+       bool qgroup_rescan_running;     /* protected by qgroup_rescan_lock */
 
        /* filesystem state */
        unsigned long fs_state;
@@ -1079,6 +1080,8 @@ struct btrfs_fs_info {
        struct list_head pinned_chunks;
 
        int creating_free_space_tree;
+       /* Used to record internally whether fs has been frozen */
+       int fs_frozen;
 };
 
 struct btrfs_subvolume_writers {
@@ -2578,7 +2581,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   u64 root_objectid, u64 owner, u64 offset,
                                   struct btrfs_key *ins);
-int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
+int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
                         u64 min_alloc_size, u64 empty_size, u64 hint_byte,
                         struct btrfs_key *ins, int is_data, int delalloc);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
index d9ddcfc18c91f8acd0ac977171116e3659aa2242..ac02e041464bc87876cfd206a79f7e07de9846b7 100644
@@ -541,7 +541,6 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
-       struct btrfs_qgroup_extent_record *qexisting;
        int count_mod = 1;
        int must_insert_reserved = 0;
 
@@ -606,10 +605,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;
 
-               qexisting = btrfs_qgroup_insert_dirty_extent(fs_info,
-                                                            delayed_refs,
-                                                            qrecord);
-               if (qexisting)
+               if (btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
+                                       delayed_refs, qrecord))
                        kfree(qrecord);
        }
 
index 59febfb8d04a3094fc8dfa6644a18d5a9aac6d42..54bc8c7c6bcd387ef48ffc9b69e9771de6e2ca2c 100644
@@ -559,8 +559,29 @@ static noinline int check_leaf(struct btrfs_root *root,
        u32 nritems = btrfs_header_nritems(leaf);
        int slot;
 
-       if (nritems == 0)
+       if (nritems == 0) {
+               struct btrfs_root *check_root;
+
+               key.objectid = btrfs_header_owner(leaf);
+               key.type = BTRFS_ROOT_ITEM_KEY;
+               key.offset = (u64)-1;
+
+               check_root = btrfs_get_fs_root(root->fs_info, &key, false);
+               /*
+                * The only reason we also check NULL here is that during
+                * open_ctree() some roots have not yet been set up.
+                */
+               if (!IS_ERR_OR_NULL(check_root)) {
+                       /* if leaf is the root, then it's fine */
+                       if (leaf->start !=
+                           btrfs_root_bytenr(&check_root->root_item)) {
+                               CORRUPT("non-root leaf's nritems is 0",
+                                       leaf, root, 0);
+                               return -EIO;
+                       }
+               }
                return 0;
+       }
 
        /* Check the 0 item */
        if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
@@ -612,6 +633,19 @@ static noinline int check_leaf(struct btrfs_root *root,
        return 0;
 }
 
+static int check_node(struct btrfs_root *root, struct extent_buffer *node)
+{
+       unsigned long nr = btrfs_header_nritems(node);
+
+       if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
+               btrfs_crit(root->fs_info,
+                          "corrupt node: block %llu root %llu nritems %lu",
+                          node->start, root->objectid, nr);
+               return -EIO;
+       }
+       return 0;
+}
+
 static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
                                      u64 phy_offset, struct page *page,
                                      u64 start, u64 end, int mirror)
@@ -682,6 +716,9 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
                ret = -EIO;
        }
 
+       if (found_level > 0 && check_node(root, eb))
+               ret = -EIO;
+
        if (!ret)
                set_extent_buffer_uptodate(eb);
 err:
@@ -1618,8 +1655,8 @@ int btrfs_init_fs_root(struct btrfs_root *root)
        return ret;
 }
 
-static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
-                                              u64 root_id)
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+                                       u64 root_id)
 {
        struct btrfs_root *root;
 
@@ -2298,6 +2335,7 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
        fs_info->quota_enabled = 0;
        fs_info->pending_quota_state = 0;
        fs_info->qgroup_ulist = NULL;
+       fs_info->qgroup_rescan_running = false;
        mutex_init(&fs_info->qgroup_rescan_lock);
 }
 
@@ -2624,6 +2662,7 @@ int open_ctree(struct super_block *sb,
        atomic_set(&fs_info->qgroup_op_seq, 0);
        atomic_set(&fs_info->reada_works_cnt, 0);
        atomic64_set(&fs_info->tree_mod_seq, 0);
+       fs_info->fs_frozen = 0;
        fs_info->sb = sb;
        fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
        fs_info->metadata_ratio = 0;
@@ -3739,8 +3778,15 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);
 
-       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                btrfs_free_log(NULL, root);
+               if (root->reloc_root) {
+                       free_extent_buffer(root->reloc_root->node);
+                       free_extent_buffer(root->reloc_root->commit_root);
+                       btrfs_put_fs_root(root->reloc_root);
+                       root->reloc_root = NULL;
+               }
+       }
 
        if (root->free_ino_pinned)
                __btrfs_remove_free_space_cache(root->free_ino_pinned);
@@ -3851,7 +3897,7 @@ void close_ctree(struct btrfs_root *root)
        smp_mb();
 
        /* wait for the qgroup rescan worker to stop */
-       btrfs_qgroup_wait_for_completion(fs_info);
+       btrfs_qgroup_wait_for_completion(fs_info, false);
 
        /* wait for the uuid_scan task to finish */
        down(&fs_info->uuid_tree_rescan_sem);
index b3207a0e09f7966703e1d130e250f05f610b2a3f..f19a982f5a4f122ca7d42aa6278a6614e061a2b1 100644
@@ -68,6 +68,8 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
                                      struct btrfs_key *location);
 int btrfs_init_fs_root(struct btrfs_root *root);
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+                                       u64 root_id);
 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
                         struct btrfs_root *root);
 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
index 61b494e8e604e9e80f1c46c581ede724f56ea050..0450dc4105339bc0cf14cb1e861ee43d83e27ba6 100644
@@ -60,21 +60,6 @@ enum {
        CHUNK_ALLOC_FORCE = 2,
 };
 
-/*
- * Control how reservations are dealt with.
- *
- * RESERVE_FREE - freeing a reservation.
- * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
- *   ENOSPC accounting
- * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
- *   bytes_may_use as the ENOSPC accounting is done elsewhere
- */
-enum {
-       RESERVE_FREE = 0,
-       RESERVE_ALLOC = 1,
-       RESERVE_ALLOC_NO_ACCOUNT = 2,
-};
-
 static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
@@ -104,9 +89,10 @@ static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
-static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                                      u64 num_bytes, int reserve,
-                                      int delalloc);
+static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                   u64 ram_bytes, u64 num_bytes, int delalloc);
+static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                    u64 num_bytes, int delalloc);
 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
 int btrfs_pin_extent(struct btrfs_root *root,
@@ -3501,7 +3487,6 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
                dcs = BTRFS_DC_SETUP;
        else if (ret == -ENOSPC)
                set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
-       btrfs_free_reserved_data_space(inode, 0, num_pages);
 
 out_put:
        iput(inode);
@@ -4472,6 +4457,15 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
        }
 }
 
+/*
+ * If force is CHUNK_ALLOC_FORCE:
+ *    - return 1 if it successfully allocates a chunk,
+ *    - return errors including -ENOSPC otherwise.
+ * If force is NOT CHUNK_ALLOC_FORCE:
+ *    - return 0 if it doesn't need to allocate a new chunk,
+ *    - return 1 if it successfully allocates a chunk,
+ *    - return errors including -ENOSPC otherwise.
+ */
 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags, int force)
 {
@@ -4882,7 +4876,7 @@ static int flush_space(struct btrfs_root *root,
                                     btrfs_get_alloc_profile(root, 0),
                                     CHUNK_ALLOC_NO_FORCE);
                btrfs_end_transaction(trans, root);
-               if (ret == -ENOSPC)
+               if (ret > 0 || ret == -ENOSPC)
                        ret = 0;
                break;
        case COMMIT_TRANS:
@@ -6497,19 +6491,15 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
 }
 
 /**
- * btrfs_update_reserved_bytes - update the block_group and space info counters
+ * btrfs_add_reserved_bytes - update the block_group and space info counters
  * @cache:     The cache we are manipulating
+ * @ram_bytes:  The number of bytes of file content, which will be the same
+ *              as @num_bytes except on the compression path.
  * @num_bytes: The number of bytes in question
- * @reserve:   One of the reservation enums
  * @delalloc:   The blocks are allocated for the delalloc write
  *
- * This is called by the allocator when it reserves space, or by somebody who is
- * freeing space that was never actually used on disk.  For example if you
- * reserve some space for a new leaf in transaction A and before transaction A
- * commits you free that leaf, you call this with reserve set to 0 in order to
- * clear the reservation.
- *
- * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
+ * This is called by the allocator when it reserves space. Metadata
+ * reservations should be called with RESERVE_ALLOC so we do the proper
  * ENOSPC accounting.  For data we handle the reservation through clearing the
  * delalloc bits in the io_tree.  We have to do this since we could end up
  * allocating less disk space for the amount of data we have reserved in the
@@ -6519,44 +6509,63 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
  * make the reservation and return -EAGAIN, otherwise this function always
  * succeeds.
  */
-static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                                      u64 num_bytes, int reserve, int delalloc)
+static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                   u64 ram_bytes, u64 num_bytes, int delalloc)
 {
        struct btrfs_space_info *space_info = cache->space_info;
        int ret = 0;
 
        spin_lock(&space_info->lock);
        spin_lock(&cache->lock);
-       if (reserve != RESERVE_FREE) {
-               if (cache->ro) {
-                       ret = -EAGAIN;
-               } else {
-                       cache->reserved += num_bytes;
-                       space_info->bytes_reserved += num_bytes;
-                       if (reserve == RESERVE_ALLOC) {
-                               trace_btrfs_space_reservation(cache->fs_info,
-                                               "space_info", space_info->flags,
-                                               num_bytes, 0);
-                               space_info->bytes_may_use -= num_bytes;
-                       }
-
-                       if (delalloc)
-                               cache->delalloc_bytes += num_bytes;
-               }
+       if (cache->ro) {
+               ret = -EAGAIN;
        } else {
-               if (cache->ro)
-                       space_info->bytes_readonly += num_bytes;
-               cache->reserved -= num_bytes;
-               space_info->bytes_reserved -= num_bytes;
+               cache->reserved += num_bytes;
+               space_info->bytes_reserved += num_bytes;
 
+               trace_btrfs_space_reservation(cache->fs_info,
+                               "space_info", space_info->flags,
+                               ram_bytes, 0);
+               space_info->bytes_may_use -= ram_bytes;
                if (delalloc)
-                       cache->delalloc_bytes -= num_bytes;
+                       cache->delalloc_bytes += num_bytes;
        }
        spin_unlock(&cache->lock);
        spin_unlock(&space_info->lock);
        return ret;
 }
 
+/**
+ * btrfs_free_reserved_bytes - update the block_group and space info counters
+ * @cache:      The cache we are manipulating
+ * @num_bytes:  The number of bytes in question
+ * @delalloc:   The blocks are allocated for the delalloc write
+ *
+ * This is called by somebody who is freeing space that was never actually used
+ * on disk.  For example if you reserve some space for a new leaf in transaction
+ * A and before transaction A commits you free that leaf, you call this
+ * function to clear the reservation.
+ */
+
+static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+                                    u64 num_bytes, int delalloc)
+{
+       struct btrfs_space_info *space_info = cache->space_info;
+       int ret = 0;
+
+       spin_lock(&space_info->lock);
+       spin_lock(&cache->lock);
+       if (cache->ro)
+               space_info->bytes_readonly += num_bytes;
+       cache->reserved -= num_bytes;
+       space_info->bytes_reserved -= num_bytes;
+
+       if (delalloc)
+               cache->delalloc_bytes -= num_bytes;
+       spin_unlock(&cache->lock);
+       spin_unlock(&space_info->lock);
+       return ret;
+}
 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
 {
@@ -7191,7 +7200,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
 
                btrfs_add_free_space(cache, buf->start, buf->len);
-               btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
+               btrfs_free_reserved_bytes(cache, buf->len, 0);
                btrfs_put_block_group(cache);
                trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
                pin = 0;
@@ -7416,9 +7425,9 @@ btrfs_release_block_group(struct btrfs_block_group_cache *cache,
  * the free space extent currently.
  */
 static noinline int find_free_extent(struct btrfs_root *orig_root,
-                                    u64 num_bytes, u64 empty_size,
-                                    u64 hint_byte, struct btrfs_key *ins,
-                                    u64 flags, int delalloc)
+                               u64 ram_bytes, u64 num_bytes, u64 empty_size,
+                               u64 hint_byte, struct btrfs_key *ins,
+                               u64 flags, int delalloc)
 {
        int ret = 0;
        struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -7430,8 +7439,6 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
        struct btrfs_space_info *space_info;
        int loop = 0;
        int index = __get_raid_index(flags);
-       int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
-               RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
        bool failed_cluster_refill = false;
        bool failed_alloc = false;
        bool use_cluster = true;
@@ -7763,8 +7770,8 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
                                             search_start - offset);
                BUG_ON(offset > search_start);
 
-               ret = btrfs_update_reserved_bytes(block_group, num_bytes,
-                                                 alloc_type, delalloc);
+               ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
+                               num_bytes, delalloc);
                if (ret == -EAGAIN) {
                        btrfs_add_free_space(block_group, offset, num_bytes);
                        goto loop;
@@ -7936,7 +7943,7 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
        up_read(&info->groups_sem);
 }
 
-int btrfs_reserve_extent(struct btrfs_root *root,
+int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
                         u64 num_bytes, u64 min_alloc_size,
                         u64 empty_size, u64 hint_byte,
                         struct btrfs_key *ins, int is_data, int delalloc)
@@ -7948,8 +7955,8 @@ int btrfs_reserve_extent(struct btrfs_root *root,
        flags = btrfs_get_alloc_profile(root, is_data);
 again:
        WARN_ON(num_bytes < root->sectorsize);
-       ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
-                              flags, delalloc);
+       ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
+                              hint_byte, ins, flags, delalloc);
        if (!ret && !is_data) {
                btrfs_dec_block_group_reservations(root->fs_info,
                                                   ins->objectid);
@@ -7958,6 +7965,7 @@ int btrfs_reserve_extent(struct btrfs_root *root,
                        num_bytes = min(num_bytes >> 1, ins->offset);
                        num_bytes = round_down(num_bytes, root->sectorsize);
                        num_bytes = max(num_bytes, min_alloc_size);
+                       ram_bytes = num_bytes;
                        if (num_bytes == min_alloc_size)
                                final_tried = true;
                        goto again;
@@ -7995,7 +8003,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
                if (btrfs_test_opt(root->fs_info, DISCARD))
                        ret = btrfs_discard_extent(root, start, len, NULL);
                btrfs_add_free_space(cache, start, len);
-               btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
+               btrfs_free_reserved_bytes(cache, len, delalloc);
                trace_btrfs_reserved_extent_free(root, start, len);
        }
 
@@ -8223,8 +8231,8 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
        if (!block_group)
                return -EINVAL;
 
-       ret = btrfs_update_reserved_bytes(block_group, ins->offset,
-                                         RESERVE_ALLOC_NO_ACCOUNT, 0);
+       ret = btrfs_add_reserved_bytes(block_group, ins->offset,
+                                      ins->offset, 0);
        BUG_ON(ret); /* logic error */
        ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
                                         0, owner, offset, ins, 1);
@@ -8368,7 +8376,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
        if (IS_ERR(block_rsv))
                return ERR_CAST(block_rsv);
 
-       ret = btrfs_reserve_extent(root, blocksize, blocksize,
+       ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
                                   empty_size, hint, &ins, 0, 0);
        if (ret)
                goto out_unuse;
@@ -8521,35 +8529,6 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
        wc->reada_slot = slot;
 }
 
-/*
- * These may not be seen by the usual inc/dec ref code so we have to
- * add them here.
- */
-static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
-                                    struct btrfs_root *root, u64 bytenr,
-                                    u64 num_bytes)
-{
-       struct btrfs_qgroup_extent_record *qrecord;
-       struct btrfs_delayed_ref_root *delayed_refs;
-
-       qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
-       if (!qrecord)
-               return -ENOMEM;
-
-       qrecord->bytenr = bytenr;
-       qrecord->num_bytes = num_bytes;
-       qrecord->old_roots = NULL;
-
-       delayed_refs = &trans->transaction->delayed_refs;
-       spin_lock(&delayed_refs->lock);
-       if (btrfs_qgroup_insert_dirty_extent(trans->fs_info,
-                                            delayed_refs, qrecord))
-               kfree(qrecord);
-       spin_unlock(&delayed_refs->lock);
-
-       return 0;
-}
-
 static int account_leaf_items(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct extent_buffer *eb)
@@ -8583,7 +8562,8 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
 
                num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
 
-               ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
+               ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
+                               bytenr, num_bytes, GFP_NOFS);
                if (ret)
                        return ret;
        }
@@ -8732,8 +8712,9 @@ static int account_shared_subtree(struct btrfs_trans_handle *trans,
                        btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
                        path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
 
-                       ret = record_one_subtree_extent(trans, root, child_bytenr,
-                                                       root->nodesize);
+                       ret = btrfs_qgroup_insert_dirty_extent(trans,
+                                       root->fs_info, child_bytenr,
+                                       root->nodesize, GFP_NOFS);
                        if (ret)
                                goto out;
                }
@@ -9906,6 +9887,7 @@ static int find_first_block_group(struct btrfs_root *root,
                        } else {
                                ret = 0;
                        }
+                       free_extent_map(em);
                        goto out;
                }
                path->slots[0]++;
@@ -9942,6 +9924,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
                block_group->iref = 0;
                block_group->inode = NULL;
                spin_unlock(&block_group->lock);
+               ASSERT(block_group->io_ctl.inode == NULL);
                iput(inode);
                last = block_group->key.objectid + block_group->key.offset;
                btrfs_put_block_group(block_group);
@@ -9999,6 +9982,10 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                        free_excluded_extents(info->extent_root, block_group);
 
                btrfs_remove_free_space_cache(block_group);
+               ASSERT(list_empty(&block_group->dirty_list));
+               ASSERT(list_empty(&block_group->io_list));
+               ASSERT(list_empty(&block_group->bg_list));
+               ASSERT(atomic_read(&block_group->count) == 1);
                btrfs_put_block_group(block_group);
 
                spin_lock(&info->block_group_cache_lock);
index bc2729a7612db5e472fda6b359a7862bcead0cd4..28cd88fccc7eaa0e6776229cb00e6e336799263d 100644
@@ -20,6 +20,7 @@
 #define EXTENT_DAMAGED         (1U << 14)
 #define EXTENT_NORESERVE       (1U << 15)
 #define EXTENT_QGROUP_RESERVED (1U << 16)
+#define EXTENT_CLEAR_DATA_RESV (1U << 17)
 #define EXTENT_IOBITS          (EXTENT_LOCKED | EXTENT_WRITEBACK)
 #define EXTENT_CTLBITS         (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
 
index 5842423f8f47b6a7146240c6b47ea1ead637f984..fea31a4a6e36844d97a53c80a7f442c166e82b60 100644
@@ -2070,7 +2070,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        }
        trans->sync = true;
 
-       btrfs_init_log_ctx(&ctx);
+       btrfs_init_log_ctx(&ctx, inode);
 
        ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
        if (ret < 0) {
@@ -2675,6 +2675,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 
        alloc_start = round_down(offset, blocksize);
        alloc_end = round_up(offset + len, blocksize);
+       cur_offset = alloc_start;
 
        /* Make sure we aren't being given some crap mode */
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
@@ -2767,7 +2768,6 @@ static long btrfs_fallocate(struct file *file, int mode,
 
        /* First, check if we exceed the qgroup limit */
        INIT_LIST_HEAD(&reserve_list);
-       cur_offset = alloc_start;
        while (1) {
                em = btrfs_get_extent(inode, NULL, 0, cur_offset,
                                      alloc_end - cur_offset, 0);
@@ -2794,6 +2794,14 @@ static long btrfs_fallocate(struct file *file, int mode,
                                        last_byte - cur_offset);
                        if (ret < 0)
                                break;
+               } else {
+                       /*
+                        * No need to reserve an unwritten extent for this
+                        * range; free the reserved data space first, otherwise
+                        * it'll result in a false ENOSPC error.
+                        */
+                       btrfs_free_reserved_data_space(inode, cur_offset,
+                               last_byte - cur_offset);
                }
                free_extent_map(em);
                cur_offset = last_byte;
@@ -2811,6 +2819,9 @@ static long btrfs_fallocate(struct file *file, int mode,
                                        range->start,
                                        range->len, 1 << inode->i_blkbits,
                                        offset + len, &alloc_hint);
+               else
+                       btrfs_free_reserved_data_space(inode, range->start,
+                                                      range->len);
                list_del(&range->list);
                kfree(range);
        }
@@ -2845,18 +2856,11 @@ static long btrfs_fallocate(struct file *file, int mode,
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
                             &cached_state, GFP_KERNEL);
 out:
-       /*
-        * As we waited the extent range, the data_rsv_map must be empty
-        * in the range, as written data range will be released from it.
-        * And for prealloacted extent, it will also be released when
-        * its metadata is written.
-        * So this is completely used as cleanup.
-        */
-       btrfs_qgroup_free_data(inode, alloc_start, alloc_end - alloc_start);
        inode_unlock(inode);
        /* Let go of our reservation. */
-       btrfs_free_reserved_data_space(inode, alloc_start,
-                                      alloc_end - alloc_start);
+       if (ret != 0)
+               btrfs_free_reserved_data_space(inode, alloc_start,
+                                      alloc_end - cur_offset);
        return ret;
 }
 
index aa6fabaee72ed488844fc042e98536a77e9148cf..359ee861b5a4b90e567ca63477fd579055477e3f 100644
@@ -495,10 +495,9 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
        ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
                                              prealloc, prealloc, &alloc_hint);
        if (ret) {
-               btrfs_delalloc_release_space(inode, 0, prealloc);
+               btrfs_delalloc_release_metadata(inode, prealloc);
                goto out_put;
        }
-       btrfs_free_reserved_data_space(inode, 0, prealloc);
 
        ret = btrfs_write_out_ino_cache(root, trans, path, inode);
 out_put:
index 08dfc57e22705363f1159def79316263a9f4293a..e6811c42e41ef34d27119ee5420d4c6653df8519 100644
@@ -566,6 +566,8 @@ static noinline void compress_file_range(struct inode *inode,
                                                     PAGE_SET_WRITEBACK |
                                                     page_error_op |
                                                     PAGE_END_WRITEBACK);
+                       btrfs_free_reserved_data_space_noquota(inode, start,
+                                               end - start + 1);
                        goto free_pages_out;
                }
        }
@@ -742,7 +744,7 @@ static noinline void submit_compressed_extents(struct inode *inode,
                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1);
 
-               ret = btrfs_reserve_extent(root,
+               ret = btrfs_reserve_extent(root, async_extent->ram_size,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint, &ins, 1, 1);
@@ -969,7 +971,8 @@ static noinline int cow_file_range(struct inode *inode,
                                     EXTENT_DEFRAG, PAGE_UNLOCK |
                                     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
                                     PAGE_END_WRITEBACK);
-
+                       btrfs_free_reserved_data_space_noquota(inode, start,
+                                               end - start + 1);
                        *nr_written = *nr_written +
                             (end - start + PAGE_SIZE) / PAGE_SIZE;
                        *page_started = 1;
@@ -989,7 +992,7 @@ static noinline int cow_file_range(struct inode *inode,
                unsigned long op;
 
                cur_alloc_size = disk_num_bytes;
-               ret = btrfs_reserve_extent(root, cur_alloc_size,
+               ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
@@ -1489,8 +1492,10 @@ static noinline int run_delalloc_nocow(struct inode *inode,
                extent_clear_unlock_delalloc(inode, cur_offset,
                                             cur_offset + num_bytes - 1,
                                             locked_page, EXTENT_LOCKED |
-                                            EXTENT_DELALLOC, PAGE_UNLOCK |
-                                            PAGE_SET_PRIVATE2);
+                                            EXTENT_DELALLOC |
+                                            EXTENT_CLEAR_DATA_RESV,
+                                            PAGE_UNLOCK | PAGE_SET_PRIVATE2);
+
                if (!nolock && nocow)
                        btrfs_end_write_no_snapshoting(root);
                cur_offset = extent_end;
@@ -1807,7 +1812,9 @@ static void btrfs_clear_bit_hook(struct inode *inode,
                        return;
 
                if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
-                   && do_list && !(state->state & EXTENT_NORESERVE))
+                   && do_list && !(state->state & EXTENT_NORESERVE)
+                   && (*bits & (EXTENT_DO_ACCOUNTING |
+                   EXTENT_CLEAR_DATA_RESV)))
                        btrfs_free_reserved_data_space_noquota(inode,
                                        state->start, len);
 
@@ -7251,7 +7258,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
        int ret;
 
        alloc_hint = get_extent_allocation_hint(inode, start, len);
-       ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
+       ret = btrfs_reserve_extent(root, len, len, root->sectorsize, 0,
                                   alloc_hint, &ins, 1, 1);
        if (ret)
                return ERR_PTR(ret);
@@ -7751,6 +7758,13 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
                                ret = PTR_ERR(em2);
                                goto unlock_err;
                        }
+                       /*
+                        * For an inode marked NODATACOW or an extent marked
+                        * PREALLOC, use the existing or preallocated extent;
+                        * no need to adjust btrfs_space_info's bytes_may_use.
+                        */
+                       btrfs_free_reserved_data_space_noquota(inode,
+                                       start, len);
                        goto unlock;
                }
        }
@@ -7785,7 +7799,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
                        i_size_write(inode, start + len);
 
                adjust_dio_outstanding_extents(inode, dio_data, len);
-               btrfs_free_reserved_data_space(inode, start, len);
                WARN_ON(dio_data->reserve < len);
                dio_data->reserve -= len;
                dio_data->unsubmitted_oe_range_end = start + len;
@@ -10306,6 +10319,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
        u64 last_alloc = (u64)-1;
        int ret = 0;
        bool own_trans = true;
+       u64 end = start + num_bytes - 1;
 
        if (trans)
                own_trans = false;
@@ -10327,8 +10341,8 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                 * sized chunks.
                 */
                cur_bytes = min(cur_bytes, last_alloc);
-               ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
-                                          *alloc_hint, &ins, 1, 0);
+               ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
+                               min_size, 0, *alloc_hint, &ins, 1, 0);
                if (ret) {
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
@@ -10414,6 +10428,9 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                if (own_trans)
                        btrfs_end_transaction(trans, root);
        }
+       if (cur_offset < end)
+               btrfs_free_reserved_data_space(inode, cur_offset,
+                       end - cur_offset + 1);
        return ret;
 }
 
index 14ed1e9e6bc83df20da4863798d0052b376e5636..b2a2da5893af5273313b3fc45765801831ce5d91 100644
@@ -5084,7 +5084,7 @@ static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       return btrfs_qgroup_wait_for_completion(root->fs_info);
+       return btrfs_qgroup_wait_for_completion(root->fs_info, true);
 }
 
 static long _btrfs_ioctl_set_received_subvol(struct file *file,
index 93ee1c18ef9d4c2e1677fc3ad437b3044c84f68e..8db2e29fdcf417db6096a107c938a4a133fb6af2 100644
@@ -995,7 +995,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
                goto out;
        fs_info->quota_enabled = 0;
        fs_info->pending_quota_state = 0;
-       btrfs_qgroup_wait_for_completion(fs_info);
+       btrfs_qgroup_wait_for_completion(fs_info, false);
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
        fs_info->quota_root = NULL;
@@ -1453,10 +1453,9 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
        return ret;
 }
 
-struct btrfs_qgroup_extent_record *
-btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
-                                struct btrfs_delayed_ref_root *delayed_refs,
-                                struct btrfs_qgroup_extent_record *record)
+int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
+                               struct btrfs_delayed_ref_root *delayed_refs,
+                               struct btrfs_qgroup_extent_record *record)
 {
        struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
        struct rb_node *parent_node = NULL;
@@ -1475,12 +1474,42 @@ btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
                else if (bytenr > entry->bytenr)
                        p = &(*p)->rb_right;
                else
-                       return entry;
+                       return 1;
        }
 
        rb_link_node(&record->node, parent_node, p);
        rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
-       return NULL;
+       return 0;
+}
+
+int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+               struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
+               gfp_t gfp_flag)
+{
+       struct btrfs_qgroup_extent_record *record;
+       struct btrfs_delayed_ref_root *delayed_refs;
+       int ret;
+
+       if (!fs_info->quota_enabled || bytenr == 0 || num_bytes == 0)
+               return 0;
+       if (WARN_ON(trans == NULL))
+               return -EINVAL;
+       record = kmalloc(sizeof(*record), gfp_flag);
+       if (!record)
+               return -ENOMEM;
+
+       delayed_refs = &trans->transaction->delayed_refs;
+       record->bytenr = bytenr;
+       record->num_bytes = num_bytes;
+       record->old_roots = NULL;
+
+       spin_lock(&delayed_refs->lock);
+       ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
+                                                     record);
+       spin_unlock(&delayed_refs->lock);
+       if (ret > 0)
+               kfree(record);
+       return 0;
 }
 
 #define UPDATE_NEW     0
@@ -2303,6 +2332,10 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        int err = -ENOMEM;
        int ret = 0;
 
+       mutex_lock(&fs_info->qgroup_rescan_lock);
+       fs_info->qgroup_rescan_running = true;
+       mutex_unlock(&fs_info->qgroup_rescan_lock);
+
        path = btrfs_alloc_path();
        if (!path)
                goto out;
@@ -2369,6 +2402,9 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        }
 
 done:
+       mutex_lock(&fs_info->qgroup_rescan_lock);
+       fs_info->qgroup_rescan_running = false;
+       mutex_unlock(&fs_info->qgroup_rescan_lock);
        complete_all(&fs_info->qgroup_rescan_completion);
 }
 
@@ -2487,20 +2523,26 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
        return 0;
 }
 
-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
+                                    bool interruptible)
 {
        int running;
        int ret = 0;
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
        spin_lock(&fs_info->qgroup_lock);
-       running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+       running = fs_info->qgroup_rescan_running;
        spin_unlock(&fs_info->qgroup_lock);
        mutex_unlock(&fs_info->qgroup_rescan_lock);
 
-       if (running)
+       if (!running)
+               return 0;
+
+       if (interruptible)
                ret = wait_for_completion_interruptible(
                                        &fs_info->qgroup_rescan_completion);
+       else
+               wait_for_completion(&fs_info->qgroup_rescan_completion);
 
        return ret;
 }
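
For illustration, the two call sites in this series use the new flag as follows; a minimal sketch built only from what the hunks above show (user-triggered waits may be interrupted by a signal, internal teardown must not be):

    /* ioctl path: the user can abort the wait with a signal */
    ret = btrfs_qgroup_wait_for_completion(fs_info, true);

    /* quota disable: must wait until the rescan worker has really stopped */
    btrfs_qgroup_wait_for_completion(fs_info, false);
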
index 710887c06aaf4c6171a74b2b954653dfa61478e1..1bc64c864b626a25b9ec9bf983090504fe9a78e9 100644 (file)
@@ -46,7 +46,8 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info);
 int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
 void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info);
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
+                                    bool interruptible);
 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info, u64 src, u64 dst);
 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
@@ -63,10 +64,35 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
 struct btrfs_delayed_extent_op;
 int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info);
-struct btrfs_qgroup_extent_record *
-btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
-                                struct btrfs_delayed_ref_root *delayed_refs,
-                                struct btrfs_qgroup_extent_record *record);
+/*
+ * Insert one dirty extent record into @delayed_refs, informing qgroup to
+ * account that extent at transaction commit time.
+ *
+ * No-lock version; the caller must hold the delayed ref lock and have
+ * allocated the record.
+ *
+ * Return 0 for a successful insert.
+ * Return >0 if the record already exists; the caller can then free @record
+ * safely.
+ * Errors are not possible.
+ */
+int btrfs_qgroup_insert_dirty_extent_nolock(
+               struct btrfs_fs_info *fs_info,
+               struct btrfs_delayed_ref_root *delayed_refs,
+               struct btrfs_qgroup_extent_record *record);
+
+/*
+ * Insert one dirty extent record into @delayed_refs, informing qgroup to
+ * account that extent at transaction commit time.
+ *
+ * Encapsulated version that handles locking and record allocation
+ * internally.
+ *
+ * Return 0 if the operation succeeded.
+ * Return <0 on error, such as a memory allocation failure or an invalid
+ * parameter (NULL trans).
+ */
+int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+               struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
+               gfp_t gfp_flag);
+
 int
 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
                            struct btrfs_fs_info *fs_info,
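
With the encapsulated variant, callers no longer allocate the record or take the delayed-refs lock themselves. A minimal caller sketch (variable names and the abort label are hypothetical; the helper filters out quota-disabled and empty ranges internally):

    ret = btrfs_qgroup_insert_dirty_extent(trans, fs_info, bytenr,
                                           num_bytes, GFP_NOFS);
    if (ret < 0)            /* -ENOMEM or NULL trans */
            goto abort;
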
index b26a5aea41b4a67be495fb419080e3dd9d235136..8a2c2a07987b2e02b6023428b4b931940baffc03 100644 (file)
@@ -31,6 +31,7 @@
 #include "async-thread.h"
 #include "free-space-cache.h"
 #include "inode-map.h"
+#include "qgroup.h"
 
 /*
  * backref_node, mapping_node and tree_block start with this
@@ -3037,15 +3038,19 @@ int prealloc_file_extent_cluster(struct inode *inode,
        u64 num_bytes;
        int nr = 0;
        int ret = 0;
+       u64 prealloc_start = cluster->start - offset;
+       u64 prealloc_end = cluster->end - offset;
+       u64 cur_offset;
 
        BUG_ON(cluster->start != cluster->boundary[0]);
        inode_lock(inode);
 
-       ret = btrfs_check_data_free_space(inode, cluster->start,
-                                         cluster->end + 1 - cluster->start);
+       ret = btrfs_check_data_free_space(inode, prealloc_start,
+                                         prealloc_end + 1 - prealloc_start);
        if (ret)
                goto out;
 
+       cur_offset = prealloc_start;
        while (nr < cluster->nr) {
                start = cluster->boundary[nr] - offset;
                if (nr + 1 < cluster->nr)
@@ -3055,16 +3060,21 @@ int prealloc_file_extent_cluster(struct inode *inode,
 
                lock_extent(&BTRFS_I(inode)->io_tree, start, end);
                num_bytes = end + 1 - start;
+               if (cur_offset < start)
+                       btrfs_free_reserved_data_space(inode, cur_offset,
+                                       start - cur_offset);
                ret = btrfs_prealloc_file_range(inode, 0, start,
                                                num_bytes, num_bytes,
                                                end + 1, &alloc_hint);
+               cur_offset = end + 1;
                unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
                if (ret)
                        break;
                nr++;
        }
-       btrfs_free_reserved_data_space(inode, cluster->start,
-                                      cluster->end + 1 - cluster->start);
+       if (cur_offset < prealloc_end)
+               btrfs_free_reserved_data_space(inode, cur_offset,
+                                      prealloc_end + 1 - cur_offset);
 out:
        inode_unlock(inode);
        return ret;
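
As a worked example of the new bookkeeping (numbers illustrative): with prealloc_start = 0 and prealloc_end = 1048575, btrfs_check_data_free_space() reserves exactly 1 MiB up front. If btrfs_prealloc_file_range() fails once cur_offset has advanced to 262144, the final call frees prealloc_end + 1 - cur_offset = 786432 bytes, returning the reservation only for the tail that was never allocated; gaps between cluster boundaries are released inside the loop the same way, so nothing is freed twice.
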
@@ -3916,6 +3926,90 @@ int prepare_to_relocate(struct reloc_control *rc)
        return 0;
 }
 
+/*
+ * Qgroup fixer for data chunk relocation.
+ * Data relocation is done in the following steps:
+ * 1) Copy data extents into the data reloc tree
+ * 2) Create a tree reloc tree (special snapshot) for each related subvolume
+ * 3) Modify file extents in the tree reloc tree
+ * 4) Merge the tree reloc tree with the original fs tree by swapping tree
+ *    blocks
+ *
+ * The problem is that the data and tree reloc trees are not accounted to
+ * qgroup, and step 4) only informs qgroup about changed tree blocks, not
+ * about the file extents inside those tree blocks.
+ *
+ * The good news is that the related data extents are all in the data reloc
+ * tree, so we only need to inform qgroup about all file extents in the data
+ * reloc tree before committing the transaction.
+ */
+static int qgroup_fix_relocated_data_extents(struct btrfs_trans_handle *trans,
+                                            struct reloc_control *rc)
+{
+       struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
+       struct inode *inode = rc->data_inode;
+       struct btrfs_root *data_reloc_root = BTRFS_I(inode)->root;
+       struct btrfs_path *path;
+       struct btrfs_key key;
+       int ret = 0;
+
+       if (!fs_info->quota_enabled)
+               return 0;
+
+       /*
+        * The qgroup fix is only valid in the stage where we update the
+        * data pointers.
+        * In the MOVING_DATA stage we would miss the point where tree
+        * blocks are swapped, so we don't fix anything there.
+        */
+       if (!(rc->stage == UPDATE_DATA_PTRS && rc->extents_found))
+               return 0;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+       key.objectid = btrfs_ino(inode);
+       key.type = BTRFS_EXTENT_DATA_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, data_reloc_root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out;
+
+       lock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
+       while (1) {
+               struct btrfs_file_extent_item *fi;
+
+               btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+               if (key.objectid > btrfs_ino(inode))
+                       break;
+               if (key.type != BTRFS_EXTENT_DATA_KEY)
+                       goto next;
+               fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                   struct btrfs_file_extent_item);
+               if (btrfs_file_extent_type(path->nodes[0], fi) !=
+                               BTRFS_FILE_EXTENT_REG)
+                       goto next;
+               ret = btrfs_qgroup_insert_dirty_extent(trans, fs_info,
+                       btrfs_file_extent_disk_bytenr(path->nodes[0], fi),
+                       btrfs_file_extent_disk_num_bytes(path->nodes[0], fi),
+                       GFP_NOFS);
+               if (ret < 0)
+                       break;
+next:
+               ret = btrfs_next_item(data_reloc_root, path);
+               if (ret < 0)
+                       break;
+               if (ret > 0) {
+                       ret = 0;
+                       break;
+               }
+       }
+       unlock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 {
        struct rb_root blocks = RB_ROOT;
@@ -4102,10 +4196,16 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 
        /* get rid of pinned extents */
        trans = btrfs_join_transaction(rc->extent_root);
-       if (IS_ERR(trans))
+       if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
-       else
-               btrfs_commit_transaction(trans, rc->extent_root);
+               goto out_free;
+       }
+       err = qgroup_fix_relocated_data_extents(trans, rc);
+       if (err < 0) {
+               btrfs_abort_transaction(trans, err);
+               goto out_free;
+       }
+       btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
        btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
        btrfs_free_path(path);
@@ -4468,10 +4568,16 @@ int btrfs_recover_relocation(struct btrfs_root *root)
        unset_reloc_control(rc);
 
        trans = btrfs_join_transaction(rc->extent_root);
-       if (IS_ERR(trans))
+       if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
-       else
-               err = btrfs_commit_transaction(trans, rc->extent_root);
+               goto out_free;
+       }
+       err = qgroup_fix_relocated_data_extents(trans, rc);
+       if (err < 0) {
+               btrfs_abort_transaction(trans, err);
+               goto out_free;
+       }
+       err = btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
        kfree(rc);
 out:
index 7fd7e1830cfe676e74000b54de026a5a1070cc1a..091296062456b5621ba6a24d4dcae6932afcba6f 100644 (file)
@@ -272,6 +272,23 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                root_key.objectid = key.offset;
                key.offset++;
 
+               /*
+                * The root might have been inserted already: before we look
+                * for orphan roots, log replay might have happened, which
+                * triggers a transaction commit and qgroup accounting, and
+                * that in turn reads and inserts fs roots while doing
+                * backref walking.
+                */
+               root = btrfs_lookup_fs_root(tree_root->fs_info,
+                                           root_key.objectid);
+               if (root) {
+                       WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
+                                         &root->state));
+                       if (btrfs_root_refs(&root->root_item) == 0)
+                               btrfs_add_dead_root(root);
+                       continue;
+               }
+
                root = btrfs_read_fs_root(tree_root, &root_key);
                err = PTR_ERR_OR_ZERO(root);
                if (err && err != -ENOENT) {
@@ -310,16 +327,8 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
 
                err = btrfs_insert_fs_root(root->fs_info, root);
-               /*
-                * The root might have been inserted already, as before we look
-                * for orphan roots, log replay might have happened, which
-                * triggers a transaction commit and qgroup accounting, which
-                * in turn reads and inserts fs roots while doing backref
-                * walking.
-                */
-               if (err == -EEXIST)
-                       err = 0;
                if (err) {
+                       BUG_ON(err == -EEXIST);
                        btrfs_free_fs_root(root);
                        break;
                }
index 864ce334f696c31badf2919b07d503416545ded6..4071fe2bd0981a547f983ebeaa944f3179d97659 100644 (file)
@@ -2241,6 +2241,13 @@ static int btrfs_freeze(struct super_block *sb)
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = btrfs_sb(sb)->tree_root;
 
+       root->fs_info->fs_frozen = 1;
+       /*
+        * We don't need a barrier here, we'll wait for any transaction that
+        * could be in progress on other threads (and do delayed iputs that
+        * we want to avoid on a frozen filesystem), or do the commit
+        * ourselves.
+        */
        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
                /* no transaction, don't bother */
@@ -2251,6 +2258,14 @@ static int btrfs_freeze(struct super_block *sb)
        return btrfs_commit_transaction(trans, root);
 }
 
+static int btrfs_unfreeze(struct super_block *sb)
+{
+       struct btrfs_root *root = btrfs_sb(sb)->tree_root;
+
+       root->fs_info->fs_frozen = 0;
+       return 0;
+}
+
 static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
@@ -2299,6 +2314,7 @@ static const struct super_operations btrfs_super_ops = {
        .statfs         = btrfs_statfs,
        .remount_fs     = btrfs_remount,
        .freeze_fs      = btrfs_freeze,
+       .unfreeze_fs    = btrfs_unfreeze,
 };
 
 static const struct file_operations btrfs_ctl_fops = {
index 9cca0a72196180986b440865221a8aafa5659315..95d41919d034ef69647b6983dfac27f87f17e130 100644 (file)
@@ -2278,8 +2278,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
+       /*
+        * If the fs has been frozen, we cannot handle delayed iputs here;
+        * doing so would deadlock on SB_FREEZE_FS.
+        */
        if (current != root->fs_info->transaction_kthread &&
-           current != root->fs_info->cleaner_kthread)
+           current != root->fs_info->cleaner_kthread &&
+           !root->fs_info->fs_frozen)
                btrfs_run_delayed_iputs(root);
 
        return ret;
index fff3f3efa43602e0c04f9e5e019bf0e85a239d6f..e935035ac03435295c6bfcf9fd58c2b4978e65c8 100644 (file)
@@ -27,6 +27,7 @@
 #include "backref.h"
 #include "hash.h"
 #include "compression.h"
+#include "qgroup.h"
 
 /* magic values for the inode_only field in btrfs_log_inode:
  *
@@ -680,6 +681,21 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                ins.type = BTRFS_EXTENT_ITEM_KEY;
                offset = key->offset - btrfs_file_extent_offset(eb, item);
 
+               /*
+                * Manually record the dirty extent: we did a shallow copy of
+                * the file extent item here and skipped the normal backref
+                * update, modifying the extent tree entirely by ourselves.
+                * So we need to record the dirty extent for qgroup manually,
+                * as ownership of the file extent changed from the log tree
+                * (which doesn't affect qgroup) to the fs/file tree (which
+                * does).
+                */
+               ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
+                               btrfs_file_extent_disk_bytenr(eb, item),
+                               btrfs_file_extent_disk_num_bytes(eb, item),
+                               GFP_NOFS);
+               if (ret < 0)
+                       goto out;
+
                if (ins.objectid > 0) {
                        u64 csum_start;
                        u64 csum_end;
@@ -2807,7 +2823,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         */
        mutex_unlock(&root->log_mutex);
 
-       btrfs_init_log_ctx(&root_log_ctx);
+       btrfs_init_log_ctx(&root_log_ctx, NULL);
 
        mutex_lock(&log_root_tree->log_mutex);
        atomic_inc(&log_root_tree->log_batch);
@@ -4741,7 +4757,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                        if (ret < 0) {
                                err = ret;
                                goto out_unlock;
-                       } else if (ret > 0) {
+                       } else if (ret > 0 && ctx &&
+                                  other_ino != btrfs_ino(ctx->inode)) {
                                struct btrfs_key inode_key;
                                struct inode *other_inode;
 
index a9f1b75d080d3796756c9e9f09c04180b404eb1c..ab858e31ccbc2210a01a9b7ce69bf52127d00b8b 100644 (file)
@@ -30,15 +30,18 @@ struct btrfs_log_ctx {
        int log_transid;
        int io_err;
        bool log_new_dentries;
+       struct inode *inode;
        struct list_head list;
 };
 
-static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
+static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx,
+                                     struct inode *inode)
 {
        ctx->log_ret = 0;
        ctx->log_transid = 0;
        ctx->io_err = 0;
        ctx->log_new_dentries = false;
+       ctx->inode = inode;
        INIT_LIST_HEAD(&ctx->list);
 }
 
index 51f125508771ad950f2e1d4100cc398772bd06c6..035efce603a9c2db3989c1057dd1e20f82d2eba2 100644 (file)
@@ -834,10 +834,6 @@ static void __free_device(struct work_struct *work)
        struct btrfs_device *device;
 
        device = container_of(work, struct btrfs_device, rcu_work);
-
-       if (device->bdev)
-               blkdev_put(device->bdev, device->mode);
-
        rcu_string_free(device->name);
        kfree(device);
 }
@@ -852,6 +848,17 @@ static void free_device(struct rcu_head *head)
        schedule_work(&device->rcu_work);
 }
 
+static void btrfs_close_bdev(struct btrfs_device *device)
+{
+       if (device->bdev && device->writeable) {
+               sync_blockdev(device->bdev);
+               invalidate_bdev(device->bdev);
+       }
+
+       if (device->bdev)
+               blkdev_put(device->bdev, device->mode);
+}
+
 static void btrfs_close_one_device(struct btrfs_device *device)
 {
        struct btrfs_fs_devices *fs_devices = device->fs_devices;
@@ -870,10 +877,7 @@ static void btrfs_close_one_device(struct btrfs_device *device)
        if (device->missing)
                fs_devices->missing_devices--;
 
-       if (device->bdev && device->writeable) {
-               sync_blockdev(device->bdev);
-               invalidate_bdev(device->bdev);
-       }
+       btrfs_close_bdev(device);
 
        new_device = btrfs_alloc_device(NULL, &device->devid,
                                        device->uuid);
@@ -1932,6 +1936,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
                btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
        }
 
+       btrfs_close_bdev(device);
+
        call_rcu(&device->rcu, free_device);
 
        num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
@@ -2025,6 +2031,9 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
                /* zero out the old super if it is writable */
                btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
        }
+
+       btrfs_close_bdev(srcdev);
+
        call_rcu(&srcdev->rcu, free_device);
 
        /*
@@ -2080,6 +2089,8 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
         * the device_list_mutex lock.
         */
        btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
+
+       btrfs_close_bdev(tgtdev);
        call_rcu(&tgtdev->rcu, free_device);
 }
 
index eea64912c9c0a6a1e69e0b041b8404e611bf1e81..466f7d60edc2746f12a425ec91df2773f64dfedb 100644 (file)
@@ -607,20 +607,54 @@ static const struct file_operations format2_fops;
 static const struct file_operations format3_fops;
 static const struct file_operations format4_fops;
 
-static int table_open(struct inode *inode, struct file *file)
+static int table_open1(struct inode *inode, struct file *file)
 {
        struct seq_file *seq;
-       int ret = -1;
+       int ret;
 
-       if (file->f_op == &format1_fops)
-               ret = seq_open(file, &format1_seq_ops);
-       else if (file->f_op == &format2_fops)
-               ret = seq_open(file, &format2_seq_ops);
-       else if (file->f_op == &format3_fops)
-               ret = seq_open(file, &format3_seq_ops);
-       else if (file->f_op == &format4_fops)
-               ret = seq_open(file, &format4_seq_ops);
+       ret = seq_open(file, &format1_seq_ops);
+       if (ret)
+               return ret;
+
+       seq = file->private_data;
+       seq->private = inode->i_private; /* the dlm_ls */
+       return 0;
+}
+
+static int table_open2(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq;
+       int ret;
+
+       ret = seq_open(file, &format2_seq_ops);
+       if (ret)
+               return ret;
+
+       seq = file->private_data;
+       seq->private = inode->i_private; /* the dlm_ls */
+       return 0;
+}
+
+static int table_open3(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq;
+       int ret;
+
+       ret = seq_open(file, &format3_seq_ops);
+       if (ret)
+               return ret;
+
+       seq = file->private_data;
+       seq->private = inode->i_private; /* the dlm_ls */
+       return 0;
+}
+
+static int table_open4(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq;
+       int ret;
 
+       ret = seq_open(file, &format4_seq_ops);
        if (ret)
                return ret;
 
@@ -631,7 +665,7 @@ static int table_open(struct inode *inode, struct file *file)
 
 static const struct file_operations format1_fops = {
        .owner   = THIS_MODULE,
-       .open    = table_open,
+       .open    = table_open1,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
@@ -639,7 +673,7 @@ static const struct file_operations format1_fops = {
 
 static const struct file_operations format2_fops = {
        .owner   = THIS_MODULE,
-       .open    = table_open,
+       .open    = table_open2,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
@@ -647,7 +681,7 @@ static const struct file_operations format2_fops = {
 
 static const struct file_operations format3_fops = {
        .owner   = THIS_MODULE,
-       .open    = table_open,
+       .open    = table_open3,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
@@ -655,7 +689,7 @@ static const struct file_operations format3_fops = {
 
 static const struct file_operations format4_fops = {
        .owner   = THIS_MODULE,
-       .open    = table_open,
+       .open    = table_open4,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
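
The four near-identical openers exist because comparing file->f_op is no longer a reliable way to tell the formats apart (debugfs can wrap the registered fops in a proxy; a likely motivation, though the diff itself does not state it). A hypothetical macro could generate the variants instead; a sketch, not what the patch does:

    #define DLM_TABLE_OPEN(n)                                           \
    static int table_open##n(struct inode *inode, struct file *file)   \
    {                                                                   \
            struct seq_file *seq;                                       \
            int ret = seq_open(file, &format##n##_seq_ops);             \
                                                                        \
            if (ret)                                                    \
                    return ret;                                         \
            seq = file->private_data;                                   \
            seq->private = inode->i_private; /* the dlm_ls */           \
            return 0;                                                   \
    }
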
index d64d2a515cb2ce1d162b8fd9c0c0e0e5e68a7fbb..ccb401eebc112b26b848818185579929b5ee1030 100644 (file)
@@ -1699,11 +1699,11 @@ static int f2fs_write_end(struct file *file,
        trace_f2fs_write_end(inode, pos, len, copied);
 
        set_page_dirty(page);
-       f2fs_put_page(page, 1);
 
        if (pos + copied > i_size_read(inode))
                f2fs_i_size_write(inode, pos + copied);
 
+       f2fs_put_page(page, 1);
        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
        return copied;
 }
index 675fa79d86f6500d779917b7ca7da3e62f14fda7..14f5fe2b841e2e7d4e699072cc2aeb6e1c14c645 100644 (file)
@@ -538,7 +538,7 @@ struct f2fs_nm_info {
        /* NAT cache management */
        struct radix_tree_root nat_root;/* root of the nat entry cache */
        struct radix_tree_root nat_set_root;/* root of the nat set cache */
-       struct percpu_rw_semaphore nat_tree_lock;       /* protect nat_tree_lock */
+       struct rw_semaphore nat_tree_lock;      /* protect nat_tree_lock */
        struct list_head nat_entries;   /* cached nat entry list (clean) */
        unsigned int nat_cnt;           /* the # of cached nat entries */
        unsigned int dirty_nat_cnt;     /* total num of nat entries in set */
@@ -787,7 +787,7 @@ struct f2fs_sb_info {
        struct f2fs_checkpoint *ckpt;           /* raw checkpoint pointer */
        struct inode *meta_inode;               /* cache meta blocks */
        struct mutex cp_mutex;                  /* checkpoint procedure lock */
-       struct percpu_rw_semaphore cp_rwsem;            /* blocking FS operations */
+       struct rw_semaphore cp_rwsem;           /* blocking FS operations */
        struct rw_semaphore node_write;         /* locking node writes */
        wait_queue_head_t cp_wait;
        unsigned long last_time[MAX_TIME];      /* to store time in jiffies */
@@ -1074,22 +1074,22 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
 
 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
-       percpu_down_read(&sbi->cp_rwsem);
+       down_read(&sbi->cp_rwsem);
 }
 
 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 {
-       percpu_up_read(&sbi->cp_rwsem);
+       up_read(&sbi->cp_rwsem);
 }
 
 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
-       percpu_down_write(&sbi->cp_rwsem);
+       down_write(&sbi->cp_rwsem);
 }
 
 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
-       percpu_up_write(&sbi->cp_rwsem);
+       up_write(&sbi->cp_rwsem);
 }
 
 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
index 0e493f63ea41410ad04c22e24038bd2d4eebe0ba..47abb96098e49d71fcb082b39752be4f89882199 100644 (file)
@@ -2086,15 +2086,19 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
        if (unlikely(f2fs_readonly(src->i_sb)))
                return -EROFS;
 
-       if (S_ISDIR(src->i_mode) || S_ISDIR(dst->i_mode))
-               return -EISDIR;
+       if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
+               return -EINVAL;
 
        if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
                return -EOPNOTSUPP;
 
        inode_lock(src);
-       if (src != dst)
-               inode_lock(dst);
+       if (src != dst) {
+               if (!inode_trylock(dst)) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+       }
 
        ret = -EINVAL;
        if (pos_in + len > src->i_size || pos_in + len < pos_in)
@@ -2152,6 +2156,7 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
 out_unlock:
        if (src != dst)
                inode_unlock(dst);
+out:
        inode_unlock(src);
        return ret;
 }
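
Why a trylock rather than a second inode_lock(): two concurrent moves with swapped arguments could otherwise deadlock. An illustrative (hypothetical) interleaving:

    /* task A: move(f1 -> f2)          task B: move(f2 -> f1)
     *   inode_lock(f1)                  inode_lock(f2)
     *   inode_lock(f2)  ...blocks       inode_lock(f1)  ...blocks
     *
     * With inode_trylock(), whichever task loses the race returns
     * -EBUSY immediately instead of deadlocking.
     */
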
index b2fa4b615925b894a92377b215780edb33354571..f75d197d5beb05366e876876e09521db7ac3930c 100644 (file)
@@ -206,14 +206,14 @@ int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
        struct nat_entry *e;
        bool need = false;
 
-       percpu_down_read(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e) {
                if (!get_nat_flag(e, IS_CHECKPOINTED) &&
                                !get_nat_flag(e, HAS_FSYNCED_INODE))
                        need = true;
        }
-       percpu_up_read(&nm_i->nat_tree_lock);
+       up_read(&nm_i->nat_tree_lock);
        return need;
 }
 
@@ -223,11 +223,11 @@ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
        struct nat_entry *e;
        bool is_cp = true;
 
-       percpu_down_read(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e && !get_nat_flag(e, IS_CHECKPOINTED))
                is_cp = false;
-       percpu_up_read(&nm_i->nat_tree_lock);
+       up_read(&nm_i->nat_tree_lock);
        return is_cp;
 }
 
@@ -237,13 +237,13 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
        struct nat_entry *e;
        bool need_update = true;
 
-       percpu_down_read(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, ino);
        if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
                        (get_nat_flag(e, IS_CHECKPOINTED) ||
                         get_nat_flag(e, HAS_FSYNCED_INODE)))
                need_update = false;
-       percpu_up_read(&nm_i->nat_tree_lock);
+       up_read(&nm_i->nat_tree_lock);
        return need_update;
 }
 
@@ -284,7 +284,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
 
-       percpu_down_write(&nm_i->nat_tree_lock);
+       down_write(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, ni->nid);
        if (!e) {
                e = grab_nat_entry(nm_i, ni->nid);
@@ -334,7 +334,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
                        set_nat_flag(e, HAS_FSYNCED_INODE, true);
                set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
        }
-       percpu_up_write(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
 }
 
 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -342,7 +342,8 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        int nr = nr_shrink;
 
-       percpu_down_write(&nm_i->nat_tree_lock);
+       if (!down_write_trylock(&nm_i->nat_tree_lock))
+               return 0;
 
        while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
                struct nat_entry *ne;
@@ -351,7 +352,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
                __del_from_nat_cache(nm_i, ne);
                nr_shrink--;
        }
-       percpu_up_write(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
        return nr - nr_shrink;
 }
 
@@ -373,13 +374,13 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
        ni->nid = nid;
 
        /* Check nat cache */
-       percpu_down_read(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e) {
                ni->ino = nat_get_ino(e);
                ni->blk_addr = nat_get_blkaddr(e);
                ni->version = nat_get_version(e);
-               percpu_up_read(&nm_i->nat_tree_lock);
+               up_read(&nm_i->nat_tree_lock);
                return;
        }
 
@@ -403,11 +404,11 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
        node_info_from_raw_nat(ni, &ne);
        f2fs_put_page(page, 1);
 cache:
-       percpu_up_read(&nm_i->nat_tree_lock);
+       up_read(&nm_i->nat_tree_lock);
        /* cache nat entry */
-       percpu_down_write(&nm_i->nat_tree_lock);
+       down_write(&nm_i->nat_tree_lock);
        cache_nat_entry(sbi, nid, &ne);
-       percpu_up_write(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
 }
 
 /*
@@ -1788,7 +1789,7 @@ void build_free_nids(struct f2fs_sb_info *sbi)
        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
                                                        META_NAT, true);
 
-       percpu_down_read(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
 
        while (1) {
                struct page *page = get_current_nat_page(sbi, nid);
@@ -1820,7 +1821,7 @@ void build_free_nids(struct f2fs_sb_info *sbi)
                        remove_free_nid(nm_i, nid);
        }
        up_read(&curseg->journal_rwsem);
-       percpu_up_read(&nm_i->nat_tree_lock);
+       up_read(&nm_i->nat_tree_lock);
 
        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
                                        nm_i->ra_nid_pages, META_NAT, false);
@@ -2209,7 +2210,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
        if (!nm_i->dirty_nat_cnt)
                return;
 
-       percpu_down_write(&nm_i->nat_tree_lock);
+       down_write(&nm_i->nat_tree_lock);
 
        /*
         * if there is not enough space in the journal to store dirty nat
@@ -2232,7 +2233,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
        list_for_each_entry_safe(set, tmp, &sets, set_list)
                __flush_nat_entry_set(sbi, set);
 
-       percpu_up_write(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
 
        f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
 }
@@ -2268,8 +2269,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 
        mutex_init(&nm_i->build_lock);
        spin_lock_init(&nm_i->free_nid_list_lock);
-       if (percpu_init_rwsem(&nm_i->nat_tree_lock))
-               return -ENOMEM;
+       init_rwsem(&nm_i->nat_tree_lock);
 
        nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
        nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@@ -2326,7 +2326,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
        spin_unlock(&nm_i->free_nid_list_lock);
 
        /* destroy nat cache */
-       percpu_down_write(&nm_i->nat_tree_lock);
+       down_write(&nm_i->nat_tree_lock);
        while ((found = __gang_lookup_nat_cache(nm_i,
                                        nid, NATVEC_SIZE, natvec))) {
                unsigned idx;
@@ -2351,9 +2351,8 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
                        kmem_cache_free(nat_entry_set_slab, setvec[idx]);
                }
        }
-       percpu_up_write(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
 
-       percpu_free_rwsem(&nm_i->nat_tree_lock);
        kfree(nm_i->nat_bitmap);
        sbi->nm_info = NULL;
        kfree(nm_i);
index 1b86d3f638efcf87435da17af79811237fc584a9..7f863a645ab1d0c46052875f35f603bebde10a33 100644 (file)
@@ -706,8 +706,6 @@ static void destroy_percpu_info(struct f2fs_sb_info *sbi)
                percpu_counter_destroy(&sbi->nr_pages[i]);
        percpu_counter_destroy(&sbi->alloc_valid_block_count);
        percpu_counter_destroy(&sbi->total_valid_inode_count);
-
-       percpu_free_rwsem(&sbi->cp_rwsem);
 }
 
 static void f2fs_put_super(struct super_block *sb)
@@ -1483,9 +1481,6 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
 {
        int i, err;
 
-       if (percpu_init_rwsem(&sbi->cp_rwsem))
-               return -ENOMEM;
-
        for (i = 0; i < NR_COUNT_TYPE; i++) {
                err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
                if (err)
@@ -1686,6 +1681,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                sbi->write_io[i].bio = NULL;
        }
 
+       init_rwsem(&sbi->cp_rwsem);
        init_waitqueue_head(&sbi->cp_wait);
        init_sb_info(sbi);
 
index 19f532e7d35e9a501256ab64f76a645bf7d6b2e6..6dc4296eed62c5d2a493c5ba3f649c890aa37981 100644 (file)
@@ -223,8 +223,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
                size -= n;
                buf += n;
                copied += n;
-               if (!m->count)
+               if (!m->count) {
+                       m->from = 0;
                        m->index++;
+               }
                if (!size)
                        goto Done;
        }
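
The m->from reset matters when one record is consumed across two reads; a plausible reading of the hunk, with illustrative numbers: a 100-byte record partially copied leaves m->from = 60; the next read copies the remaining 40 bytes, m->count drops to zero and m->index advances, but without zeroing m->from the first 60 bytes of the next record would be silently skipped. Resetting m->from whenever the buffer drains keeps the offset scoped to a single record.
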
index b45345d701e77fea809144009e3a46fa05560533..51157da3f76ed87a2a0c7f34c7c2240db116ed00 100644 (file)
@@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
 
        p = c->gap_lebs;
        do {
-               ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs);
+               ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
                written = layout_leb_in_gaps(c, p);
                if (written < 0) {
                        err = written;
index e237811f09ce5ce6139aab0951047a74dd715665..11a004114eba5fcd00cce57893c28846ba71d17f 100644 (file)
@@ -575,7 +575,8 @@ static int ubifs_xattr_get(const struct xattr_handler *handler,
        dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
                inode->i_ino, dentry, size);
 
-       return  __ubifs_getxattr(inode, name, buffer, size);
+       name = xattr_full_name(handler, name);
+       return __ubifs_getxattr(inode, name, buffer, size);
 }
 
 static int ubifs_xattr_set(const struct xattr_handler *handler,
@@ -586,6 +587,8 @@ static int ubifs_xattr_set(const struct xattr_handler *handler,
        dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
                name, inode->i_ino, dentry, size);
 
+       name = xattr_full_name(handler, name);
+
        if (value)
                return __ubifs_setxattr(inode, name, value, size, flags);
        else
index 59ffaa68b11bbe1fda638fcf0d7128f3f5d2dc9d..23ddf4b46a9b016ef7adab062604c2b08f5563f7 100644 (file)
@@ -71,7 +71,8 @@ static inline bool bio_has_data(struct bio *bio)
 {
        if (bio &&
            bio->bi_iter.bi_size &&
-           bio_op(bio) != REQ_OP_DISCARD)
+           bio_op(bio) != REQ_OP_DISCARD &&
+           bio_op(bio) != REQ_OP_SECURE_ERASE)
                return true;
 
        return false;
@@ -79,7 +80,9 @@ static inline bool bio_has_data(struct bio *bio)
 
 static inline bool bio_no_advance_iter(struct bio *bio)
 {
-       return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME;
+       return bio_op(bio) == REQ_OP_DISCARD ||
+              bio_op(bio) == REQ_OP_SECURE_ERASE ||
+              bio_op(bio) == REQ_OP_WRITE_SAME;
 }
 
 static inline bool bio_is_rw(struct bio *bio)
@@ -199,6 +202,9 @@ static inline unsigned bio_segments(struct bio *bio)
        if (bio_op(bio) == REQ_OP_DISCARD)
                return 1;
 
+       if (bio_op(bio) == REQ_OP_SECURE_ERASE)
+               return 1;
+
        if (bio_op(bio) == REQ_OP_WRITE_SAME)
                return 1;
 
index 2c210b6a7bcf8ba3cd6a48571ff0fe8d84b21e91..e79055c8b577995c1a668f8ab228aa0e593c3432 100644 (file)
@@ -882,7 +882,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
                                                     int op)
 {
-       if (unlikely(op == REQ_OP_DISCARD))
+       if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
                return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
        if (unlikely(op == REQ_OP_WRITE_SAME))
@@ -913,7 +913,9 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
        if (unlikely(rq->cmd_type != REQ_TYPE_FS))
                return q->limits.max_hw_sectors;
 
-       if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+       if (!q->limits.chunk_sectors ||
+           req_op(rq) == REQ_OP_DISCARD ||
+           req_op(rq) == REQ_OP_SECURE_ERASE)
                return blk_queue_get_max_sectors(q, req_op(rq));
 
        return min(blk_max_size_offset(q, offset),
index e2949397c19b0d58bbebf84d37028ae9b969546c..8dbc8929a6a0051a2840a3badb580b7bb3d8adc4 100644 (file)
  */
 #define asm_volatile_goto(x...)        do { asm goto(x); asm (""); } while (0)
 
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+/*
+ * sparse (__CHECKER__) pretends to be gcc, but can't do constant
+ * folding in __builtin_bswap*() (yet), so don't set these for it.
+ */
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
 #if GCC_VERSION >= 40400
 #define __HAVE_BUILTIN_BSWAP32__
 #define __HAVE_BUILTIN_BSWAP64__
 #if GCC_VERSION >= 40800
 #define __HAVE_BUILTIN_BSWAP16__
 #endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
 
 #if GCC_VERSION >= 50000
 #define KASAN_ABI_VERSION 4
index 56b0b7ec66aacd6bb895deb21d818ef9b068e801..99ac022edc60633be531e5df89d252d26f1b0766 100644 (file)
  */
 #define E_ITS_MOVI_UNMAPPED_INTERRUPT          0x010107
 #define E_ITS_MOVI_UNMAPPED_COLLECTION         0x010109
+#define E_ITS_INT_UNMAPPED_INTERRUPT           0x010307
 #define E_ITS_CLEAR_UNMAPPED_INTERRUPT         0x010507
 #define E_ITS_MAPD_DEVICE_OOR                  0x010801
 #define E_ITS_MAPC_PROCNUM_OOR                 0x010902
index 2599a980340f44f2550bacded406cb34f38808dc..fbc1fa625c3ec306ed610c2c8b1b1a50f505e2a5 100644 (file)
@@ -1251,10 +1251,12 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
                      unsigned int command_bits, u32 flags);
 
-#define PCI_IRQ_NOLEGACY       (1 << 0) /* don't use legacy interrupts */
-#define PCI_IRQ_NOMSI          (1 << 1) /* don't use MSI interrupts */
-#define PCI_IRQ_NOMSIX         (1 << 2) /* don't use MSI-X interrupts */
-#define PCI_IRQ_NOAFFINITY     (1 << 3) /* don't auto-assign affinity */
+#define PCI_IRQ_LEGACY         (1 << 0) /* allow legacy interrupts */
+#define PCI_IRQ_MSI            (1 << 1) /* allow MSI interrupts */
+#define PCI_IRQ_MSIX           (1 << 2) /* allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY       (1 << 3) /* auto-assign affinity */
+#define PCI_IRQ_ALL_TYPES \
+       (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
 
 /* kmem_cache style wrapper around pci_alloc_consistent() */
 
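
The polarity flip means drivers now opt in to the interrupt types they can handle. A minimal probe-time sketch, assuming the pci_alloc_irq_vectors() helper from the same API family (vector counts hypothetical):

    int nvecs;

    /* take MSI-X, MSI, or a legacy IRQ, whichever the platform offers */
    nvecs = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
    if (nvecs < 0)
            return nvecs;
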
index 697e160c78d0d7afd65dcf9ea07afaccd431669c..a4f7203a9017040ca166cdb06aacf1df30113d38 100644 (file)
@@ -42,6 +42,8 @@ extern int proc_dostring(struct ctl_table *, int,
                         void __user *, size_t *, loff_t *);
 extern int proc_dointvec(struct ctl_table *, int,
                         void __user *, size_t *, loff_t *);
+extern int proc_douintvec(struct ctl_table *, int,
+                        void __user *, size_t *, loff_t *);
 extern int proc_dointvec_minmax(struct ctl_table *, int,
                                void __user *, size_t *, loff_t *);
 extern int proc_dointvec_jiffies(struct ctl_table *, int,
index 8e90dd28bb7536d16058d711b096f8125bd42874..e1f96737c2a17fc51cd1222da7fd8b6b4c0bdeb4 100644 (file)
@@ -2115,22 +2115,17 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
                                       size_t len)
 {
        const void __user *p = udata->inbuf + offset;
-       bool ret = false;
+       bool ret;
        u8 *buf;
 
        if (len > USHRT_MAX)
                return false;
 
-       buf = kmalloc(len, GFP_KERNEL);
-       if (!buf)
+       buf = memdup_user(p, len);
+       if (IS_ERR(buf))
                return false;
 
-       if (copy_from_user(buf, p, len))
-               goto free;
-
        ret = !memchr_inv(buf, 0, len);
-
-free:
        kfree(buf);
        return ret;
 }
index 9a37c541822f6093aa86188484e5a6463e214e96..b5486e64860759aed11763ac983b3b4aa12988b3 100644 (file)
@@ -9,8 +9,8 @@
 
 DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
 
-DECLARE_PER_CPU(int, xen_vcpu_id);
-static inline int xen_vcpu_nr(int cpu)
+DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
+static inline uint32_t xen_vcpu_nr(int cpu)
 {
        return per_cpu(xen_vcpu_id, cpu);
 }
index 5650f5317e0c3bf48782349f1f3e64e2aa3d48a1..3cfabdf7b942908d78f071a5bedab9f7bcb6fde7 100644 (file)
@@ -6166,7 +6166,7 @@ static int __perf_pmu_output_stop(void *info)
 {
        struct perf_event *event = info;
        struct pmu *pmu = event->pmu;
-       struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
        struct remote_output ro = {
                .rb     = event->rb,
        };
index f68959341c0faa87809531af1a508dde6978feef..32f6cfcff21244a199bec1877b3a29ada5dcac5f 100644 (file)
@@ -39,6 +39,7 @@ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
                return NULL;
        }
 
+       get_online_cpus();
        if (max_vecs >= num_online_cpus()) {
                cpumask_copy(affinity_mask, cpu_online_mask);
                *nr_vecs = num_online_cpus();
@@ -56,6 +57,7 @@ struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
                }
                *nr_vecs = vecs;
        }
+       put_online_cpus();
 
        return affinity_mask;
 }
index b4c1bc7c9ca204440c7c51e7df7a49b0c2ae511f..637389088b3f9cf88db58ce920350a3517a20ed2 100644 (file)
@@ -820,6 +820,17 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
        desc->name = name;
 
        if (handle != handle_bad_irq && is_chained) {
+               /*
+                * We're about to start this interrupt immediately,
+                * hence the need to set the trigger configuration.
+                * But the .set_type callback may have overridden the
+                * flow handler, ignoring that we're dealing with a
+                * chained interrupt. Reset it immediately because we
+                * do know better.
+                */
+               __irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data));
+               desc->handle_irq = handle;
+
                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
                irq_settings_set_nothread(desc);
index 73a2b786b5e99b528250ddbc4b4fb007ab1e67e5..9530fcd27704008a9531c49d623910c707d681ce 100644 (file)
@@ -1681,8 +1681,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        action->dev_id = dev_id;
 
        retval = irq_chip_pm_get(&desc->irq_data);
-       if (retval < 0)
+       if (retval < 0) {
+               kfree(action);
                return retval;
+       }
 
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
@@ -1985,8 +1987,10 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler,
        action->percpu_dev_id = dev_id;
 
        retval = irq_chip_pm_get(&desc->irq_data);
-       if (retval < 0)
+       if (retval < 0) {
+               kfree(action);
                return retval;
+       }
 
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
index 276762f3a46078141117e5810c1afc964e869ec9..d5760c42f042f4accfb65de23784bcaa877d9b00 100644 (file)
@@ -9,10 +9,10 @@
 
 char *_braille_console_setup(char **str, char **brl_options)
 {
-       if (!memcmp(*str, "brl,", 4)) {
+       if (!strncmp(*str, "brl,", 4)) {
                *brl_options = "";
                *str += 4;
-       } else if (!memcmp(str, "brl=", 4)) {
+       } else if (!strncmp(*str, "brl=", 4)) {
                *brl_options = *str + 4;
                *str = strchr(*brl_options, ',');
                if (!*str)
index b43d0b27c1feb5f6af7250ea2312bf6d5251aad0..a13bbdaab47dc66fec2c0dd3da0b2c9898725bdf 100644 (file)
@@ -2140,6 +2140,21 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
        return 0;
 }
 
+static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
+                                int *valp,
+                                int write, void *data)
+{
+       if (write) {
+               if (*negp)
+                       return -EINVAL;
+               *valp = *lvalp;
+       } else {
+               unsigned int val = *valp;
+               *lvalp = (unsigned long)val;
+       }
+       return 0;
+}
+
 static const char proc_wspace_sep[] = { ' ', '\t', '\n' };
 
 static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
@@ -2259,8 +2274,27 @@ static int do_proc_dointvec(struct ctl_table *table, int write,
 int proc_dointvec(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-    return do_proc_dointvec(table,write,buffer,lenp,ppos,
-                           NULL,NULL);
+       return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL);
+}
+
+/**
+ * proc_douintvec - read a vector of unsigned integers
+ * @table: the sysctl table
+ * @write: %TRUE if this is a write to the sysctl file
+ * @buffer: the user buffer
+ * @lenp: the size of the user buffer
+ * @ppos: file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) unsigned integer
+ * values from/to the user buffer, treated as an ASCII string.
+ *
+ * Returns 0 on success.
+ */
+int proc_douintvec(struct ctl_table *table, int write,
+                    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       return do_proc_dointvec(table, write, buffer, lenp, ppos,
+                               do_proc_douintvec_conv, NULL);
 }
 
 /*
@@ -2858,6 +2892,12 @@ int proc_dointvec(struct ctl_table *table, int write,
        return -ENOSYS;
 }
 
+int proc_douintvec(struct ctl_table *table, int write,
+                 void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       return -ENOSYS;
+}
+
 int proc_dointvec_minmax(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -2903,6 +2943,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
  * exception granted :-)
  */
 EXPORT_SYMBOL(proc_dointvec);
+EXPORT_SYMBOL(proc_douintvec);
 EXPORT_SYMBOL(proc_dointvec_jiffies);
 EXPORT_SYMBOL(proc_dointvec_minmax);
 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
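
A sysctl entry wired to the new handler would look like the following minimal sketch (names hypothetical; the field layout is the standard ctl_table one):

    static unsigned int my_threshold;

    static struct ctl_table my_table[] = {
            {
                    .procname     = "my_threshold",
                    .data         = &my_threshold,
                    .maxlen       = sizeof(unsigned int),
                    .mode         = 0644,
                    .proc_handler = proc_douintvec,
            },
            { }
    };
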
index 3b65746c7f156dbdea1e7b1f6fb4e43197820369..e07fb093f8195ace846881ffd81da46d5ce66e65 100644 (file)
@@ -401,7 +401,10 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
        do {
                seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
-               now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
+               now = ktime_to_ns(tkr->base);
+
+               now += clocksource_delta(tkr->read(tkr->clock),
+                                        tkr->cycle_last, tkr->mask);
        } while (read_seqcount_retry(&tkf->seq, seq));
 
        return now;
index f6bd65236712b218fe89c2bee83b8e56ca1bf89e..107310a6f36f43a47300c46a4bd3b51a6b5b70fa 100644 (file)
@@ -23,7 +23,9 @@
 
 #include "timekeeping_internal.h"
 
-static unsigned int sleep_time_bin[32] = {0};
+#define NUM_BINS 32
+
+static unsigned int sleep_time_bin[NUM_BINS] = {0};
 
 static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
 {
@@ -69,6 +71,9 @@ late_initcall(tk_debug_sleep_time_init);
 
 void tk_debug_account_sleep_time(struct timespec64 *t)
 {
-       sleep_time_bin[fls(t->tv_sec)]++;
+       /* Cap bin index so we don't overflow the array */
+       int bin = min(fls(t->tv_sec), NUM_BINS-1);
+
+       sleep_time_bin[bin]++;
 }
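
The overflow being fixed is an off-by-one at the top end: fls() returns 32 when bit 31 of tv_sec is set, but sleep_time_bin[] only has valid indices 0..31, so the unclamped fls(t->tv_sec) could write one element past the end of the array. min(fls(t->tv_sec), NUM_BINS - 1) caps the bin at 31.
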
 
index 7598e6ca817a8b9ed5ea6ec85781499f967d26f7..dbafc5df03f3f04705ff5739c2c0f51aed8ca6ee 100644 (file)
@@ -223,7 +223,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
        what |= MASK_TC_BIT(op_flags, META);
        what |= MASK_TC_BIT(op_flags, PREFLUSH);
        what |= MASK_TC_BIT(op_flags, FUA);
-       if (op == REQ_OP_DISCARD)
+       if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
                what |= BLK_TC_ACT(BLK_TC_DISCARD);
        if (op == REQ_OP_FLUSH)
                what |= BLK_TC_ACT(BLK_TC_FLUSH);
index 78a23c5c302d96ad6ef1198a02deb39a9a6bc228..be0ee11fa0d9ee8ff068244a559a2c52ad96c84c 100644 (file)
@@ -262,7 +262,14 @@ config COMPACTION
        select MIGRATION
        depends on MMU
        help
-         Allows the compaction of memory for the allocation of huge pages.
+          Compaction is the only memory management component that can
+          reliably form high-order (larger physically contiguous) memory
+          blocks. The page allocator relies heavily on compaction, and
+          lacking the feature can lead to unexpected OOM killer
+          invocations for high-order memory requests. You shouldn't
+          disable this option unless there really is a strong reason for
+          it; in that case we would be interested to hear about it at
+          linux-mm@kvack.org.
 
 #
 # support for page migration
index 2373f0a7d3405bb869ac4a3a4be175289af38114..2db2112aa31ed5506b4aef572d8e8c70477a8aa7 100644 (file)
@@ -1512,7 +1512,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        struct page *page;
        pgtable_t pgtable;
        pmd_t _pmd;
-       bool young, write, dirty;
+       bool young, write, dirty, soft_dirty;
        unsigned long addr;
        int i;
 
@@ -1546,6 +1546,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        write = pmd_write(*pmd);
        young = pmd_young(*pmd);
        dirty = pmd_dirty(*pmd);
+       soft_dirty = pmd_soft_dirty(*pmd);
 
        pmdp_huge_split_prepare(vma, haddr, pmd);
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1562,6 +1563,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        swp_entry_t swp_entry;
                        swp_entry = make_migration_entry(page + i, write);
                        entry = swp_entry_to_pte(swp_entry);
+                       if (soft_dirty)
+                               entry = pte_swp_mksoft_dirty(entry);
                } else {
                        entry = mk_pte(page + i, vma->vm_page_prot);
                        entry = maybe_mkwrite(entry, vma);
@@ -1569,6 +1572,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                                entry = pte_wrprotect(entry);
                        if (!young)
                                entry = pte_mkold(entry);
+                       if (soft_dirty)
+                               entry = pte_mksoft_dirty(entry);
                }
                if (dirty)
                        SetPageDirty(page + i);
index 2ff0289ad061322298b472edba9eebb9abb302a7..9a6a51a7c416074ccd5d974ac9f17a54c85d14db 100644 (file)
@@ -4082,24 +4082,6 @@ static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
        atomic_add(n, &memcg->id.ref);
 }
 
-static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
-{
-       while (!atomic_inc_not_zero(&memcg->id.ref)) {
-               /*
-                * The root cgroup cannot be destroyed, so it's refcount must
-                * always be >= 1.
-                */
-               if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
-                       VM_BUG_ON(1);
-                       break;
-               }
-               memcg = parent_mem_cgroup(memcg);
-               if (!memcg)
-                       memcg = root_mem_cgroup;
-       }
-       return memcg;
-}
-
 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
 {
        if (atomic_sub_and_test(n, &memcg->id.ref)) {
@@ -5821,6 +5803,24 @@ static int __init mem_cgroup_init(void)
 subsys_initcall(mem_cgroup_init);
 
 #ifdef CONFIG_MEMCG_SWAP
+static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
+{
+       while (!atomic_inc_not_zero(&memcg->id.ref)) {
+               /*
+                * The root cgroup cannot be destroyed, so its refcount must
+                * always be >= 1.
+                */
+               if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
+                       VM_BUG_ON(1);
+                       break;
+               }
+               memcg = parent_mem_cgroup(memcg);
+               if (!memcg)
+                       memcg = root_mem_cgroup;
+       }
+       return memcg;
+}
+
 /**
  * mem_cgroup_swapout - transfer a memsw charge to swap
  * @page: page whose memsw charge to transfer
index 65ec288dc057e874a661fd372501454c32cc2bd4..c8a955b1297e0b60fb2efffdf3fc2bb71cfc4630 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dax.h>
 #include <linux/gfp.h>
 #include <linux/export.h>
 #include <linux/blkdev.h>
@@ -544,6 +545,14 @@ do_readahead(struct address_space *mapping, struct file *filp,
        if (!mapping || !mapping->a_ops)
                return -EINVAL;
 
+       /*
+        * Readahead doesn't make sense for DAX inodes, but we don't want it
+        * to report a failure either.  Instead, we just return success and
+        * don't do any work.
+        */
+       if (dax_mapping(mapping))
+               return 0;
+
        return force_page_cache_readahead(mapping, filp, index, nr);
 }
 
index 8ebae91a6b5519cebf28c623527f6429904820b1..a3cc3052f830a01d414c7ff90c375988561863e1 100644 (file)
@@ -83,7 +83,7 @@ static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
        unsigned long check_high = check_low + n;
 
        /* Does not overlap if entirely above or entirely below. */
-       if (check_low >= high || check_high < low)
+       if (check_low >= high || check_high <= low)
                return false;
 
        return true;
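Both ranges here are half-open, [low, high), so two ranges that merely touch do not overlap; the old `check_high < low` test wrongly reported adjacency as an overlap. A small standalone check of the corrected predicate (the generalized signature is illustrative):

#include <assert.h>
#include <stdbool.h>

/* Half-open intervals [a_lo, a_hi) and [b_lo, b_hi):
 * they are disjoint iff one ends at or before the other begins. */
static bool overlaps(unsigned long a_lo, unsigned long a_hi,
                     unsigned long b_lo, unsigned long b_hi)
{
    if (a_lo >= b_hi || a_hi <= b_lo)
        return false;
    return true;
}

int main(void)
{
    assert(!overlaps(0, 10, 10, 20));   /* adjacent: old '<' called this an overlap */
    assert(overlaps(0, 11, 10, 20));    /* one byte shared */
    assert(!overlaps(30, 40, 10, 20));  /* entirely above */
    return 0;
}
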
@@ -124,7 +124,7 @@ static inline const char *check_kernel_text_object(const void *ptr,
 static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 {
        /* Reject if object wraps past end of memory. */
-       if (ptr + n < ptr)
+       if ((unsigned long)ptr + n < (unsigned long)ptr)
                return "<wrapped address>";
 
        /* Reject if NULL or ZERO-allocation. */
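Forming a pointer past the end of its object is undefined behavior in C, so a compiler may assume `ptr + n < ptr` is always false and delete the check; comparing in unsigned integer arithmetic keeps the wraparound test well defined. A minimal demonstration of the integer form (the addresses are illustrative):

#include <assert.h>
#include <limits.h>

/* Well-defined: unsigned arithmetic wraps modulo 2^N, so the sum
 * being smaller than the base detects address-space wraparound. */
static int wraps(unsigned long addr, unsigned long n)
{
    return addr + n < addr;
}

int main(void)
{
    assert(!wraps(0x1000UL, 4096UL));        /* ordinary object */
    assert(wraps(ULONG_MAX - 10, 4096UL));   /* would wrap past the end */
    return 0;
}
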
index 49a00d54b835f156745c27bfb12b1f64bcf9140c..aed4511f0304e4922ff6770a3d573777b4f235ed 100755 (executable)
@@ -2136,9 +2136,11 @@ sub vcs_file_exists {
 
     my $cmd = $VCS_cmds{"file_exists_cmd"};
     $cmd =~ s/(\$\w+)/$1/eeg;          # interpolate $cmd
-
+    $cmd .= " 2>&1";
     $exists = &{$VCS_cmds{"execute_cmd"}}($cmd);
 
+    return 0 if ($? != 0);
+
     return $exists;
 }
 
index 574b1b48996f1d9199bd1349ca08b0f108199762..7100f05e651a0da2027a5ebb92b558af11662925 100644 (file)
@@ -4828,7 +4828,7 @@ enum {
        ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC292_FIXUP_TPT440_DOCK,
        ALC292_FIXUP_TPT440,
-       ALC283_FIXUP_BXBT2807_MIC,
+       ALC283_FIXUP_HEADSET_MIC,
        ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
        ALC282_FIXUP_ASPIRE_V5_PINS,
        ALC280_FIXUP_HP_GPIO4,
@@ -5321,7 +5321,7 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC292_FIXUP_TPT440_DOCK,
        },
-       [ALC283_FIXUP_BXBT2807_MIC] = {
+       [ALC283_FIXUP_HEADSET_MIC] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
                        { 0x19, 0x04a110f0 },
@@ -5651,7 +5651,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
-       SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
+       SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
index 54c09acd3fed06f4fba7ef602d0bd7466825c100..16e459aedffe4c3a4af7733b9ea6cf22087c7fa2 100644 (file)
@@ -299,8 +299,9 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
        clk_enable(ssc_p->ssc->clk);
        ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
 
-       /* Reset the SSC to keep it at a clean status */
-       ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
+       /* Reset the SSC unless initialized to keep it in a clean state */
+       if (!ssc_p->initialized)
+               ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                dir = 0;
index e5527bc570ae550394807f8062092af3e8f0a6e1..bcf1834c564812a6af86d547eb880cd93e83214e 100644 (file)
@@ -1247,8 +1247,8 @@ static int da7213_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
                return -EINVAL;
        }
 
-       /* By default only 32 BCLK per WCLK is supported */
-       dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_32;
+       /* By default only 64 BCLK per WCLK is supported */
+       dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_64;
 
        snd_soc_write(codec, DA7213_DAI_CLK_MODE, dai_clk_mode);
        snd_soc_update_bits(codec, DA7213_DAI_CTRL, DA7213_DAI_FORMAT_MASK,
index cf0a39bb631aac9ae9bccd6c0b5163e65d8178b1..02352ed8961c47ccb270ef9ff8f7aff3784bd4ca 100644 (file)
@@ -412,6 +412,7 @@ static int max98371_i2c_remove(struct i2c_client *client)
 
 static const struct i2c_device_id max98371_i2c_id[] = {
        { "max98371", 0 },
+       { }
 };
 
 MODULE_DEVICE_TABLE(i2c, max98371_i2c_id);
index 5c9707ac4bbff20e1240d9961ea28bcc8d5af82f..2e59a85e360b67cb89ffed8fca86148db8d0a6c1 100644 (file)
@@ -212,31 +212,6 @@ static const unsigned short logtable[256] = {
        0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47
 };
 
-static struct snd_soc_dai *nau8825_get_codec_dai(struct nau8825 *nau8825)
-{
-       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(nau8825->dapm);
-       struct snd_soc_component *component = &codec->component;
-       struct snd_soc_dai *codec_dai, *_dai;
-
-       list_for_each_entry_safe(codec_dai, _dai, &component->dai_list, list) {
-               if (!strncmp(codec_dai->name, NUVOTON_CODEC_DAI,
-                       strlen(NUVOTON_CODEC_DAI)))
-                       return codec_dai;
-       }
-       return NULL;
-}
-
-static bool nau8825_dai_is_active(struct nau8825 *nau8825)
-{
-       struct snd_soc_dai *codec_dai = nau8825_get_codec_dai(nau8825);
-
-       if (codec_dai) {
-               if (codec_dai->playback_active || codec_dai->capture_active)
-                       return true;
-       }
-       return false;
-}
-
 /**
  * nau8825_sema_acquire - acquire the semaphore of nau88l25
  * @nau8825:  component to register the codec private data with
@@ -250,19 +225,26 @@ static bool nau8825_dai_is_active(struct nau8825 *nau8825)
  * Acquires the semaphore without jiffies. If no more tasks are allowed
  * to acquire the semaphore, calling this function will put the task to
  * sleep until the semaphore is released.
- * It returns if the semaphore was acquired.
+ * If the semaphore is not released within the specified number of jiffies,
+ * this function returns -ETIME.
+ * If the sleep is interrupted by a signal, this function will return -EINTR.
+ * It returns 0 if the semaphore was acquired successfully.
  */
-static void nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
+static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
 {
        int ret;
 
-       if (timeout)
+       if (timeout) {
                ret = down_timeout(&nau8825->xtalk_sem, timeout);
-       else
+               if (ret < 0)
+                       dev_warn(nau8825->dev, "Acquire semaphore timeout\n");
+       } else {
                ret = down_interruptible(&nau8825->xtalk_sem);
+               if (ret < 0)
+                       dev_warn(nau8825->dev, "Acquire semaphore fail\n");
+       }
 
-       if (ret < 0)
-               dev_warn(nau8825->dev, "Acquire semaphone fail\n");
+       return ret;
 }
 
 /**
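Per the updated kernel-doc above, down_timeout() now propagates 0 or -ETIME and down_interruptible() 0 or -EINTR instead of the helper swallowing the status. A userspace analogue using POSIX semaphores; sem_timedwait()/sem_wait() are the rough equivalents, reporting errors through errno rather than a negative return:

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

/* Acquire sem, waiting at most timeout_s seconds if timeout_s > 0,
 * else block until interrupted.  Returns 0 on success, -errno on
 * failure, mirroring the 0 / -ETIME / -EINTR contract above. */
static int sema_acquire(sem_t *sem, int timeout_s)
{
    int ret;

    if (timeout_s > 0) {
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);  /* sem_timedwait is absolute */
        ts.tv_sec += timeout_s;
        ret = sem_timedwait(sem, &ts);
    } else {
        ret = sem_wait(sem);
    }
    if (ret < 0) {
        int err = errno;                     /* save before fprintf */

        fprintf(stderr, "acquire failed: %s\n",
                err == ETIMEDOUT ? "timeout" : "interrupted");
        return -err;
    }
    return 0;
}

int main(void)
{
    sem_t sem;

    sem_init(&sem, 0, 0);                    /* start unavailable */
    if (sema_acquire(&sem, 1) == -ETIMEDOUT)
        puts("timed out as expected");
    sem_destroy(&sem);
    return 0;
}
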
@@ -1205,6 +1187,8 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
        struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
        unsigned int val_len = 0;
 
+       nau8825_sema_acquire(nau8825, 2 * HZ);
+
        switch (params_width(params)) {
        case 16:
                val_len |= NAU8825_I2S_DL_16;
@@ -1225,6 +1209,9 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
        regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1,
                NAU8825_I2S_DL_MASK, val_len);
 
+       /* Release the semaphore. */
+       nau8825_sema_release(nau8825);
+
        return 0;
 }
 
@@ -1234,6 +1221,8 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
        unsigned int ctrl1_val = 0, ctrl2_val = 0;
 
+       nau8825_sema_acquire(nau8825, 2 * HZ);
+
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBM_CFM:
                ctrl2_val |= NAU8825_I2S_MS_MASTER;
@@ -1282,6 +1271,9 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
                NAU8825_I2S_MS_MASK, ctrl2_val);
 
+       /* Release the semaphone. */
+       /* Release the semaphore. */
+
        return 0;
 }
 
@@ -1611,8 +1603,11 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
                                         * cess and restore changes if process
                                         * is ongoing when ejection.
                                         */
+                                       int ret;
                                        nau8825->xtalk_protect = true;
-                                       nau8825_sema_acquire(nau8825, 0);
+                                       ret = nau8825_sema_acquire(nau8825, 0);
+                                       if (ret < 0)
+                                               nau8825->xtalk_protect = false;
                                }
                                /* Startup cross talk detection process */
                                nau8825->xtalk_state = NAU8825_XTALK_PREPARE;
@@ -2238,23 +2233,14 @@ static int __maybe_unused nau8825_suspend(struct snd_soc_codec *codec)
 static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec)
 {
        struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+       int ret;
 
        regcache_cache_only(nau8825->regmap, false);
        regcache_sync(nau8825->regmap);
-       if (nau8825_is_jack_inserted(nau8825->regmap)) {
-               /* If the jack is inserted, we need to check whether the play-
-                * back is active before suspend. If active, the driver has to
-                * raise the protection for cross talk function to avoid the
-                * playback recovers before cross talk process finish. Other-
-                * wise, the playback will be interfered by cross talk func-
-                * tion. It is better to apply hardware related parameters
-                * before starting playback or record.
-                */
-               if (nau8825_dai_is_active(nau8825)) {
-                       nau8825->xtalk_protect = true;
-                       nau8825_sema_acquire(nau8825, 0);
-               }
-       }
+       nau8825->xtalk_protect = true;
+       ret = nau8825_sema_acquire(nau8825, 0);
+       if (ret < 0)
+               nau8825->xtalk_protect = false;
        enable_irq(nau8825->irq);
 
        return 0;
index a67ea10f41a1adce51e3625986e1170904429837..f2664396be6fd752c3fa62f398a6e2e1857eef91 100644 (file)
@@ -581,7 +581,7 @@ static int wm2000_anc_transition(struct wm2000_priv *wm2000,
        if (anc_transitions[i].dest == ANC_OFF)
                clk_disable_unprepare(wm2000->mclk);
 
-       return ret;
+       return 0;
 }
 
 static int wm2000_anc_set_mode(struct wm2000_priv *wm2000)
index 45602ca8536ea7935cf0a019eb399d2dd432cb5f..2d53c8d70705b2688a748967bbd0517335138fe2 100644 (file)
@@ -1,5 +1,5 @@
-obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) := simple-card-utils.o
-
+snd-soc-simple-card-utils-objs := simple-card-utils.o
 snd-soc-simple-card-objs       := simple-card.o
 
-obj-$(CONFIG_SND_SIMPLE_CARD)  += snd-soc-simple-card.o
+obj-$(CONFIG_SND_SIMPLE_CARD_UTILS)    += snd-soc-simple-card-utils.o
+obj-$(CONFIG_SND_SIMPLE_CARD)          += snd-soc-simple-card.o
index d89a9a1b2471554042df5d372b8220f7a54a7cf2..9599de69a880eb2a59ecb3fe142eb84a7ddacbcc 100644 (file)
@@ -7,6 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/module.h>
 #include <linux/of.h>
 #include <sound/simple_card_utils.h>
 
@@ -95,3 +96,8 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
        return 0;
 }
 EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name);
+
+/* Module information */
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("ALSA SoC Simple Card Utils");
+MODULE_LICENSE("GPL v2");
index 25fcb796bd86414c49a08df1af445b1b4127a7a2..ddcb52a5185481057fc40b406bf3f09077b51984 100644 (file)
@@ -123,6 +123,11 @@ int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
 
        uuid_mod = (uuid_le *)uuid;
 
+       if (list_empty(&ctx->uuid_list)) {
+               dev_err(ctx->dev, "Module list is empty\n");
+               return -EINVAL;
+       }
+
        list_for_each_entry(module, &ctx->uuid_list, list) {
                if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
                        dfw_config->module_id = module->id;
index cd59536a761dd84bcca2fb3afb44dc82adf1b417..e3e7641677657ecffb828ec94c865d0e80201dbf 100644 (file)
@@ -672,8 +672,10 @@ static int skl_probe(struct pci_dev *pci,
 
        skl->nhlt = skl_nhlt_init(bus->dev);
 
-       if (skl->nhlt == NULL)
+       if (skl->nhlt == NULL) {
+               err = -ENODEV;
                goto out_free;
+       }
 
        skl_nhlt_update_topology_bin(skl);
 
index 0843a68f277c2cffb0bdffa9a5a200b86544950f..f61b3b58083b9c7cf6d8848de3318b00902ce61b 100644 (file)
 struct abe_twl6040 {
        int     jack_detection; /* board can detect jack events */
        int     mclk_freq;      /* MCLK frequency speed for twl6040 */
-
-       struct platform_device *dmic_codec_dev;
 };
 
+struct platform_device *dmic_codec_dev;
+
 static int omap_abe_hw_params(struct snd_pcm_substream *substream,
        struct snd_pcm_hw_params *params)
 {
@@ -258,8 +258,6 @@ static int omap_abe_probe(struct platform_device *pdev)
        if (priv == NULL)
                return -ENOMEM;
 
-       priv->dmic_codec_dev = ERR_PTR(-EINVAL);
-
        if (snd_soc_of_parse_card_name(card, "ti,model")) {
                dev_err(&pdev->dev, "Card name is not provided\n");
                return -ENODEV;
@@ -284,13 +282,6 @@ static int omap_abe_probe(struct platform_device *pdev)
                num_links = 2;
                abe_twl6040_dai_links[1].cpu_of_node = dai_node;
                abe_twl6040_dai_links[1].platform_of_node = dai_node;
-
-               priv->dmic_codec_dev = platform_device_register_simple(
-                                               "dmic-codec", -1, NULL, 0);
-               if (IS_ERR(priv->dmic_codec_dev)) {
-                       dev_err(&pdev->dev, "Can't instantiate dmic-codec\n");
-                       return PTR_ERR(priv->dmic_codec_dev);
-               }
        } else {
                num_links = 1;
        }
@@ -299,16 +290,14 @@ static int omap_abe_probe(struct platform_device *pdev)
        of_property_read_u32(node, "ti,mclk-freq", &priv->mclk_freq);
        if (!priv->mclk_freq) {
                dev_err(&pdev->dev, "MCLK frequency not provided\n");
-               ret = -EINVAL;
-               goto err_unregister;
+               return -EINVAL;
        }
 
        card->fully_routed = 1;
 
        if (!priv->mclk_freq) {
                dev_err(&pdev->dev, "MCLK frequency missing\n");
-               ret = -ENODEV;
-               goto err_unregister;
+               return -ENODEV;
        }
 
        card->dai_link = abe_twl6040_dai_links;
@@ -317,17 +306,9 @@ static int omap_abe_probe(struct platform_device *pdev)
        snd_soc_card_set_drvdata(card, priv);
 
        ret = snd_soc_register_card(card);
-       if (ret) {
+       if (ret)
                dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
                        ret);
-               goto err_unregister;
-       }
-
-       return 0;
-
-err_unregister:
-       if (!IS_ERR(priv->dmic_codec_dev))
-               platform_device_unregister(priv->dmic_codec_dev);
 
        return ret;
 }
@@ -335,13 +316,9 @@ static int omap_abe_probe(struct platform_device *pdev)
 static int omap_abe_remove(struct platform_device *pdev)
 {
        struct snd_soc_card *card = platform_get_drvdata(pdev);
-       struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
 
        snd_soc_unregister_card(card);
 
-       if (!IS_ERR(priv->dmic_codec_dev))
-               platform_device_unregister(priv->dmic_codec_dev);
-
        return 0;
 }
 
@@ -361,7 +338,33 @@ static struct platform_driver omap_abe_driver = {
        .remove = omap_abe_remove,
 };
 
-module_platform_driver(omap_abe_driver);
+static int __init omap_abe_init(void)
+{
+       int ret;
+
+       dmic_codec_dev = platform_device_register_simple("dmic-codec", -1, NULL,
+                                                        0);
+       if (IS_ERR(dmic_codec_dev)) {
+               pr_err("%s: dmic-codec device registration failed\n", __func__);
+               return PTR_ERR(dmic_codec_dev);
+       }
+
+       ret = platform_driver_register(&omap_abe_driver);
+       if (ret) {
+               pr_err("%s: platform driver registration failed\n", __func__);
+               platform_device_unregister(dmic_codec_dev);
+       }
+
+       return ret;
+}
+module_init(omap_abe_init);
+
+static void __exit omap_abe_exit(void)
+{
+       platform_driver_unregister(&omap_abe_driver);
+       platform_device_unregister(dmic_codec_dev);
+}
+module_exit(omap_abe_exit);
 
 MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
 MODULE_DESCRIPTION("ALSA SoC for OMAP boards with ABE and twl6040 codec");
index e7cdc51fd806587a9ec1f8b4c8103bbffb5858d8..64609c77a79d1b875ac577801beadd5029a6fcac 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/irq.h>
-#include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 #include <linux/of_device.h>
@@ -55,7 +54,6 @@ struct omap_mcpdm {
        unsigned long phys_base;
        void __iomem *io_base;
        int irq;
-       struct clk *pdmclk;
 
        struct mutex mutex;
 
@@ -390,15 +388,14 @@ static int omap_mcpdm_probe(struct snd_soc_dai *dai)
        struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
        int ret;
 
-       clk_prepare_enable(mcpdm->pdmclk);
        pm_runtime_enable(mcpdm->dev);
 
        /* Disable lines while request is ongoing */
        pm_runtime_get_sync(mcpdm->dev);
        omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00);
 
-       ret = devm_request_irq(mcpdm->dev, mcpdm->irq, omap_mcpdm_irq_handler,
-                               0, "McPDM", (void *)mcpdm);
+       ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM",
+                         (void *)mcpdm);
 
        pm_runtime_put_sync(mcpdm->dev);
 
@@ -423,9 +420,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
 {
        struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
 
+       free_irq(mcpdm->irq, (void *)mcpdm);
        pm_runtime_disable(mcpdm->dev);
 
-       clk_disable_unprepare(mcpdm->pdmclk);
        return 0;
 }
 
@@ -445,8 +442,6 @@ static int omap_mcpdm_suspend(struct snd_soc_dai *dai)
                mcpdm->pm_active_count++;
        }
 
-       clk_disable_unprepare(mcpdm->pdmclk);
-
        return 0;
 }
 
@@ -454,8 +449,6 @@ static int omap_mcpdm_resume(struct snd_soc_dai *dai)
 {
        struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
 
-       clk_prepare_enable(mcpdm->pdmclk);
-
        if (mcpdm->pm_active_count) {
                while (mcpdm->pm_active_count--)
                        pm_runtime_get_sync(mcpdm->dev);
@@ -549,15 +542,6 @@ static int asoc_mcpdm_probe(struct platform_device *pdev)
 
        mcpdm->dev = &pdev->dev;
 
-       mcpdm->pdmclk = devm_clk_get(&pdev->dev, "pdmclk");
-       if (IS_ERR(mcpdm->pdmclk)) {
-               if (PTR_ERR(mcpdm->pdmclk) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
-               dev_warn(&pdev->dev, "Error getting pdmclk (%ld)!\n",
-                        PTR_ERR(mcpdm->pdmclk));
-               mcpdm->pdmclk = NULL;
-       }
-
        ret =  devm_snd_soc_register_component(&pdev->dev,
                                               &omap_mcpdm_component,
                                               &omap_mcpdm_dai, 1);
index 50849e137fc0edf2455a7d643b17c936f2e63794..92e88bca386e8199d4316898d27195e64251a626 100644 (file)
@@ -58,10 +58,12 @@ static struct platform_device *s3c24xx_uda134x_snd_device;
 
 static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
 {
-       int ret = 0;
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 #ifdef ENFORCE_RATES
        struct snd_pcm_runtime *runtime = substream->runtime;
 #endif
+       int ret = 0;
 
        mutex_lock(&clk_lock);
        pr_debug("%s %d\n", __func__, clk_users);
@@ -71,8 +73,7 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
                        printk(KERN_ERR "%s cannot get xtal\n", __func__);
                        ret = PTR_ERR(xtal);
                } else {
-                       pclk = clk_get(&s3c24xx_uda134x_snd_device->dev,
-                                      "pclk");
+                       pclk = clk_get(cpu_dai->dev, "iis");
                        if (IS_ERR(pclk)) {
                                printk(KERN_ERR "%s cannot get pclk\n",
                                       __func__);
index e39f916d0f2fa811b1faece1d212b0f8fe1fc5d8..969a5169de255a2fe66202ffc89808c89bfe98ec 100644 (file)
@@ -226,8 +226,12 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        ifscr = 0;
        fsrate = 0;
        if (fin != fout) {
+               u64 n;
+
                ifscr = 1;
-               fsrate = 0x0400000 / fout * fin;
+               n = (u64)0x0400000 * fin;
+               do_div(n, fout);
+               fsrate = n;
        }
 
        /*
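Dividing first, as in the old `0x0400000 / fout * fin`, truncates before the multiply and loses precision; widening to u64, multiplying, then dividing (which is what do_div() performs in-kernel) keeps the full ratio. A quick standalone comparison, with rates chosen only for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t fin = 48000, fout = 44100;   /* example input/output rates */

    /* old: integer-divide first, discarding the fractional part */
    uint32_t bad = 0x0400000 / fout * fin;

    /* fixed: widen, multiply, then divide */
    uint64_t n = (uint64_t)0x0400000 * fin;
    uint32_t good = (uint32_t)(n / fout);

    printf("truncate-first: %" PRIu32 "\n", bad);    /* 4560000 */
    printf("multiply-first: %" PRIu32 "\n", good);   /* 4565228 */
    return 0;
}
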
index d2df46c14c6823599c6210b3a31c9b1aa60ce72e..bf7b52fce597422cc2a2c171b9d6ac97c4e0c1e2 100644 (file)
@@ -121,7 +121,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
 
                dpcm_be_disconnect(fe, stream);
                fe->dpcm[stream].runtime = NULL;
-               goto fe_err;
+               goto path_err;
        }
 
        dpcm_clear_pending_state(fe, stream);
@@ -136,6 +136,8 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
 
        return 0;
 
+path_err:
+       dpcm_path_put(&list);
 fe_err:
        if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
                fe->dai_link->compr_ops->shutdown(cstream);
index 16369cad480388c326f2463273e17ebef0324924..4afa8dba5e982f664b1a214226688c50dfee1212 100644 (file)
@@ -1056,7 +1056,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
        if (!rtd->platform) {
                dev_err(card->dev, "ASoC: platform %s not registered\n",
                        dai_link->platform_name);
-               return -EPROBE_DEFER;
+               goto _err_defer;
        }
 
        soc_add_pcm_runtime(card, rtd);
@@ -2083,14 +2083,13 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
        /* remove auxiliary devices */
        soc_remove_aux_devices(card);
 
+       snd_soc_dapm_free(&card->dapm);
        soc_cleanup_card_debugfs(card);
 
        /* remove the card */
        if (card->remove)
                card->remove(card);
 
-       snd_soc_dapm_free(&card->dapm);
-
        snd_card_free(card->snd_card);
        return 0;
 
index 8698c26773b3c8d43e52a5610155348d6261a2e5..d908ff8f97554cacdf6658288222744209287aee 100644 (file)
@@ -3493,6 +3493,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
        const struct snd_soc_pcm_stream *config = w->params + w->params_select;
        struct snd_pcm_substream substream;
        struct snd_pcm_hw_params *params = NULL;
+       struct snd_pcm_runtime *runtime = NULL;
        u64 fmt;
        int ret;
 
@@ -3541,6 +3542,14 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
 
        memset(&substream, 0, sizeof(substream));
 
+       /* Allocate a dummy snd_pcm_runtime for startup() and other ops() */
+       runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
+       if (!runtime) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       substream.runtime = runtime;
+
        switch (event) {
        case SND_SOC_DAPM_PRE_PMU:
                substream.stream = SNDRV_PCM_STREAM_CAPTURE;
@@ -3606,6 +3615,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
        }
 
 out:
+       kfree(runtime);
        kfree(params);
        return ret;
 }
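The dummy runtime is freed on every path through the common `out:` label; because kfree(NULL) is a no-op, the early -ENOMEM exit needs no special casing. The same single-exit idiom in plain C, where free(NULL) is likewise harmless (the function and buffer sizes are illustrative):

#include <stdlib.h>
#include <string.h>

/* Single-exit cleanup: every allocation funnels through 'out', and
 * free(NULL) being a no-op covers the partially-initialized cases. */
static int do_work(void)
{
    char *params = NULL, *runtime = NULL;
    int ret = 0;

    params = malloc(64);
    if (!params) {
        ret = -1;
        goto out;
    }

    runtime = malloc(128);
    if (!runtime) {
        ret = -1;
        goto out;                  /* params is still freed below */
    }

    memset(runtime, 0, 128);       /* ... use the buffers ... */

out:
    free(runtime);
    free(params);
    return ret;
}

int main(void)
{
    return do_work() ? 1 : 0;
}
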
index 204cc074adb96f8f99ebd9d69bd7e1e4cf91f98e..41aa3355e920499830aeefdaf961659dfcabdba3 100644 (file)
@@ -55,7 +55,6 @@ static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol,
                err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE);
                if (err < 0) {
                        line6pcm->impulse_volume = 0;
-                       line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE);
                        return err;
                }
        } else {
@@ -211,7 +210,9 @@ static void line6_stream_stop(struct snd_line6_pcm *line6pcm, int direction,
        spin_lock_irqsave(&pstr->lock, flags);
        clear_bit(type, &pstr->running);
        if (!pstr->running) {
+               spin_unlock_irqrestore(&pstr->lock, flags);
                line6_unlink_audio_urbs(line6pcm, pstr);
+               spin_lock_irqsave(&pstr->lock, flags);
                if (direction == SNDRV_PCM_STREAM_CAPTURE) {
                        line6pcm->prev_fbuf = NULL;
                        line6pcm->prev_fsize = 0;
index daf81d169a42043f265a48df58afcc5490d40f97..45dd34874f43a7f00fc481b25d7b5bf80ffb6c02 100644 (file)
@@ -244,8 +244,8 @@ static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
 static ssize_t serial_number_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
 {
-       struct usb_interface *interface = to_usb_interface(dev);
-       struct usb_line6_pod *pod = usb_get_intfdata(interface);
+       struct snd_card *card = dev_to_snd_card(dev);
+       struct usb_line6_pod *pod = card->private_data;
 
        return sprintf(buf, "%u\n", pod->serial_number);
 }
@@ -256,8 +256,8 @@ static ssize_t serial_number_show(struct device *dev,
 static ssize_t firmware_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
-       struct usb_interface *interface = to_usb_interface(dev);
-       struct usb_line6_pod *pod = usb_get_intfdata(interface);
+       struct snd_card *card = dev_to_snd_card(dev);
+       struct usb_line6_pod *pod = card->private_data;
 
        return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100,
                       pod->firmware_version % 100);
@@ -269,8 +269,8 @@ static ssize_t firmware_version_show(struct device *dev,
 static ssize_t device_id_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
 {
-       struct usb_interface *interface = to_usb_interface(dev);
-       struct usb_line6_pod *pod = usb_get_intfdata(interface);
+       struct snd_card *card = dev_to_snd_card(dev);
+       struct usb_line6_pod *pod = card->private_data;
 
        return sprintf(buf, "%d\n", pod->device_id);
 }
index 448ed96b3b4f88edde1bdcad5d7750edf73099e6..1c14c25951583611167fe0d48ab00479d108232a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * gpio-hammer - example swiss army knife to shake GPIO lines on a system
+ * gpio-event-mon - monitor GPIO line events from userspace
  *
  * Copyright (C) 2016 Linus Walleij
  *
index b968794773116527208be6eb4b7dfccff00edd2f..f436d2420a18575095e66b7bfbf15b0abbe83fe8 100644 (file)
@@ -8,7 +8,11 @@ void *memdup(const void *src, size_t len);
 
 int strtobool(const char *s, bool *res);
 
-#ifdef __GLIBC__
+/*
+ * glibc-based builds need the extern while uClibc doesn't.
+ * However, uClibc headers also define __GLIBC__, hence the hack below.
+ */
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
 extern size_t strlcpy(char *dest, const char *src, size_t size);
 #endif
 
index d9b80ef881cde43a1e134b89c6a752f2ae06d108..21fd573106edd90ee4c33b3a0a9b46e43ecd052b 100644 (file)
@@ -507,17 +507,17 @@ static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
        u8 op, result, type = (config >>  0) & 0xff;
        const char *err = "unknown-ext-hardware-cache-type";
 
-       if (type > PERF_COUNT_HW_CACHE_MAX)
+       if (type >= PERF_COUNT_HW_CACHE_MAX)
                goto out_err;
 
        op = (config >>  8) & 0xff;
        err = "unknown-ext-hardware-cache-op";
-       if (op > PERF_COUNT_HW_CACHE_OP_MAX)
+       if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
                goto out_err;
 
        result = (config >> 16) & 0xff;
        err = "unknown-ext-hardware-cache-result";
-       if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
+       if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                goto out_err;
 
        err = "invalid-cache";
index cf5e250bc78e1a4c82a08fc2ea50d5876bec1c69..783a53fb7a4ed41c2fef9e90de26355e6b1bcc7e 100644 (file)
@@ -66,7 +66,7 @@ static int entry(u64 ip, struct unwind_info *ui)
        if (__report_module(&al, ip, ui))
                return -1;
 
-       e->ip  = ip;
+       e->ip  = al.addr;
        e->map = al.map;
        e->sym = al.sym;
 
index 97c0f8fc55615961df9cd1a70f0c94125860d39a..20c2e5743903872771ae975b340b63aec521c3d3 100644 (file)
@@ -542,7 +542,7 @@ static int entry(u64 ip, struct thread *thread,
        thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
                                   MAP__FUNCTION, ip, &al);
 
-       e.ip = ip;
+       e.ip = al.addr;
        e.map = al.map;
        e.sym = al.sym;
 
index 4fde8c7dfcfe18ce2a0d96ffb465d177ed590e86..77e6ccf149011b2f3b43ba01aa1454fd08889dc8 100644 (file)
@@ -33,6 +33,7 @@
 static struct timecounter *timecounter;
 static struct workqueue_struct *wqueue;
 static unsigned int host_vtimer_irq;
+static u32 host_vtimer_irq_flags;
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 {
@@ -365,7 +366,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void kvm_timer_init_interrupt(void *info)
 {
-       enable_percpu_irq(host_vtimer_irq, 0);
+       enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
@@ -432,6 +433,14 @@ int kvm_timer_hyp_init(void)
        }
        host_vtimer_irq = info->virtual_irq;
 
+       host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
+       if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
+           host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
+               kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
+                       host_vtimer_irq);
+               host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
+       }
+
        err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
index 07411cf967b98767bada3c6287fc1cc69c53a86a..4660a7d04eeaf9792b4df044a092054759977046 100644 (file)
@@ -51,7 +51,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
 
        irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
        if (!irq)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        INIT_LIST_HEAD(&irq->lpi_list);
        INIT_LIST_HEAD(&irq->ap_list);
@@ -441,39 +441,63 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
  * Find the target VCPU and the LPI number for a given devid/eventid pair
  * and make this IRQ pending, possibly injecting it.
  * Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for any ITS-mapping-related
+ * error, and a negative error value for generic errors.
  */
-static void vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
-                                u32 devid, u32 eventid)
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+                               u32 devid, u32 eventid)
 {
+       struct kvm_vcpu *vcpu;
        struct its_itte *itte;
 
        if (!its->enabled)
-               return;
+               return -EBUSY;
 
        itte = find_itte(its, devid, eventid);
-       /* Triggering an unmapped IRQ gets silently dropped. */
-       if (itte && its_is_collection_mapped(itte->collection)) {
-               struct kvm_vcpu *vcpu;
-
-               vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
-               if (vcpu && vcpu->arch.vgic_cpu.lpis_enabled) {
-                       spin_lock(&itte->irq->irq_lock);
-                       itte->irq->pending = true;
-                       vgic_queue_irq_unlock(kvm, itte->irq);
-               }
-       }
+       if (!itte || !its_is_collection_mapped(itte->collection))
+               return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+       vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
+       if (!vcpu)
+               return E_ITS_INT_UNMAPPED_INTERRUPT;
+
+       if (!vcpu->arch.vgic_cpu.lpis_enabled)
+               return -EBUSY;
+
+       spin_lock(&itte->irq->irq_lock);
+       itte->irq->pending = true;
+       vgic_queue_irq_unlock(kvm, itte->irq);
+
+       return 0;
+}
+
+static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+{
+       struct vgic_io_device *iodev;
+
+       if (dev->ops != &kvm_io_gic_ops)
+               return NULL;
+
+       iodev = container_of(dev, struct vgic_io_device, dev);
+
+       if (iodev->iodev_type != IODEV_ITS)
+               return NULL;
+
+       return iodev;
 }
 
 /*
  * Queries the KVM IO bus framework to get the ITS pointer from the given
  * doorbell address.
  * We then call vgic_its_trigger_msi() with the decoded data.
+ * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
  */
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
        u64 address;
        struct kvm_io_device *kvm_io_dev;
        struct vgic_io_device *iodev;
+       int ret;
 
        if (!vgic_has_its(kvm))
                return -ENODEV;
@@ -485,15 +509,28 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 
        kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
        if (!kvm_io_dev)
-               return -ENODEV;
+               return -EINVAL;
 
-       iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
+       iodev = vgic_get_its_iodev(kvm_io_dev);
+       if (!iodev)
+               return -EINVAL;
 
        mutex_lock(&iodev->its->its_lock);
-       vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
+       ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
        mutex_unlock(&iodev->its->its_lock);
 
-       return 0;
+       if (ret < 0)
+               return ret;
+
+       /*
+        * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
+        * if the guest has blocked the MSI. So we map any LPI mapping
+        * related error to that.
+        */
+       if (ret)
+               return 0;
+       else
+               return 1;
 }
 
 /* Requires the its_lock to be held. */
@@ -502,7 +539,8 @@ static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
        list_del(&itte->itte_list);
 
        /* This put matches the get in vgic_add_lpi. */
-       vgic_put_irq(kvm, itte->irq);
+       if (itte->irq)
+               vgic_put_irq(kvm, itte->irq);
 
        kfree(itte);
 }
@@ -697,6 +735,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
        struct its_device *device;
        struct its_collection *collection, *new_coll = NULL;
        int lpi_nr;
+       struct vgic_irq *irq;
 
        device = find_its_device(its, device_id);
        if (!device)
@@ -710,6 +749,10 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
            lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
                return E_ITS_MAPTI_PHYSICALID_OOR;
 
+       /* If there is an existing mapping, behavior is UNPREDICTABLE. */
+       if (find_itte(its, device_id, event_id))
+               return 0;
+
        collection = find_collection(its, coll_id);
        if (!collection) {
                int ret = vgic_its_alloc_collection(its, &collection, coll_id);
@@ -718,22 +761,28 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
                new_coll = collection;
        }
 
-       itte = find_itte(its, device_id, event_id);
+       itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
        if (!itte) {
-               itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
-               if (!itte) {
-                       if (new_coll)
-                               vgic_its_free_collection(its, coll_id);
-                       return -ENOMEM;
-               }
-
-               itte->event_id  = event_id;
-               list_add_tail(&itte->itte_list, &device->itt_head);
+               if (new_coll)
+                       vgic_its_free_collection(its, coll_id);
+               return -ENOMEM;
        }
 
+       itte->event_id  = event_id;
+       list_add_tail(&itte->itte_list, &device->itt_head);
+
        itte->collection = collection;
        itte->lpi = lpi_nr;
-       itte->irq = vgic_add_lpi(kvm, lpi_nr);
+
+       irq = vgic_add_lpi(kvm, lpi_nr);
+       if (IS_ERR(irq)) {
+               if (new_coll)
+                       vgic_its_free_collection(its, coll_id);
+               its_free_itte(kvm, itte);
+               return PTR_ERR(irq);
+       }
+       itte->irq = irq;
+
        update_affinity_itte(kvm, itte);
 
        /*
@@ -981,9 +1030,7 @@ static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
        u32 msi_data = its_cmd_get_id(its_cmd);
        u64 msi_devid = its_cmd_get_deviceid(its_cmd);
 
-       vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
-
-       return 0;
+       return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
 }
 
 /*
@@ -1288,13 +1335,13 @@ void vgic_enable_lpis(struct kvm_vcpu *vcpu)
                its_sync_lpi_pending_table(vcpu);
 }
 
-static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
+static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
 {
        struct vgic_io_device *iodev = &its->iodev;
        int ret;
 
-       if (its->initialized)
-               return 0;
+       if (!its->initialized)
+               return -EBUSY;
 
        if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
                return -ENXIO;
@@ -1311,9 +1358,6 @@ static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
                                      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
        mutex_unlock(&kvm->slots_lock);
 
-       if (!ret)
-               its->initialized = true;
-
        return ret;
 }
 
@@ -1435,9 +1479,6 @@ static int vgic_its_set_attr(struct kvm_device *dev,
                if (type != KVM_VGIC_ITS_ADDR_TYPE)
                        return -ENODEV;
 
-               if (its->initialized)
-                       return -EBUSY;
-
                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;
 
@@ -1453,7 +1494,9 @@ static int vgic_its_set_attr(struct kvm_device *dev,
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
-                       return vgic_its_init_its(dev->kvm, its);
+                       its->initialized = true;
+
+                       return 0;
                }
                break;
        }
@@ -1498,3 +1541,30 @@ int kvm_vgic_register_its_device(void)
        return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
                                       KVM_DEV_TYPE_ARM_VGIC_ITS);
 }
+
+/*
+ * Registers all ITSes with the kvm_io_bus framework.
+ * To follow the existing VGIC initialization sequence, this has to be
+ * done as late as possible, just before the first VCPU runs.
+ */
+int vgic_register_its_iodevs(struct kvm *kvm)
+{
+       struct kvm_device *dev;
+       int ret = 0;
+
+       list_for_each_entry(dev, &kvm->devices, vm_node) {
+               if (dev->ops != &kvm_arm_vgic_its_ops)
+                       continue;
+
+               ret = vgic_register_its_iodev(kvm, dev->private);
+               if (ret)
+                       return ret;
+               /*
+                * We don't need to care about tearing down previously
+                * registered ITSes, as the kvm_io_bus framework removes
+                * them for us if the VM gets destroyed.
+                */
+       }
+
+       return ret;
+}
index ff668e0dd586de477767b796e780597c6736eeca..90d81811fdda0e653e19fe92c640bede9053f617 100644 (file)
@@ -306,16 +306,19 @@ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
 {
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       u64 propbaser = dist->propbaser;
+       u64 old_propbaser, propbaser;
 
        /* Storing a value with LPIs already enabled is undefined */
        if (vgic_cpu->lpis_enabled)
                return;
 
-       propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
-       propbaser = vgic_sanitise_propbaser(propbaser);
-
-       dist->propbaser = propbaser;
+       do {
+               old_propbaser = dist->propbaser;
+               propbaser = old_propbaser;
+               propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
+               propbaser = vgic_sanitise_propbaser(propbaser);
+       } while (cmpxchg64(&dist->propbaser, old_propbaser,
+                          propbaser) != old_propbaser);
 }
 
 static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
@@ -331,16 +334,19 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
                                     unsigned long val)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-       u64 pendbaser = vgic_cpu->pendbaser;
+       u64 old_pendbaser, pendbaser;
 
        /* Storing a value with LPIs already enabled is undefined */
        if (vgic_cpu->lpis_enabled)
                return;
 
-       pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
-       pendbaser = vgic_sanitise_pendbaser(pendbaser);
-
-       vgic_cpu->pendbaser = pendbaser;
+       do {
+               old_pendbaser = vgic_cpu->pendbaser;
+               pendbaser = old_pendbaser;
+               pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
+               pendbaser = vgic_sanitise_pendbaser(pendbaser);
+       } while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
+                          pendbaser) != old_pendbaser);
 }
 
 /*
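Guests can write PROPBASER/PENDBASER concurrently from several VCPUs, so both registers are now updated with a compare-and-swap retry loop instead of a plain read-modify-write. The same lock-free pattern with C11 atomics on a 64-bit value; update_field() is only a stand-in for update_64bit_reg() plus the sanitising step:

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t reg;

/* Illustrative transform standing in for update_64bit_reg() +
 * vgic_sanitise_*(): set the low byte, clear a reserved top bit. */
static uint64_t update_field(uint64_t old, uint8_t val)
{
    return (old & ~0xffull & ~(1ull << 63)) | val;
}

/* Retry until no other writer slipped in between our read and write. */
static void write_reg(uint8_t val)
{
    uint64_t old = atomic_load(&reg);
    uint64_t new;

    do {
        new = update_field(old, val);
        /* on failure, 'old' is reloaded with the current value */
    } while (!atomic_compare_exchange_weak(&reg, &old, new));
}

int main(void)
{
    atomic_store(&reg, (1ull << 63) | 0x55);
    write_reg(0xaa);
    assert(atomic_load(&reg) == 0xaa);   /* field set, reserved bit cleared */
    return 0;
}
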
index 0506543df38a7aeee6c24dbfc60b84d433e6c7d7..9f0dae397d9c818b1d0237076d0bc59020a05950 100644 (file)
@@ -289,6 +289,14 @@ int vgic_v3_map_resources(struct kvm *kvm)
                goto out;
        }
 
+       if (vgic_has_its(kvm)) {
+               ret = vgic_register_its_iodevs(kvm);
+               if (ret) {
+                       kvm_err("Unable to register VGIC ITS MMIO regions\n");
+                       goto out;
+               }
+       }
+
        dist->ready = true;
 
 out:
index e7aeac719e09175514a6a0c4dd349ac6500039a4..e83b7fe4baaed1e1c5bc5f7fd520995002186b61 100644 (file)
@@ -117,17 +117,17 @@ static void vgic_irq_release(struct kref *ref)
 
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
-       struct vgic_dist *dist;
+       struct vgic_dist *dist = &kvm->arch.vgic;
 
        if (irq->intid < VGIC_MIN_LPI)
                return;
 
-       if (!kref_put(&irq->refcount, vgic_irq_release))
+       spin_lock(&dist->lpi_list_lock);
+       if (!kref_put(&irq->refcount, vgic_irq_release)) {
+               spin_unlock(&dist->lpi_list_lock);
                return;
+       }
 
-       dist = &kvm->arch.vgic;
-
-       spin_lock(&dist->lpi_list_lock);
        list_del(&irq->lpi_list);
        dist->lpi_list_count--;
        spin_unlock(&dist->lpi_list_lock);
index 1d8e21d5c13f589d3e8389829f8e6722b0b0c0ee..6c4625c4636843c551a23d26b6b1d849e26b52cd 100644 (file)
@@ -84,6 +84,7 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu);
 int vgic_v3_probe(const struct gic_kvm_info *info);
 int vgic_v3_map_resources(struct kvm *kvm);
 int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
+int vgic_register_its_iodevs(struct kvm *kvm);
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
 void vgic_enable_lpis(struct kvm_vcpu *vcpu);
@@ -140,6 +141,11 @@ static inline int vgic_register_redist_iodevs(struct kvm *kvm,
        return -ENODEV;
 }
 
+static inline int vgic_register_its_iodevs(struct kvm *kvm)
+{
+       return -ENODEV;
+}
+
 static inline bool vgic_has_its(struct kvm *kvm)
 {
        return false;