asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris...
author    Linus Torvalds <torvalds@linux-foundation.org>
Thu, 16 Jul 2015 01:38:24 +0000 (18:38 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 16 Jul 2015 01:38:24 +0000 (18:38 -0700)
Pull TPM bugfixes from James Morris.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security:
  tpm, tpm_crb: fail when TPM2 ACPI table contents look corrupted
  tpm: Fix initialization of the cdev

79 files changed:
Documentation/kbuild/makefiles.txt
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/include/asm/bitops.h
arch/arc/include/asm/futex.h
arch/arc/include/asm/ptrace.h
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/intc-compact.c
arch/arc/kernel/mcip.c
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/perf_event.h
arch/s390/kernel/nmi.c
arch/s390/kernel/process.c
arch/s390/kernel/sclp.S
arch/s390/oprofile/init.c
arch/x86/include/asm/kvm_host.h
arch/x86/include/uapi/asm/hyperv.h
arch/x86/kvm/cpuid.c
arch/x86/kvm/iommu.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
drivers/infiniband/core/agent.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/iwpm_msg.c
drivers/infiniband/core/iwpm_util.c
drivers/infiniband/core/iwpm_util.h
drivers/infiniband/core/mad.c
drivers/infiniband/core/multicast.c
drivers/infiniband/core/opa_smi.h
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/smi.c
drivers/infiniband/core/smi.h
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/ucma.c
drivers/infiniband/hw/ehca/ehca_sqp.c
drivers/infiniband/hw/ipath/ipath_mad.c
drivers/infiniband/hw/ipath/ipath_verbs.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mthca/mthca_mad.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/qib/qib_mad.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_alias.c
drivers/s390/char/sclp_early.c
drivers/s390/crypto/zcrypt_api.c
drivers/scsi/scsi_transport_srp.c
fs/locks.c
fs/nfs/nfs4proc.c
include/linux/fs.h
include/linux/init.h
include/linux/kvm_host.h
include/linux/module.h
include/rdma/ib_verbs.h
include/scsi/scsi_transport_srp.h
kernel/trace/trace.h
kernel/trace/trace_branch.c
net/rds/ib_rdma.c
virt/kvm/vfio.c

index e63b446d973cd716097adb60295577fe558f3f96..13f888a02a3de5cb7ecfdb2ed03eb1d721601de9 100644 (file)
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -952,6 +952,14 @@ When kbuild executes, the following steps are followed (roughly):
        $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
        mode) if this option is supported by $(AR).
 
+    ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS   Override the kbuild defaults
+
+       These variables are appended to the KBUILD_CPPFLAGS,
+       KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the
+       top-level Makefile has set any other flags. This provides a
+       means for an architecture to override the defaults.
+
+
 --- 6.2 Add prerequisites to archheaders:
 
        The archheaders: rule is used to generate header files that
index 257ef5892ab7483c973e9fbcbd2d76e02af34e50..2f49d89eccfa2fc635024aa82ca9e2851a6c6c88 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -780,10 +780,11 @@ endif
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
 
-# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
-KBUILD_CPPFLAGS += $(KCPPFLAGS)
-KBUILD_AFLAGS += $(KAFLAGS)
-KBUILD_CFLAGS += $(KCFLAGS)
+# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
+# last assignments
+KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
+KBUILD_AFLAGS   += $(ARCH_AFLAGS)   $(KAFLAGS)
+KBUILD_CFLAGS   += $(ARCH_CFLAGS)   $(KCFLAGS)
 
 # Use --build-id when available.
 LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
index e7cee0a5c56dfa80222d8286a63342b10c07bc42..91cf4055acab0439e564a96056012befd5fb4c36 100644 (file)
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -115,6 +115,7 @@ if ISA_ARCOMPACT
 
 config ARC_CPU_750D
        bool "ARC750D"
+       select ARC_CANT_LLSC
        help
          Support for ARC750 core
 
@@ -362,7 +363,7 @@ config ARC_CANT_LLSC
 config ARC_HAS_LLSC
        bool "Insn: LLOCK/SCOND (efficient atomic ops)"
        default y
-       depends on !ARC_CPU_750D && !ARC_CANT_LLSC
+       depends on !ARC_CANT_LLSC
 
 config ARC_HAS_SWAPE
        bool "Insn: SWAPE (endian-swap)"
index 6107062c01115dbea8a56e02bce254a8ba5b91af..46d87310220dadaf96be4ff08c42b240d2eb4916 100644 (file)
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -49,7 +49,8 @@ endif
 
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
-cflags-y  += -O3
+# Note: No need to add to cflags-y as that happens anyway
+ARCH_CFLAGS += -O3
 endif
 
 # small data is default for elf32 tool-chain. If not usable, disable it
index 15c8d6226c9d8508b54fbecc6547f5c0a0529e14..1cd5e82f5dc2c6f74cb36d40632998fd26195426 100644 (file)
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -12,7 +12,7 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <75000000>;
+       clock-frequency = <90000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index 199d42820eca784b4bd5955d3a7420bc53697774..2f0b33257db2e2ecf4749bee0d2f5ed3260dc3a3 100644 (file)
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -12,7 +12,7 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <75000000>;
+       clock-frequency = <90000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index 99fe118d3730bc050263e5be7dd3423ab659d46c..57c1f33844d44f1f9d16ed448ce0b13e87b88cea 100644 (file)
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -50,8 +50,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
         * done for const @nr, but no code is generated due to gcc      \
         * const prop.                                                  \
         */                                                             \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
+       nr &= 0x1f;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock       %0, [%1]            \n"                     \
@@ -82,8 +81,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
                                                                        \
        m += nr >> 5;                                                   \
                                                                        \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
+       nr &= 0x1f;                                                     \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
@@ -129,16 +127,13 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
        unsigned long temp, flags;                                      \
        m += nr >> 5;                                                   \
                                                                        \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
-                                                                       \
        /*                                                              \
         * spin lock/unlock provide the needed smp_mb() before/after    \
         */                                                             \
        bitops_lock(flags);                                             \
                                                                        \
        temp = *m;                                                      \
-       *m = temp c_op (1UL << nr);                                     \
+       *m = temp c_op (1UL << (nr & 0x1f));                            \
                                                                        \
        bitops_unlock(flags);                                           \
 }
@@ -149,17 +144,14 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
        unsigned long old, flags;                                       \
        m += nr >> 5;                                                   \
                                                                        \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
-                                                                       \
        bitops_lock(flags);                                             \
                                                                        \
        old = *m;                                                       \
-       *m = old c_op (1 << nr);                                        \
+       *m = old c_op (1UL << (nr & 0x1f));                             \
                                                                        \
        bitops_unlock(flags);                                           \
                                                                        \
-       return (old & (1 << nr)) != 0;                                  \
+       return (old & (1UL << (nr & 0x1f))) != 0;                       \
 }
 
 #endif /* CONFIG_ARC_HAS_LLSC */
@@ -174,11 +166,8 @@ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)       \
        unsigned long temp;                                             \
        m += nr >> 5;                                                   \
                                                                        \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
-                                                                       \
        temp = *m;                                                      \
-       *m = temp c_op (1UL << nr);                                     \
+       *m = temp c_op (1UL << (nr & 0x1f));                            \
 }
 
 #define __TEST_N_BIT_OP(op, c_op, asm_op)                              \
@@ -187,13 +176,10 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long
        unsigned long old;                                              \
        m += nr >> 5;                                                   \
                                                                        \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
-                                                                       \
        old = *m;                                                       \
-       *m = old c_op (1 << nr);                                        \
+       *m = old c_op (1UL << (nr & 0x1f));                             \
                                                                        \
-       return (old & (1 << nr)) != 0;                                  \
+       return (old & (1UL << (nr & 0x1f))) != 0;                       \
 }
 
 #define BIT_OPS(op, c_op, asm_op)                                      \
@@ -224,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
 
        addr += nr >> 5;
 
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       mask = 1 << nr;
+       mask = 1UL << (nr & 0x1f);
 
        return ((mask & *addr) != 0);
 }
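The bitops hunks above drop the __builtin_constant_p() special case and mask the bit index unconditionally: for a compile-time constant nr, gcc's constant propagation folds (nr & 0x1f) away entirely, and for a runtime nr the mask keeps the shift inside the 32-bit word. A minimal user-space sketch of the same idiom (illustrative only, not code from the patch):

    #include <stdio.h>

    /* Same clamp as the patched helpers: a constant argument folds to a
     * constant mask with no code emitted; a runtime argument is clamped
     * so the shift amount stays in 0..31. */
    static unsigned long bit_mask(unsigned int nr)
    {
            return 1UL << (nr & 0x1f);
    }

    int main(void)
    {
            printf("%#lx\n", bit_mask(5));  /* compile-time: folds to 0x20 */
            printf("%#lx\n", bit_mask(37)); /* runtime-style: 37 & 0x1f == 5 */
            return 0;
    }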
index 05b5aaf5b0f91e5580395e08ae778f5ddace5b3c..70cfe16b742d78f7e8016a41b11271e7d258f7a6 100644 (file)
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+                                                       \
+       __asm__ __volatile__(                           \
+       "1:     llock   %1, [%2]                \n"     \
+               insn                            "\n"    \
+       "2:     scond   %0, [%2]                \n"     \
+       "       bnz     1b                      \n"     \
+       "       mov %0, 0                       \n"     \
+       "3:                                     \n"     \
+       "       .section .fixup,\"ax\"          \n"     \
+       "       .align  4                       \n"     \
+       "4:     mov %0, %4                      \n"     \
+       "       b   3b                          \n"     \
+       "       .previous                       \n"     \
+       "       .section __ex_table,\"a\"       \n"     \
+       "       .align  4                       \n"     \
+       "       .word   1b, 4b                  \n"     \
+       "       .word   2b, 4b                  \n"     \
+       "       .previous                       \n"     \
+                                                       \
+       : "=&r" (ret), "=&r" (oldval)                   \
+       : "r" (uaddr), "r" (oparg), "ir" (-EFAULT)      \
+       : "cc", "memory")
+
+#else  /* !CONFIG_ARC_HAS_LLSC */
+
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
                                                        \
        __asm__ __volatile__(                           \
-       "1:     ld  %1, [%2]                    \n"     \
+       "1:     ld      %1, [%2]                \n"     \
                insn                            "\n"    \
-       "2:     st  %0, [%2]                    \n"     \
+       "2:     st      %0, [%2]                \n"     \
        "       mov %0, 0                       \n"     \
        "3:                                     \n"     \
        "       .section .fixup,\"ax\"          \n"     \
@@ -39,6 +67,8 @@
        : "r" (uaddr), "r" (oparg), "ir" (-EFAULT)      \
        : "cc", "memory")
 
+#endif
+
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
@@ -123,11 +153,17 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
 
        pagefault_disable();
 
-       /* TBD : can use llock/scond */
        __asm__ __volatile__(
-       "1:     ld    %0, [%3]  \n"
-       "       brne  %0, %1, 3f        \n"
-       "2:     st    %2, [%3]  \n"
+#ifdef CONFIG_ARC_HAS_LLSC
+       "1:     llock   %0, [%3]                \n"
+       "       brne    %0, %1, 3f              \n"
+       "2:     scond   %2, [%3]                \n"
+       "       bnz     1b                      \n"
+#else
+       "1:     ld      %0, [%3]                \n"
+       "       brne    %0, %1, 3f              \n"
+       "2:     st      %2, [%3]                \n"
+#endif
        "3:     \n"
        "       .section .fixup,\"ax\"  \n"
        "4:     mov %0, %4      \n"
index 91755972b9a25222c37a36e6b76dfd758cdbe771..91694ec1ce959498fd5b4431962b03bbdf4119b7 100644 (file)
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -106,7 +106,7 @@ struct callee_regs {
        long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
-#define instruction_pointer(regs)      ((regs)->ret)
+#define instruction_pointer(regs)      (unsigned long)((regs)->ret)
 #define profile_pc(regs)               instruction_pointer(regs)
 
 /* return 1 if user mode or 0 if kernel mode */
index 6208c630abed23a4fe2fd1306d5b31c6ec1d172e..26c15682747960d3b5b1c412f76dff3c2ebc7780 100644 (file)
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip.h>
-#include "../../drivers/irqchip/irqchip.h"
 #include <asm/irq.h>
 
 /*
index fcdddb631766eab0c9a2d3dd3d9b3456c90dfdb4..039fac30b5c1f2fca837c6f9cc11de0e0c56c35d 100644 (file)
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip.h>
-#include "../../drivers/irqchip/irqchip.h"
 #include <asm/irq.h>
 
 /*
index 30284e8de6ffc2af0844468798a5c1002465eb56..2fb86589054de6c0b051325463ebd03236c70299 100644 (file)
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -175,7 +175,6 @@ void mcip_init_early_smp(void)
 #include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
-#include "../../drivers/irqchip/irqchip.h"
 
 /*
  * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
@@ -218,11 +217,28 @@ static void idu_irq_unmask(struct irq_data *data)
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
+#ifdef CONFIG_SMP
 static int
-idu_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool f)
+idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
+                    bool force)
 {
+       unsigned long flags;
+       cpumask_t online;
+
+       /* errout if no online cpu per @cpumask */
+       if (!cpumask_and(&online, cpumask, cpu_online_mask))
+               return -EINVAL;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+
+       idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
+       idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
        return IRQ_SET_MASK_OK;
 }
+#endif
 
 static struct irq_chip idu_irq_chip = {
        .name                   = "MCIP IDU Intc",
@@ -330,8 +346,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
                if (!i)
                        idu_first_irq = irq;
 
-               irq_set_handler_data(irq, domain);
-               irq_set_chained_handler(irq, idu_cascade_isr);
+               irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
        }
 
        __mcip_cmd(CMD_IDU_ENABLE, 0);
index a3d186211ed367bcf852718682a62f24b555f3de..18cc01591c96e64186a8b13c1aef5b8011091b12 100644 (file)
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -142,17 +142,22 @@ static void read_arc_build_cfg_regs(void)
 }
 
 static const struct cpuinfo_data arc_cpu_tbl[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
        { {0x20, "ARC 600"      }, 0x2F},
        { {0x30, "ARC 700"      }, 0x33},
        { {0x34, "ARC 700 R4.10"}, 0x34},
        { {0x35, "ARC 700 R4.11"}, 0x35},
-       { {0x50, "ARC HS38"     }, 0x51},
+#else
+       { {0x50, "ARC HS38 R2.0"}, 0x51},
+       { {0x52, "ARC HS38 R2.1"}, 0x52},
+#endif
        { {0x00, NULL           } }
 };
 
-#define IS_AVAIL1(v, str)      ((v) ? str : "")
-#define IS_USED(cfg)           (IS_ENABLED(cfg) ? "" : "(not used) ")
-#define IS_AVAIL2(v, str, cfg)  IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg))
+#define IS_AVAIL1(v, s)                ((v) ? s : "")
+#define IS_USED_RUN(v)         ((v) ? "" : "(not used) ")
+#define IS_USED_CFG(cfg)       IS_USED_RUN(IS_ENABLED(cfg))
+#define IS_AVAIL2(v, s, cfg)   IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
 
 static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
@@ -226,7 +231,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                        n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
                }
                n += scnprintf(buf + n, len - n, "%s",
-                              IS_USED(CONFIG_ARC_HAS_HW_MPY));
+                              IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
        }
 
        n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
index 807f7d61d7a7cf867bca011251729d1164bd3f33..a6f91e88ce36e3ea2a2c95d8eabdceffa48be7fc 100644 (file)
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -58,7 +58,6 @@ static void show_callee_regs(struct callee_regs *cregs)
 
 static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
 {
-       struct path path;
        char *path_nm = NULL;
        struct mm_struct *mm;
        struct file *exe_file;
index b29d62ed4f7ece64acc41287ded40e3cfb3e186b..1cd6695b6ab50cbbb870ffa969027a50103dba29 100644 (file)
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -468,10 +468,18 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
 noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
 {
 #ifdef CONFIG_ISA_ARCV2
+       /*
+        * SLC is shared between all cores and concurrent aux operations from
+        * multiple cores need to be serialized using a spinlock.
+        * A concurrent operation can be silently ignored and/or the old/new
+        * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
+        * below).
+        */
+       static DEFINE_SPINLOCK(lock);
        unsigned long flags;
        unsigned int ctrl;
 
-       local_irq_save(flags);
+       spin_lock_irqsave(&lock, flags);
 
        /*
         * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
@@ -504,7 +512,7 @@ noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
 
        while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
 
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&lock, flags);
 #endif
 }
 
index 74a637a1cfc48b2c5d4f0047a0b25bc51d238e16..57706a9c69489791df22419ec069c1970d1aa634 100644 (file)
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -60,8 +60,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
        /* This is kernel Virtual address (0x7000_0000 based) */
        kvaddr = ioremap_nocache((unsigned long)paddr, size);
-       if (kvaddr != NULL)
-               memset(kvaddr, 0, size);
+       if (kvaddr == NULL)
+               return NULL;
 
        /* This is bus address, platform dependent */
        *dma_handle = (dma_addr_t)paddr;
index cfad7fca01d61d72942d98a37d18e5e0675980bc..d7697ab802f6c94813a27394baa255fa26a93ddc 100644 (file)
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -57,7 +57,10 @@ union ctlreg0 {
                unsigned long lap  : 1; /* Low-address-protection control */
                unsigned long      : 4;
                unsigned long edat : 1; /* Enhanced-DAT-enablement control */
-               unsigned long      : 23;
+               unsigned long      : 4;
+               unsigned long afp  : 1; /* AFP-register control */
+               unsigned long vx   : 1; /* Vector enablement control */
+               unsigned long      : 17;
        };
 };
 
index 4cb19fe76dd98b13abfc02c9520e37460475e645..f897ec73dc8c9c02943a5c76254140bc951a6024 100644 (file)
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -87,7 +87,15 @@ struct sf_raw_sample {
 } __packed;
 
 /* Perf hardware reserve and release functions */
+#ifdef CONFIG_PERF_EVENTS
 int perf_reserve_sampling(void);
 void perf_release_sampling(void);
+#else /* CONFIG_PERF_EVENTS */
+static inline int perf_reserve_sampling(void)
+{
+       return 0;
+}
+static inline void perf_release_sampling(void) {}
+#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* _ASM_S390_PERF_EVENT_H */
index 505c17c0ae1a67a93542c963ce800f7e8d9a11db..56b550893593a58a5ab07879a2fa3561cde009d4 100644 (file)
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -21,6 +21,7 @@
 #include <asm/nmi.h>
 #include <asm/crw.h>
 #include <asm/switch_to.h>
+#include <asm/ctl_reg.h>
 
 struct mcck_struct {
        int kill_task;
@@ -129,26 +130,30 @@ static int notrace s390_revalidate_registers(struct mci *mci)
        } else
                asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
 
-       asm volatile(
-               "       ld      0,0(%0)\n"
-               "       ld      1,8(%0)\n"
-               "       ld      2,16(%0)\n"
-               "       ld      3,24(%0)\n"
-               "       ld      4,32(%0)\n"
-               "       ld      5,40(%0)\n"
-               "       ld      6,48(%0)\n"
-               "       ld      7,56(%0)\n"
-               "       ld      8,64(%0)\n"
-               "       ld      9,72(%0)\n"
-               "       ld      10,80(%0)\n"
-               "       ld      11,88(%0)\n"
-               "       ld      12,96(%0)\n"
-               "       ld      13,104(%0)\n"
-               "       ld      14,112(%0)\n"
-               "       ld      15,120(%0)\n"
-               : : "a" (fpt_save_area));
-       /* Revalidate vector registers */
-       if (MACHINE_HAS_VX && current->thread.vxrs) {
+       if (!MACHINE_HAS_VX) {
+               /* Revalidate floating point registers */
+               asm volatile(
+                       "       ld      0,0(%0)\n"
+                       "       ld      1,8(%0)\n"
+                       "       ld      2,16(%0)\n"
+                       "       ld      3,24(%0)\n"
+                       "       ld      4,32(%0)\n"
+                       "       ld      5,40(%0)\n"
+                       "       ld      6,48(%0)\n"
+                       "       ld      7,56(%0)\n"
+                       "       ld      8,64(%0)\n"
+                       "       ld      9,72(%0)\n"
+                       "       ld      10,80(%0)\n"
+                       "       ld      11,88(%0)\n"
+                       "       ld      12,96(%0)\n"
+                       "       ld      13,104(%0)\n"
+                       "       ld      14,112(%0)\n"
+                       "       ld      15,120(%0)\n"
+                       : : "a" (fpt_save_area));
+       } else {
+               /* Revalidate vector registers */
+               union ctlreg0 cr0;
+
                if (!mci->vr) {
                        /*
                         * Vector registers can't be restored and therefore
@@ -156,8 +161,12 @@ static int notrace s390_revalidate_registers(struct mci *mci)
                         */
                        kill_task = 1;
                }
+               cr0.val = S390_lowcore.cregs_save_area[0];
+               cr0.afp = cr0.vx = 1;
+               __ctl_load(cr0.val, 0, 0);
                restore_vx_regs((__vector128 *)
-                               S390_lowcore.vector_save_area_addr);
+                               &S390_lowcore.vector_save_area);
+               __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
        }
        /* Revalidate access registers */
        asm volatile(
index dc5edc29b73aaf120bd462d93fd371c2803daf18..8f587d871b9f234bed189b9c568746930d1045f3 100644 (file)
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 asmlinkage void execve_tail(void)
 {
        current->thread.fp_regs.fpc = 0;
-       asm volatile("sfpc %0,%0" : : "d" (0));
+       asm volatile("sfpc %0" : : "d" (0));
 }
 
 /*
index 43c3169ea49c7d019543d8e2754dd3061fd95789..ada0c07fe1a8744ecf989093720422cac1429e66 100644 (file)
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
        jno     .Lesa2
        ahi     %r15,-80
        stmh    %r6,%r15,96(%r15)               # store upper register halves
+       basr    %r13,0
+       lmh     %r0,%r15,.Lzeroes-.(%r13)       # clear upper register halves
 .Lesa2:
        lr      %r10,%r2                        # save string pointer
        lhi     %r2,0
@@ -291,6 +293,8 @@ ENTRY(_sclp_print_early)
 .Lesa3:
        lm      %r6,%r15,120(%r15)              # restore registers
        br      %r14
+.Lzeroes:
+       .fill   64,4,0
 
 .LwritedataS4:
        .long   0x00760005                      # SCLP command for write data
index bc927a09a172b6d6699cf60610cde600d4c73e74..9cfa2ffaa9d6bb02dd29cb01d895a0feaf63bdb0 100644 (file)
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -16,6 +16,7 @@
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <asm/processor.h>
+#include <asm/perf_event.h>
 
 #include "../../../drivers/oprofile/oprof.h"
 
index 2a7f5d782c332d1965ac1c5a23a33289cfce7352..49ec9038ec14102a286c9b4bed126a6825613439 100644 (file)
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -604,6 +604,8 @@ struct kvm_arch {
        bool iommu_noncoherent;
 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
        atomic_t noncoherent_dma_count;
+#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+       atomic_t assigned_device_count;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
index 8fba544e9cc4164261f76c6c5d894b80a92f0377..f36d56bd76324543f6f0c208bea569ea96e6e802 100644 (file)
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
 #define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE          (1 << 4)
 /* Support for a virtual guest idle state is available */
 #define HV_X64_GUEST_IDLE_STATE_AVAILABLE              (1 << 5)
+/* Guest crash data handler available */
+#define HV_X64_GUEST_CRASH_MSR_AVAILABLE               (1 << 10)
 
 /*
  * Implementation recommendations. Indicates which behaviors the hypervisor
index 64dd467930997adbfa6aecd25641ef2004c5488c..2fbea2544f2437bc0ae50ef00288dc320effd81e 100644 (file)
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -98,6 +98,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
        vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+       if (vcpu->arch.eager_fpu)
+               kvm_x86_ops->fpu_activate(vcpu);
 
        /*
         * The existing code assumes virtual address is 48-bit in the canonical
index 7dbced309ddb526d99ae41114bf3afd8488ccaa8..5c520ebf6343270272679e213b19cd9380b63293 100644 (file)
--- a/arch/x86/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
                        goto out_unmap;
        }
 
+       kvm_arch_start_assignment(kvm);
        pci_set_dev_assigned(pdev);
 
        dev_info(&pdev->dev, "kvm assign device\n");
@@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
        iommu_detach_device(domain, &pdev->dev);
 
        pci_clear_dev_assigned(pdev);
+       kvm_arch_end_assignment(kvm);
 
        dev_info(&pdev->dev, "kvm deassign device\n");
 
index f807496b62c2cc76e82a60cd58ee187f0cdc77c2..44171462bd2a31561645aff21c83584b82eed6ff 100644 (file)
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2479,6 +2479,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
        return 0;
 }
 
+static bool kvm_is_mmio_pfn(pfn_t pfn)
+{
+       if (pfn_valid(pfn))
+               return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+
+       return true;
+}
+
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                    unsigned pte_access, int level,
                    gfn_t gfn, pfn_t pfn, bool speculative,
@@ -2506,7 +2514,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-                       kvm_is_reserved_pfn(pfn));
+                       kvm_is_mmio_pfn(pfn));
 
        if (host_writable)
                spte |= SPTE_HOST_WRITEABLE;
index 602b974a60a626e18d11965ed5ad1461c329bee8..bbc678a66b18719287b091787db96cf1f9f98981 100644 (file)
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -865,6 +865,64 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
 }
 
+#define MTRR_TYPE_UC_MINUS     7
+#define MTRR2PROTVAL_INVALID 0xff
+
+static u8 mtrr2protval[8];
+
+static u8 fallback_mtrr_type(int mtrr)
+{
+       /*
+        * WT and WP aren't always available in the host PAT.  Treat
+        * them as UC and UC- respectively.  Everything else should be
+        * there.
+        */
+       switch (mtrr)
+       {
+       case MTRR_TYPE_WRTHROUGH:
+               return MTRR_TYPE_UNCACHABLE;
+       case MTRR_TYPE_WRPROT:
+               return MTRR_TYPE_UC_MINUS;
+       default:
+               BUG();
+       }
+}
+
+static void build_mtrr2protval(void)
+{
+       int i;
+       u64 pat;
+
+       for (i = 0; i < 8; i++)
+               mtrr2protval[i] = MTRR2PROTVAL_INVALID;
+
+       /* Ignore the invalid MTRR types.  */
+       mtrr2protval[2] = 0;
+       mtrr2protval[3] = 0;
+
+       /*
+        * Use host PAT value to figure out the mapping from guest MTRR
+        * values to nested page table PAT/PCD/PWT values.  We do not
+        * want to change the host PAT value every time we enter the
+        * guest.
+        */
+       rdmsrl(MSR_IA32_CR_PAT, pat);
+       for (i = 0; i < 8; i++) {
+               u8 mtrr = pat >> (8 * i);
+
+               if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
+                       mtrr2protval[mtrr] = __cm_idx2pte(i);
+       }
+
+       for (i = 0; i < 8; i++) {
+               if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
+                       u8 fallback = fallback_mtrr_type(i);
+                       mtrr2protval[i] = mtrr2protval[fallback];
+                       BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
+               }
+       }
+}
+
 static __init int svm_hardware_setup(void)
 {
        int cpu;
@@ -931,6 +989,7 @@ static __init int svm_hardware_setup(void)
        } else
                kvm_disable_tdp();
 
+       build_mtrr2protval();
        return 0;
 
 err:
@@ -1085,6 +1144,39 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
        return target_tsc - tsc;
 }
 
+static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
+{
+       struct kvm_vcpu *vcpu = &svm->vcpu;
+
+       /* Unlike Intel, AMD takes the guest's CR0.CD into account.
+        *
+        * AMD does not have IPAT.  To emulate it for the case of guests
+        * with no assigned devices, just set everything to WB.  If guests
+        * have assigned devices, however, we cannot force WB for RAM
+        * pages only, so use the guest PAT directly.
+        */
+       if (!kvm_arch_has_assigned_device(vcpu->kvm))
+               *g_pat = 0x0606060606060606;
+       else
+               *g_pat = vcpu->arch.pat;
+}
+
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+       u8 mtrr;
+
+       /*
+        * 1. MMIO: trust guest MTRR, so same as item 3.
+        * 2. No passthrough: always map as WB, and force guest PAT to WB as well
+        * 3. Passthrough: can't guarantee the result, try to trust guest.
+        */
+       if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
+               return 0;
+
+       mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
+       return mtrr2protval[mtrr];
+}
+
 static void init_vmcb(struct vcpu_svm *svm, bool init_event)
 {
        struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1180,6 +1272,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
                clr_cr_intercept(svm, INTERCEPT_CR3_READ);
                clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
                save->g_pat = svm->vcpu.arch.pat;
+               svm_set_guest_pat(svm, &save->g_pat);
                save->cr3 = 0;
                save->cr4 = 0;
        }
@@ -3254,6 +3347,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
+       case MSR_IA32_CR_PAT:
+               if (npt_enabled) {
+                       if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+                               return 1;
+                       vcpu->arch.pat = data;
+                       svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
+                       mark_dirty(svm->vmcb, VMCB_NPT);
+                       break;
+               }
+               /* fall through */
        default:
                return kvm_set_msr_common(vcpu, msr);
        }
@@ -4088,11 +4191,6 @@ static bool svm_has_high_real_mode_segbase(void)
        return true;
 }
 
-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
-       return 0;
-}
-
 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 {
 }
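build_mtrr2protval() above inverts the host PAT: it walks the eight one-byte PAT slots and records, for each memory type it finds, a page-table encoding that selects that slot. A standalone sketch of the byte-wise decode (illustrative; 0x0007040600070406 is the architectural power-on PAT value, used here only as sample input):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* x86 memory-type encodings: 0=UC 1=WC 4=WT 5=WP 6=WB 7=UC- */
            static const char *type[8] = {
                    "UC", "WC", "rsvd", "rsvd", "WT", "WP", "WB", "UC-"
            };
            uint64_t pat = 0x0007040600070406ULL;   /* sample host PAT */
            int i;

            for (i = 0; i < 8; i++) {
                    uint8_t t = (pat >> (8 * i)) & 0x07;    /* one type per byte */
                    printf("PAT[%d] -> %s\n", i, type[t]);
            }
            return 0;
    }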
index e856dd566f4c2a6c10a4a6dc9c6ea018fe72cdda..5b4e9384717a17257ea20ef47031dd5f158273a2 100644 (file)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8632,22 +8632,17 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
        u64 ipat = 0;
 
        /* For VT-d and EPT combination
-        * 1. MMIO: always map as UC
+        * 1. MMIO: guest may want to apply WC, trust it.
         * 2. EPT with VT-d:
         *   a. VT-d without snooping control feature: can't guarantee the
-        *      result, try to trust guest.
+        *      result, try to trust guest.  So the same as item 1.
         *   b. VT-d with snooping control feature: snooping control feature of
         *      VT-d engine can guarantee the cache correctness. Just set it
         *      to WB to keep consistent with host. So the same as item 3.
         * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
         *    consistent with host MTRR
         */
-       if (is_mmio) {
-               cache = MTRR_TYPE_UNCACHABLE;
-               goto exit;
-       }
-
-       if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+       if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
                ipat = VMX_EPT_IPAT_BIT;
                cache = MTRR_TYPE_WRBACK;
                goto exit;
index bbaf44e8f0d3cdd7100c40c98ccf2ab40ae1ea2f..5ef2560075bfb80e6fdabcdf51f71258091e4339 100644 (file)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3157,8 +3157,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
                        cpuid_count(XSTATE_CPUID, index,
                                    &size, &offset, &ecx, &edx);
                        memcpy(dest, src + offset, size);
-               } else
-                       WARN_ON_ONCE(1);
+               }
 
                valid -= feature;
        }
@@ -7315,11 +7314,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
        vcpu = kvm_x86_ops->vcpu_create(kvm, id);
 
-       /*
-        * Activate fpu unconditionally in case the guest needs eager FPU.  It will be
-        * deactivated soon if it doesn't.
-        */
-       kvm_x86_ops->fpu_activate(vcpu);
        return vcpu;
 }
 
@@ -8218,6 +8212,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
                        kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
+void kvm_arch_start_assignment(struct kvm *kvm)
+{
+       atomic_inc(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
+
+void kvm_arch_end_assignment(struct kvm *kvm)
+{
+       atomic_dec(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
+
+bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+       return atomic_read(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
+
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
 {
        atomic_inc(&kvm->arch.noncoherent_dma_count);
index c7dcfe4ca5f10219e553cd4cb5acdd2e1658c95b..0429040304fd478a7ad7833df48c0bdc74c429bc 100644 (file)
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -88,7 +88,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
        struct ib_ah *ah;
        struct ib_mad_send_wr_private *mad_send_wr;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH)
+       if (rdma_cap_ib_switch(device))
                port_priv = ib_get_agent_port(device, 0);
        else
                port_priv = ib_get_agent_port(device, port_num);
@@ -122,7 +122,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
        memcpy(send_buf->mad, mad_hdr, resp_mad_len);
        send_buf->ah = ah;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
+       if (rdma_cap_ib_switch(device)) {
                mad_send_wr = container_of(send_buf,
                                           struct ib_mad_send_wr_private,
                                           send_buf);
index dbddddd6fb5d111e94e44e2800282c84312131a0..3a972ebf3c0d1170efe280aa7bcf781c831fa98f 100644 (file)
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -169,6 +169,7 @@ struct cm_device {
        struct ib_device *ib_device;
        struct device *device;
        u8 ack_delay;
+       int going_down;
        struct cm_port *port[0];
 };
 
@@ -805,6 +806,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 {
        int wait_time;
        unsigned long flags;
+       struct cm_device *cm_dev;
+
+       cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
+       if (!cm_dev)
+               return;
 
        spin_lock_irqsave(&cm.lock, flags);
        cm_cleanup_timewait(cm_id_priv->timewait_info);
@@ -818,8 +824,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
-       queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
-                          msecs_to_jiffies(wait_time));
+
+       /* Check if the device started its remove_one */
+       spin_lock_irq(&cm.lock);
+       if (!cm_dev->going_down)
+               queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
+                                  msecs_to_jiffies(wait_time));
+       spin_unlock_irq(&cm.lock);
+
        cm_id_priv->timewait_info = NULL;
 }
 
@@ -3305,6 +3317,11 @@ static int cm_establish(struct ib_cm_id *cm_id)
        struct cm_work *work;
        unsigned long flags;
        int ret = 0;
+       struct cm_device *cm_dev;
+
+       cm_dev = ib_get_client_data(cm_id->device, &cm_client);
+       if (!cm_dev)
+               return -ENODEV;
 
        work = kmalloc(sizeof *work, GFP_ATOMIC);
        if (!work)
@@ -3343,7 +3360,17 @@ static int cm_establish(struct ib_cm_id *cm_id)
        work->remote_id = cm_id->remote_id;
        work->mad_recv_wc = NULL;
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
-       queue_delayed_work(cm.wq, &work->work, 0);
+
+       /* Check if the device started its remove_one */
+       spin_lock_irq(&cm.lock);
+       if (!cm_dev->going_down) {
+               queue_delayed_work(cm.wq, &work->work, 0);
+       } else {
+               kfree(work);
+               ret = -ENODEV;
+       }
+       spin_unlock_irq(&cm.lock);
+
 out:
        return ret;
 }
@@ -3394,6 +3421,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
        enum ib_cm_event_type event;
        u16 attr_id;
        int paths = 0;
+       int going_down = 0;
 
        switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
        case CM_REQ_ATTR_ID:
@@ -3452,7 +3480,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
        work->cm_event.event = event;
        work->mad_recv_wc = mad_recv_wc;
        work->port = port;
-       queue_delayed_work(cm.wq, &work->work, 0);
+
+       /* Check if the device started its remove_one */
+       spin_lock_irq(&cm.lock);
+       if (!port->cm_dev->going_down)
+               queue_delayed_work(cm.wq, &work->work, 0);
+       else
+               going_down = 1;
+       spin_unlock_irq(&cm.lock);
+
+       if (going_down) {
+               kfree(work);
+               ib_free_recv_mad(mad_recv_wc);
+       }
 }
 
 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3771,7 +3811,7 @@ static void cm_add_one(struct ib_device *ib_device)
 
        cm_dev->ib_device = ib_device;
        cm_get_ack_delay(cm_dev);
-
+       cm_dev->going_down = 0;
        cm_dev->device = device_create(&cm_class, &ib_device->dev,
                                       MKDEV(0, 0), NULL,
                                       "%s", ib_device->name);
@@ -3864,14 +3904,23 @@ static void cm_remove_one(struct ib_device *ib_device)
        list_del(&cm_dev->list);
        write_unlock_irqrestore(&cm.device_lock, flags);
 
+       spin_lock_irq(&cm.lock);
+       cm_dev->going_down = 1;
+       spin_unlock_irq(&cm.lock);
+
        for (i = 1; i <= ib_device->phys_port_cnt; i++) {
                if (!rdma_cap_ib_cm(ib_device, i))
                        continue;
 
                port = cm_dev->port[i-1];
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
-               ib_unregister_mad_agent(port->mad_agent);
+               /*
+                * We flush the workqueue here after going_down has been set.
+                * This guarantees that no new work will be queued by the recv
+                * handler, and only then is it safe to unregister the MAD agent.
+                */
                flush_workqueue(cm.wq);
+               ib_unregister_mad_agent(port->mad_agent);
                cm_remove_port_fs(port);
        }
        device_unregister(cm_dev->device);
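All of the cm.c hunks apply one shutdown pattern: every producer tests going_down under cm.lock before queueing work, and remove_one sets the flag under that same lock before flushing, so nothing can be queued behind the flush. A condensed sketch of the pattern with simplified, hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct dev_ctx {
            spinlock_t lock;
            int going_down;
            struct workqueue_struct *wq;
    };

    /* Producer side (e.g. a recv handler): queue only if not going down. */
    static int ctx_queue(struct dev_ctx *ctx, struct work_struct *work)
    {
            int ret = 0;

            spin_lock_irq(&ctx->lock);
            if (!ctx->going_down)
                    queue_work(ctx->wq, work);
            else
                    ret = -ENODEV;          /* caller frees @work */
            spin_unlock_irq(&ctx->lock);

            return ret;
    }

    /* Remover side: once the flag is set under the lock, a flush drains
     * everything already queued and nothing new can slip in behind it. */
    static void ctx_remove(struct dev_ctx *ctx)
    {
            spin_lock_irq(&ctx->lock);
            ctx->going_down = 1;
            spin_unlock_irq(&ctx->lock);

            flush_workqueue(ctx->wq);
            /* now safe to tear down whatever the work items were using */
    }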
index e6ffa2e66c1ac54b7a2645f59bbb28c00bc91cc3..22a3abee2a54c0fdce95a4567ef3cc45a20901ad 100644 (file)
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
                err_str = "Invalid port mapper client";
                goto pid_query_error;
        }
-       if (iwpm_registered_client(nl_client))
+       if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
+                       iwpm_user_pid == IWPM_PID_UNAVAILABLE)
                return 0;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
        if (!skb) {
@@ -106,7 +107,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
        ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
-               iwpm_set_registered(nl_client, 1);
                iwpm_user_pid = IWPM_PID_UNAVAILABLE;
                err_str = "Unable to send a nlmsg";
                goto pid_query_error;
@@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
                err_str = "Invalid port mapper client";
                goto add_mapping_error;
        }
-       if (!iwpm_registered_client(nl_client)) {
+       if (!iwpm_valid_pid())
+               return 0;
+       if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
                err_str = "Unregistered port mapper client";
                goto add_mapping_error;
        }
-       if (!iwpm_valid_pid())
-               return 0;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
        if (!skb) {
                err_str = "Unable to create a nlmsg";
@@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
                err_str = "Invalid port mapper client";
                goto query_mapping_error;
        }
-       if (!iwpm_registered_client(nl_client)) {
+       if (!iwpm_valid_pid())
+               return 0;
+       if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
                err_str = "Unregistered port mapper client";
                goto query_mapping_error;
        }
-       if (!iwpm_valid_pid())
-               return 0;
        ret = -ENOMEM;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
        if (!skb) {
@@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
                err_str = "Invalid port mapper client";
                goto remove_mapping_error;
        }
-       if (!iwpm_registered_client(nl_client)) {
+       if (!iwpm_valid_pid())
+               return 0;
+       if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
                err_str = "Unregistered port mapper client";
                goto remove_mapping_error;
        }
-       if (!iwpm_valid_pid())
-               return 0;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
        if (!skb) {
                ret = -ENOMEM;
@@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
        pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
                        __func__, iwpm_user_pid);
        if (iwpm_valid_client(nl_client))
-               iwpm_set_registered(nl_client, 1);
+               iwpm_set_registration(nl_client, IWPM_REG_VALID);
 register_pid_response_exit:
        nlmsg_request->request_done = 1;
        /* always for found nlmsg_request */
@@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
        const char *msg_type = "Mapping Info response";
-       int iwpm_pid;
        u8 nl_client;
        char *iwpm_name;
        u16 iwpm_version;
@@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
                                __func__, nl_client);
                return ret;
        }
-       iwpm_set_registered(nl_client, 0);
+       iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
        atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+       iwpm_user_pid = cb->nlh->nlmsg_pid;
        if (!iwpm_mapinfo_available())
                return 0;
-       iwpm_pid = cb->nlh->nlmsg_pid;
        pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
-                __func__, iwpm_pid);
-       ret = iwpm_send_mapinfo(nl_client, iwpm_pid);
+                __func__, iwpm_user_pid);
+       ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
        return ret;
 }
 EXPORT_SYMBOL(iwpm_mapping_info_cb);
index a626795bf9c71f43f7d526d07ae3a490399fdb79..5fb089e913530c54a9852d4ae6fabcda24a00451 100644 (file)
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -78,6 +78,7 @@ int iwpm_init(u8 nl_client)
        mutex_unlock(&iwpm_admin_lock);
        if (!ret) {
                iwpm_set_valid(nl_client, 1);
+               iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
                pr_debug("%s: Mapinfo and reminfo tables are created\n",
                                __func__);
        }
@@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client)
        }
        mutex_unlock(&iwpm_admin_lock);
        iwpm_set_valid(nl_client, 0);
+       iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
        return 0;
 }
 EXPORT_SYMBOL(iwpm_exit);
@@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid)
 }
 
 /* valid client */
-int iwpm_registered_client(u8 nl_client)
+u32 iwpm_get_registration(u8 nl_client)
 {
        return iwpm_admin.reg_list[nl_client];
 }
 
 /* valid client */
-void iwpm_set_registered(u8 nl_client, int reg)
+void iwpm_set_registration(u8 nl_client, u32 reg)
 {
        iwpm_admin.reg_list[nl_client] = reg;
 }
 
+/* check if the client registration matches the given one */
+u32 iwpm_check_registration(u8 nl_client, u32 reg)
+{
+       return (iwpm_get_registration(nl_client) & reg);
+}
+
 int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
                                struct sockaddr_storage *b_sockaddr)
 {
index ee2d9ff095be2d68d14c9c48eb551f9647ca562f..b7b9e194ce81fd8f2c7598bb2a6b58cab4fad6b8 100644 (file)
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
 #define IWPM_PID_UNDEFINED     -1
 #define IWPM_PID_UNAVAILABLE   -2
 
+#define IWPM_REG_UNDEF          0x01
+#define IWPM_REG_VALID          0x02
+#define IWPM_REG_INCOMPL        0x04
+
 struct iwpm_nlmsg_request {
        struct list_head    inprocess_list;
        __u32               nlmsg_seq;
@@ -88,7 +92,7 @@ struct iwpm_admin_data {
        atomic_t refcount;
        atomic_t nlmsg_seq;
        int      client_list[RDMA_NL_NUM_CLIENTS];
-       int      reg_list[RDMA_NL_NUM_CLIENTS];
+       u32      reg_list[RDMA_NL_NUM_CLIENTS];
 };
 
 /**
@@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client);
 void iwpm_set_valid(u8 nl_client, int valid);
 
 /**
- * iwpm_registered_client - Check if the port mapper client is registered
+ * iwpm_check_registration - Check if the client registration
+ *                           matches the given one
  * @nl_client: The index of the netlink client
+ * @reg: The given registration type to compare with
  *
  * Call iwpm_register_pid() to register a client
+ * Returns true if the client registration matches reg,
+ * otherwise returns false
+ */
+u32 iwpm_check_registration(u8 nl_client, u32 reg);
+
+/**
+ * iwpm_set_registration - Set the client registration
+ * @nl_client: The index of the netlink client
+ * @reg: Registration type to set
  */
-int iwpm_registered_client(u8 nl_client);
+void iwpm_set_registration(u8 nl_client, u32 reg);
 
 /**
- * iwpm_set_registered - Set the port mapper client to registered or not
+ * iwpm_get_registration
  * @nl_client: The index of the netlink client
- * @reg: 1 if registered or 0 if not
+ *
+ * Returns the client registration type
  */
-void iwpm_set_registered(u8 nl_client, int reg);
+u32 iwpm_get_registration(u8 nl_client);
 
 /**
  * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
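The registration state changes here from a 0/1 int to distinct bits (IWPM_REG_UNDEF, IWPM_REG_VALID, IWPM_REG_INCOMPL) so that iwpm_check_registration() can match one state, or in principle a set of states, with a single mask. A tiny standalone sketch of the mask semantics (hypothetical usage, not code from the patch):

    #include <stdio.h>

    #define IWPM_REG_UNDEF   0x01
    #define IWPM_REG_VALID   0x02
    #define IWPM_REG_INCOMPL 0x04

    /* mirrors iwpm_check_registration(): non-zero if state matches any bit of reg */
    static unsigned int check(unsigned int state, unsigned int reg)
    {
            return state & reg;
    }

    int main(void)
    {
            unsigned int state = IWPM_REG_INCOMPL;  /* e.g. after a mapper restart */

            printf("%d\n", !!check(state, IWPM_REG_VALID));                    /* 0 */
            printf("%d\n", !!check(state, IWPM_REG_VALID | IWPM_REG_INCOMPL)); /* 1 */
            return 0;
    }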
index a4b1466c1bf686431db027309db9722c7b044455..786fc51bf04b22b0d9b0fc371f3fdb25ec4c811b 100644 (file)
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
        bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
                                    mad_agent_priv->qp_info->port_priv->port_num);
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH &&
+       if (rdma_cap_ib_switch(device) &&
            smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                port_num = send_wr->wr.ud.port_num;
        else
@@ -787,14 +787,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                if ((opa_get_smp_direction(opa_smp)
                     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
                     OPA_LID_PERMISSIVE &&
-                    opa_smi_handle_dr_smp_send(opa_smp, device->node_type,
+                    opa_smi_handle_dr_smp_send(opa_smp,
+                                               rdma_cap_ib_switch(device),
                                                port_num) == IB_SMI_DISCARD) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "OPA Invalid directed route\n");
                        goto out;
                }
                opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
-               if (opa_drslid != OPA_LID_PERMISSIVE &&
+               if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
                    opa_drslid & 0xffff0000) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
@@ -810,7 +811,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
        } else {
                if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
                     IB_LID_PERMISSIVE &&
-                    smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
+                    smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
                     IB_SMI_DISCARD) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "Invalid directed route\n");
@@ -2030,7 +2031,7 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
        struct ib_smp *smp = (struct ib_smp *)recv->mad;
 
        if (smi_handle_dr_smp_recv(smp,
-                                  port_priv->device->node_type,
+                                  rdma_cap_ib_switch(port_priv->device),
                                   port_num,
                                   port_priv->device->phys_port_cnt) ==
                                   IB_SMI_DISCARD)
@@ -2042,13 +2043,13 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
 
        if (retsmi == IB_SMI_SEND) { /* don't forward */
                if (smi_handle_dr_smp_send(smp,
-                                          port_priv->device->node_type,
+                                          rdma_cap_ib_switch(port_priv->device),
                                           port_num) == IB_SMI_DISCARD)
                        return IB_SMI_DISCARD;
 
                if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
                        return IB_SMI_DISCARD;
-       } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+       } else if (rdma_cap_ib_switch(port_priv->device)) {
                /* forward case for switches */
                memcpy(response, recv, mad_priv_size(response));
                response->header.recv_wc.wc = &response->header.wc;
@@ -2115,7 +2116,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
        struct opa_smp *smp = (struct opa_smp *)recv->mad;
 
        if (opa_smi_handle_dr_smp_recv(smp,
-                                  port_priv->device->node_type,
+                                  rdma_cap_ib_switch(port_priv->device),
                                   port_num,
                                   port_priv->device->phys_port_cnt) ==
                                   IB_SMI_DISCARD)
@@ -2127,7 +2128,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
 
        if (retsmi == IB_SMI_SEND) { /* don't forward */
                if (opa_smi_handle_dr_smp_send(smp,
-                                          port_priv->device->node_type,
+                                          rdma_cap_ib_switch(port_priv->device),
                                           port_num) == IB_SMI_DISCARD)
                        return IB_SMI_DISCARD;
 
@@ -2135,7 +2136,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
                    IB_SMI_DISCARD)
                        return IB_SMI_DISCARD;
 
-       } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+       } else if (rdma_cap_ib_switch(port_priv->device)) {
                /* forward case for switches */
                memcpy(response, recv, mad_priv_size(response));
                response->header.recv_wc.wc = &response->header.wc;
@@ -2235,7 +2236,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                goto out;
        }
 
-       if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
+       if (rdma_cap_ib_switch(port_priv->device))
                port_num = wc->port_num;
        else
                port_num = port_priv->port_num;
@@ -3297,17 +3298,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 
 static void ib_mad_init_device(struct ib_device *device)
 {
-       int start, end, i;
+       int start, i;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
-               start = 0;
-               end   = 0;
-       } else {
-               start = 1;
-               end   = device->phys_port_cnt;
-       }
+       start = rdma_start_port(device);
 
-       for (i = start; i <= end; i++) {
+       for (i = start; i <= rdma_end_port(device); i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;
 
@@ -3342,17 +3337,9 @@ static void ib_mad_init_device(struct ib_device *device)
 
 static void ib_mad_remove_device(struct ib_device *device)
 {
-       int start, end, i;
-
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
-               start = 0;
-               end   = 0;
-       } else {
-               start = 1;
-               end   = device->phys_port_cnt;
-       }
+       int i;
 
-       for (i = start; i <= end; i++) {
+       for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;
 
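
The mad.c hunks above replace every open-coded "switch means port 0, HCA means ports 1..phys_port_cnt" check with the rdma_cap_ib_switch()/rdma_start_port()/rdma_end_port() helpers. As a sketch of the convention the removed code encoded (the authoritative definitions live in include/rdma/ib_verbs.h), the helpers reduce to roughly:

        /* A switch exposes only the management port 0; an HCA exposes
         * ports 1..phys_port_cnt.  Port iteration then uniformly becomes
         * for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++).
         */
        static inline u8 rdma_start_port(const struct ib_device *device)
        {
                return rdma_cap_ib_switch(device) ? 0 : 1;
        }

        static inline u8 rdma_end_port(const struct ib_device *device)
        {
                return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
        }
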
index 1244f02a5c6d402aa5389e206b6b8e5482ec2de2..2cb865c7ce7a98773f338b1b8c09ffc66db4cebf 100644 (file)
@@ -812,12 +812,8 @@ static void mcast_add_one(struct ib_device *device)
        if (!dev)
                return;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH)
-               dev->start_port = dev->end_port = 0;
-       else {
-               dev->start_port = 1;
-               dev->end_port = device->phys_port_cnt;
-       }
+       dev->start_port = rdma_start_port(device);
+       dev->end_port = rdma_end_port(device);
 
        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
                if (!rdma_cap_ib_mcast(device, dev->start_port + i))
index 62d91bfa4cb70bed63cfac71a746fca544513aff..3bfab3505a2917d561d9d45372c091b38649151c 100644 (file)
 
 #include "smi.h"
 
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
                                       int port_num, int phys_port_cnt);
 int opa_smi_get_fwd_port(struct opa_smp *smp);
 extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
 extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
-                                             u8 node_type, int port_num);
+                                             bool is_switch, int port_num);
 
 /*
  * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
index 0fae85062a65b8704ddc11a117751df62db9c9d4..ca919f4296664f070f0c63b1765542375818f0aa 100644 (file)
@@ -1156,12 +1156,8 @@ static void ib_sa_add_one(struct ib_device *device)
        int s, e, i;
        int count = 0;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH)
-               s = e = 0;
-       else {
-               s = 1;
-               e = device->phys_port_cnt;
-       }
+       s = rdma_start_port(device);
+       e = rdma_end_port(device);
 
        sa_dev = kzalloc(sizeof *sa_dev +
                         (e - s + 1) * sizeof (struct ib_sa_port),
index 368a561d1a5d49d931ef45738c35f3be4b068725..f19b23817c2b49b3650f36077f00d67df801b3a7 100644 (file)
@@ -41,7 +41,7 @@
 #include "smi.h"
 #include "opa_smi.h"
 
-static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
                                                u8 *hop_ptr, u8 hop_cnt,
                                                const u8 *initial_path,
                                                const u8 *return_path,
@@ -64,7 +64,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
 
                /* C14-9:2 */
                if (*hop_ptr && *hop_ptr < hop_cnt) {
-                       if (node_type != RDMA_NODE_IB_SWITCH)
+                       if (!is_switch)
                                return IB_SMI_DISCARD;
 
                        /* return_path set when received */
@@ -77,7 +77,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
                if (*hop_ptr == hop_cnt) {
                        /* return_path set when received */
                        (*hop_ptr)++;
-                       return (node_type == RDMA_NODE_IB_SWITCH ||
+                       return (is_switch ||
                                dr_dlid_is_permissive ?
                                IB_SMI_HANDLE : IB_SMI_DISCARD);
                }
@@ -96,7 +96,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
 
                /* C14-13:2 */
                if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
-                       if (node_type != RDMA_NODE_IB_SWITCH)
+                       if (!is_switch)
                                return IB_SMI_DISCARD;
 
                        (*hop_ptr)--;
@@ -108,7 +108,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
                if (*hop_ptr == 1) {
                        (*hop_ptr)--;
                        /* C14-13:3 -- SMPs destined for SM shouldn't be here */
-                       return (node_type == RDMA_NODE_IB_SWITCH ||
+                       return (is_switch ||
                                dr_slid_is_permissive ?
                                IB_SMI_HANDLE : IB_SMI_DISCARD);
                }
@@ -127,9 +127,9 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
  * Return IB_SMI_DISCARD if the SMP should be discarded
  */
 enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
-                                      u8 node_type, int port_num)
+                                      bool is_switch, int port_num)
 {
-       return __smi_handle_dr_smp_send(node_type, port_num,
+       return __smi_handle_dr_smp_send(is_switch, port_num,
                                        &smp->hop_ptr, smp->hop_cnt,
                                        smp->initial_path,
                                        smp->return_path,
@@ -139,9 +139,9 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
 }
 
 enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
-                                      u8 node_type, int port_num)
+                                      bool is_switch, int port_num)
 {
-       return __smi_handle_dr_smp_send(node_type, port_num,
+       return __smi_handle_dr_smp_send(is_switch, port_num,
                                        &smp->hop_ptr, smp->hop_cnt,
                                        smp->route.dr.initial_path,
                                        smp->route.dr.return_path,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
                                        OPA_LID_PERMISSIVE);
 }
 
-static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
                                                int phys_port_cnt,
                                                u8 *hop_ptr, u8 hop_cnt,
                                                const u8 *initial_path,
@@ -173,7 +173,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
 
                /* C14-9:2 -- intermediate hop */
                if (*hop_ptr && *hop_ptr < hop_cnt) {
-                       if (node_type != RDMA_NODE_IB_SWITCH)
+                       if (!is_switch)
                                return IB_SMI_DISCARD;
 
                        return_path[*hop_ptr] = port_num;
@@ -188,7 +188,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
                                return_path[*hop_ptr] = port_num;
                        /* hop_ptr updated when sending */
 
-                       return (node_type == RDMA_NODE_IB_SWITCH ||
+                       return (is_switch ||
                                dr_dlid_is_permissive ?
                                IB_SMI_HANDLE : IB_SMI_DISCARD);
                }
@@ -208,7 +208,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
 
                /* C14-13:2 */
                if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
-                       if (node_type != RDMA_NODE_IB_SWITCH)
+                       if (!is_switch)
                                return IB_SMI_DISCARD;
 
                        /* hop_ptr updated when sending */
@@ -224,8 +224,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
                                return IB_SMI_HANDLE;
                        }
                        /* hop_ptr updated when sending */
-                       return (node_type == RDMA_NODE_IB_SWITCH ?
-                               IB_SMI_HANDLE : IB_SMI_DISCARD);
+                       return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD);
                }
 
                /* C14-13:4 -- hop_ptr = 0 -> give to SM */
@@ -238,10 +237,10 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
  * Adjust information for a received SMP
  * Return IB_SMI_DISCARD if the SMP should be dropped
  */
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
                                       int port_num, int phys_port_cnt)
 {
-       return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+       return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
                                        &smp->hop_ptr, smp->hop_cnt,
                                        smp->initial_path,
                                        smp->return_path,
@@ -254,10 +253,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
  * Adjust information for a received SMP
  * Return IB_SMI_DISCARD if the SMP should be dropped
  */
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
                                           int port_num, int phys_port_cnt)
 {
-       return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+       return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
                                        &smp->hop_ptr, smp->hop_cnt,
                                        smp->route.dr.initial_path,
                                        smp->route.dr.return_path,
index aff96bac49b4c9e126a1e4b6fd309bc4b3b256aa..33c91c8a16e9524da0da6c00ed7a3259084f2c74 100644 (file)
@@ -51,12 +51,12 @@ enum smi_forward_action {
        IB_SMI_FORWARD  /* SMP should be forwarded (for switches only) */
 };
 
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
                                       int port_num, int phys_port_cnt);
 int smi_get_fwd_port(struct ib_smp *smp);
 extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
 extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
-                                             u8 node_type, int port_num);
+                                             bool is_switch, int port_num);
 
 /*
  * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
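
The opa_smi.h and smi.h prototype changes narrow the u8 node_type parameter to bool is_switch: the SMI code only ever compared node_type against RDMA_NODE_IB_SWITCH, so callers now answer that one question at the boundary instead of passing the raw type through. A minimal caller sketch (dev and port_num are hypothetical locals):

        if (smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(dev), port_num) ==
            IB_SMI_DISCARD)
                return -EINVAL; /* discard per the IBA C14-9/C14-13 rules */
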
index ed6b6c85c334b124e3fa4a47225c3f8a6b5c62df..0b84a9cdfe5b90636d3633dbb42d84ef84c4e98e 100644 (file)
@@ -870,7 +870,7 @@ int ib_device_register_sysfs(struct ib_device *device,
                goto err_put;
        }
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
+       if (rdma_cap_ib_switch(device)) {
                ret = add_port(device, 0, port_callback);
                if (ret)
                        goto err_put;
index 62c24b1452b89e2546f2e023a560ee3a21f222e4..00948107364466cafe28e95557be1fcf829e988c 100644 (file)
@@ -1193,6 +1193,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
        return 0;
 }
 
+static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
 static void ib_ucm_release_dev(struct device *dev)
 {
        struct ib_ucm_device *ucm_dev;
@@ -1202,7 +1203,7 @@ static void ib_ucm_release_dev(struct device *dev)
        if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
                clear_bit(ucm_dev->devnum, dev_map);
        else
-               clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
+               clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
        kfree(ucm_dev);
 }
 
@@ -1226,7 +1227,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
 
 static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
 static int find_overflow_devnum(void)
 {
        int ret;
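
The ucm.c hunk fixes an asymmetric bitmap release: device numbers at or above IB_UCM_MAX_DEVICES are allocated out of overflow_map, but the old release path cleared the offset bit in dev_map, corrupting low device numbers while leaking the overflow bit. Moving the DECLARE_BITMAP above ib_ucm_release_dev() lets release mirror allocation; the pairing, as a sketch:

        /* devnums below IB_UCM_MAX_DEVICES live in dev_map; everything
         * above lives in overflow_map, offset down by IB_UCM_MAX_DEVICES.
         * Release must address the same map the allocation used.
         */
        static void release_devnum(int devnum)
        {
                if (devnum < IB_UCM_MAX_DEVICES)
                        clear_bit(devnum, dev_map);
                else
                        clear_bit(devnum - IB_UCM_MAX_DEVICES, overflow_map);
        }
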
index ad45469f7582dbe47788c5b1330803148c0b5dab..29b21213ea7586129357bd803c7de89096227bf1 100644 (file)
@@ -1354,10 +1354,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
        /* Acquire mutex's based on pointer comparison to prevent deadlock. */
        if (file1 < file2) {
                mutex_lock(&file1->mut);
-               mutex_lock(&file2->mut);
+               mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
        } else {
                mutex_lock(&file2->mut);
-               mutex_lock(&file1->mut);
+               mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
        }
 }
 
@@ -1616,6 +1616,7 @@ static void __exit ucma_cleanup(void)
        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
        misc_deregister(&ucma_misc);
        idr_destroy(&ctx_idr);
+       idr_destroy(&multicast_idr);
 }
 
 module_init(ucma_init);
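
ucma_lock_files() already avoided real deadlock by ordering the two acquisitions by pointer address; switching the second acquisition to mutex_lock_nested() additionally tells lockdep that taking a second lock of the same class is deliberate, silencing the false-positive recursion report. The idiom, sketched for any pair of same-class mutexes:

        static void lock_pair(struct mutex *a, struct mutex *b)
        {
                /* Address ordering keeps the lock order globally
                 * consistent; SINGLE_DEPTH_NESTING marks the second
                 * acquisition as an intentional one-level nesting.
                 */
                if (a < b) {
                        mutex_lock(a);
                        mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
                } else {
                        mutex_lock(b);
                        mutex_lock_nested(a, SINGLE_DEPTH_NESTING);
                }
        }

The added idr_destroy(&multicast_idr) in ucma_cleanup() releases the idr's internal caches on module unload, matching the existing ctx_idr teardown.
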
index 12b5bc23832b13804c1f07b61ee4d83b8650dcd1..376b031c2c7fa0e00bee607ac3462c74e8d3594c 100644 (file)
@@ -226,8 +226,9 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
                return IB_MAD_RESULT_FAILURE;
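
This is the first of several identical process_mad() conversions in this merge (ehca, ipath, mlx4, mlx5, mthca, ocrdma, qib): a size mismatch between caller and driver is a recoverable API error, so panicking the machine with BUG_ON() gives way to warning once and failing the MAD. The distilled pattern:

        /* WARN_ON_ONCE() logs a single backtrace (no log flooding if a
         * broken caller retries) and returns the condition, so the check
         * and the bailout read as one statement.
         */
        if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
                         *out_mad_size != sizeof(*out_mad)))
                return IB_MAD_RESULT_FAILURE;
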
index 948188e37f95ab3fc2dfb2dd4ab7a1698c0ae84f..ad3a926ab3c5d41b393ca04bee9a851ec196460c 100644 (file)
@@ -1499,8 +1499,9 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
index 48253b839a6f741535c11a93cce685e2cce37901..30ba49c4a98c06b21dff0e0599569dfad19f1f17 100644 (file)
@@ -2044,9 +2044,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 
        spin_lock_init(&idev->qp_table.lock);
        spin_lock_init(&idev->lk_table.lock);
-       idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+       idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
        /* Set the prefix to the default value (see ch. 4.1.1) */
-       idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
+       idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
 
        ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
        if (ret)
index 85a50df2f20360e1b8da63959b49901843012198..68b3dfa922bf3e01ce3c00a60674ca508fd50912 100644 (file)
@@ -860,21 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
+       enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
-       switch (rdma_port_get_link_layer(ibdev, port_num)) {
-       case IB_LINK_LAYER_INFINIBAND:
-               if (!mlx4_is_slave(dev->dev))
-                       return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
-                                             in_grh, in_mad, out_mad);
-       case IB_LINK_LAYER_ETHERNET:
-               return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
-                                         in_grh, in_mad, out_mad);
-       default:
-               return -EINVAL;
+       /* iboe_process_mad(), which uses the HCA flow counters to implement
+        * IB PMA queries, should be called only by VFs and for that purpose.
+        */
+       if (link == IB_LINK_LAYER_INFINIBAND) {
+               if (mlx4_is_slave(dev->dev) &&
+                   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+                   in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+                       return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+                                               in_grh, in_mad, out_mad);
+
+               return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+                                     in_grh, in_mad, out_mad);
        }
+
+       if (link == IB_LINK_LAYER_ETHERNET)
+               return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+                                       in_grh, in_mad, out_mad);
+
+       return -EINVAL;
 }
 
 static void send_handler(struct ib_mad_agent *agent,
index 067a691ecbed449e098d333e8d82cf62c47ebd7a..8be6db81646049a741abebe1c4ab982fad7e6011 100644 (file)
@@ -253,14 +253,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
        props->timestamp_mask = 0xFFFFFFFFFFFFULL;
 
-       err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-       if (err)
-               goto out;
+       if (!mlx4_is_slave(dev->dev))
+               err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
 
        if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
-               resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
                resp.response_length += sizeof(resp.hca_core_clock_offset);
-               resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+               if (!err && !mlx4_is_slave(dev->dev)) {
+                       resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+                       resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+               }
        }
 
        if (uhw->outlen) {
@@ -2669,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
        dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
        if (!dm) {
                pr_err("failed to allocate memory for tunneling qp update\n");
-               goto out;
+               return;
        }
 
        for (i = 0; i < ports; i++) {
                dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
                if (!dm[i]) {
                        pr_err("failed to allocate memory for tunneling qp update work struct\n");
-                       for (i = 0; i < dev->caps.num_ports; i++) {
-                               if (dm[i])
-                                       kfree(dm[i]);
-                       }
+                       while (--i >= 0)
+                               kfree(dm[i]);
                        goto out;
                }
-       }
-       /* initialize or tear down tunnel QPs for the slave */
-       for (i = 0; i < ports; i++) {
                INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
                dm[i]->port = first_port + i + 1;
                dm[i]->slave = slave;
                dm[i]->do_init = do_init;
                dm[i]->dev = ibdev;
-               spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
-               if (!ibdev->sriov.is_going_down)
+       }
+       /* initialize or tear down tunnel QPs for the slave */
+       spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+       if (!ibdev->sriov.is_going_down) {
+               for (i = 0; i < ports; i++)
                        queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
                spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+       } else {
+               spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+               for (i = 0; i < ports; i++)
+                       kfree(dm[i]);
        }
 out:
        kfree(dm);
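
Besides deferring the queue-or-free decision until after all allocations, the do_slave_init() rework fixes the failure path: the old cleanup loop swept dev->caps.num_ports entries of an array that was allocated with only `ports` slots. The replacement is the usual partial-unwind idiom, sketched here with hypothetical stand-ins (items/n) for dm/ports:

        /* Free exactly the slots allocated before slot i failed; i must
         * be a signed int for the --i >= 0 test to terminate.
         */
        for (i = 0; i < n; i++) {
                items[i] = kmalloc(sizeof(**items), GFP_ATOMIC);
                if (!items[i]) {
                        while (--i >= 0)
                                kfree(items[i]);
                        goto out;       /* free the array itself there */
                }
        }
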
index 01fc97db45d6e8e3b92f38b1f667627ea4f2a60c..b84d13a487cc04dcfbb501cfbd460c335bf91f8d 100644 (file)
@@ -68,8 +68,9 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
 
index 6b2418b74c99ab84345403afe9d310f04aa465e5..7c3f2fb44ba51d8f288221df7b4d316a56adab28 100644 (file)
@@ -209,8 +209,9 @@ int mthca_process_mad(struct ib_device *ibdev,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        /* Forward locally generated traps to the SM */
        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
index 9047af4299065f543252a29297fbd40e603531b3..8a3ad170d790cc336c08a527db314d859beefd6c 100644 (file)
@@ -1520,8 +1520,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
        int rc = arpindex;
        struct net_device *netdev;
        struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
+       __be32 dst_ipaddr = htonl(dst_ip);
 
-       rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
+       rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0);
        if (IS_ERR(rt)) {
                printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
                       __func__, dst_ip);
@@ -1533,7 +1534,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
        else
                netdev = nesvnic->netdev;
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+       neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
 
        rcu_read_lock();
        if (neigh) {
index 02120d340d50cffa62fa87bb31e4e22502d5e547..4713dd7ed76432b6d11b042e6dfd24c268f82325 100644 (file)
@@ -3861,7 +3861,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
                                (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
                                (((u32)mac_addr[4]) << 8)  | (u32)mac_addr[5]);
                cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
-                               (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
+                               (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]);
        } else {
                cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
                cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
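
The nes_hw.c one-liner fixes how the top two bytes of the MAC land in the ARP WQE: they occupy the low 16 bits of the HIGH word, so mac_addr[0] belongs at bits 15..8, not 23..16. A standalone sketch of the packing (hypothetical helper name):

        #include <stdint.h>

        /* Bytes 2..5 fill the LOW word; bytes 0..1 sit in the low 16 bits
         * of the HIGH word (byte 0 at bits 15..8, byte 1 at bits 7..0).
         */
        static void nes_pack_mac(const uint8_t mac[6],
                                 uint32_t *low, uint32_t *high)
        {
                *low  = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
                        ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
                *high = ((uint32_t)mac[0] << 8)  |  (uint32_t)mac[1];
        }
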
index 4bafa15708d0fc4212587cb4dfd6fce7d3f3211b..29b27675dd709e8271708c6ca91cc9deb0a1076e 100644 (file)
@@ -215,8 +215,9 @@ int ocrdma_process_mad(struct ib_device *ibdev,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_PERF_MGMT:
index 8a1398b253a2bec42f2d0032e0aa915ce747bfd9..d98a707a5eb9b3e27a51548a0fbe2ae9b893ebab 100644 (file)
@@ -696,6 +696,7 @@ static void __exit ocrdma_exit_module(void)
        ocrdma_unregister_inet6addr_notifier();
        ocrdma_unregister_inetaddr_notifier();
        ocrdma_rem_debugfs();
+       idr_destroy(&ocrdma_dev_id);
 }
 
 module_init(ocrdma_init_module);
index 05e3242d84425acd6229204e642084a4ce0f654d..9625e7c438e57749c12495799fb896ac293880fd 100644 (file)
@@ -2412,8 +2412,9 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
index bd94b0a6e9e535f8d8b4a9e1fa1428e0696e2947..79859c4d43c9c572f4946a364b87fdc74acf40a3 100644 (file)
@@ -239,7 +239,7 @@ struct ipoib_cm_tx {
        struct net_device   *dev;
        struct ipoib_neigh  *neigh;
        struct ipoib_path   *path;
-       struct ipoib_cm_tx_buf *tx_ring;
+       struct ipoib_tx_buf *tx_ring;
        unsigned             tx_head;
        unsigned             tx_tail;
        unsigned long        flags;
@@ -504,6 +504,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev);
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
 
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+                       struct ipoib_tx_buf *tx_req);
+
+static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
+                                  struct ipoib_tx_buf *tx_req)
+{
+       int i, off;
+       struct sk_buff *skb = tx_req->skb;
+       skb_frag_t *frags = skb_shinfo(skb)->frags;
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+       u64 *mapping = tx_req->mapping;
+
+       if (skb_headlen(skb)) {
+               priv->tx_sge[0].addr         = mapping[0];
+               priv->tx_sge[0].length       = skb_headlen(skb);
+               off = 1;
+       } else
+               off = 0;
+
+       for (i = 0; i < nr_frags; ++i) {
+               priv->tx_sge[i + off].addr = mapping[i + off];
+               priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
+       }
+       priv->tx_wr.num_sge          = nr_frags + off;
+}
+
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
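
ipoib_build_sge() is hoisted into the header so the UD path (ipoib_ib.c) and the newly scatter/gather-capable CM path (ipoib_cm.c) share one gather-list builder. A worked example of its output:

        /* An skb with a 128-byte linear head and two page fragments of
         * 4096 and 1024 bytes yields three gather entries, so a single
         * send WR covers the whole packet:
         *
         *   tx_sge[0].addr = mapping[0], .length = 128    (linear head)
         *   tx_sge[1].addr = mapping[1], .length = 4096   (fragment 0)
         *   tx_sge[2].addr = mapping[2], .length = 1024   (fragment 1)
         *   tx_wr.num_sge  = 3
         */
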
index cf32a778e7d0ccc0b6225d9c01442f5d2ec4cdb1..ee39be6ccfb0fdd9aa75ad408543e1f0b08ff1d4 100644 (file)
@@ -694,14 +694,12 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 static inline int post_send(struct ipoib_dev_priv *priv,
                            struct ipoib_cm_tx *tx,
                            unsigned int wr_id,
-                           u64 addr, int len)
+                           struct ipoib_tx_buf *tx_req)
 {
        struct ib_send_wr *bad_wr;
 
-       priv->tx_sge[0].addr          = addr;
-       priv->tx_sge[0].length        = len;
+       ipoib_build_sge(priv, tx_req);
 
-       priv->tx_wr.num_sge     = 1;
        priv->tx_wr.wr_id       = wr_id | IPOIB_OP_CM;
 
        return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
@@ -710,8 +708,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
-       struct ipoib_cm_tx_buf *tx_req;
-       u64 addr;
+       struct ipoib_tx_buf *tx_req;
        int rc;
 
        if (unlikely(skb->len > tx->mtu)) {
@@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
         */
        tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
-       addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
-       if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+
+       if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }
 
-       tx_req->mapping = addr;
-
        skb_orphan(skb);
        skb_dst_drop(skb);
 
-       rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
-                      addr, skb->len);
+       rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
-               ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+               ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;
@@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_tx *tx = wc->qp->qp_context;
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
-       struct ipoib_cm_tx_buf *tx_req;
+       struct ipoib_tx_buf *tx_req;
        unsigned long flags;
 
        ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
        tx_req = &tx->tx_ring[wr_id];
 
-       ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+       ipoib_dma_unmap_tx(priv, tx_req);
 
        /* FIXME: is this right? Shouldn't we only increment on success? */
        ++dev->stats.tx_packets;
@@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 
        struct ib_qp *tx_qp;
 
+       if (dev->features & NETIF_F_SG)
+               attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+
        tx_qp = ib_create_qp(priv->pd, &attr);
        if (PTR_ERR(tx_qp) == -EINVAL) {
                ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
@@ -1170,7 +1167,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 {
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
-       struct ipoib_cm_tx_buf *tx_req;
+       struct ipoib_tx_buf *tx_req;
        unsigned long begin;
 
        ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1197,8 +1194,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 
        while ((int) p->tx_tail - (int) p->tx_head < 0) {
                tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
-               ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
-                                   DMA_TO_DEVICE);
+               ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(tx_req->skb);
                ++p->tx_tail;
                netif_tx_lock_bh(p->dev);
@@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work)
        spin_unlock_irq(&priv->lock);
 }
 
-
 static ssize_t show_mode(struct device *d, struct device_attribute *attr,
                         char *buf)
 {
index 63b92cbb29ad0ad1f0165a738a47efbe0f650e04..d266667ca9b82273dd4b7abb4856f69b29f65174 100644 (file)
@@ -263,8 +263,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                           "for buf %d\n", wr_id);
 }
 
-static int ipoib_dma_map_tx(struct ib_device *ca,
-                           struct ipoib_tx_buf *tx_req)
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
 {
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
@@ -305,8 +304,8 @@ static int ipoib_dma_map_tx(struct ib_device *ca,
        return -EIO;
 }
 
-static void ipoib_dma_unmap_tx(struct ib_device *ca,
-                              struct ipoib_tx_buf *tx_req)
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+                       struct ipoib_tx_buf *tx_req)
 {
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
@@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
        int off;
 
        if (skb_headlen(skb)) {
-               ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+               ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
+                                   DMA_TO_DEVICE);
                off = 1;
        } else
                off = 0;
@@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
-                                 DMA_TO_DEVICE);
+               ib_dma_unmap_page(priv->ca, mapping[i + off],
+                                 skb_frag_size(frag), DMA_TO_DEVICE);
        }
 }
 
@@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
        tx_req = &priv->tx_ring[wr_id];
 
-       ipoib_dma_unmap_tx(priv->ca, tx_req);
+       ipoib_dma_unmap_tx(priv, tx_req);
 
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;
@@ -514,24 +514,10 @@ static inline int post_send(struct ipoib_dev_priv *priv,
                            void *head, int hlen)
 {
        struct ib_send_wr *bad_wr;
-       int i, off;
        struct sk_buff *skb = tx_req->skb;
-       skb_frag_t *frags = skb_shinfo(skb)->frags;
-       int nr_frags = skb_shinfo(skb)->nr_frags;
-       u64 *mapping = tx_req->mapping;
 
-       if (skb_headlen(skb)) {
-               priv->tx_sge[0].addr         = mapping[0];
-               priv->tx_sge[0].length       = skb_headlen(skb);
-               off = 1;
-       } else
-               off = 0;
+       ipoib_build_sge(priv, tx_req);
 
-       for (i = 0; i < nr_frags; ++i) {
-               priv->tx_sge[i + off].addr = mapping[i + off];
-               priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
-       }
-       priv->tx_wr.num_sge          = nr_frags + off;
        priv->tx_wr.wr_id            = wr_id;
        priv->tx_wr.wr.ud.remote_qpn = qpn;
        priv->tx_wr.wr.ud.ah         = address;
@@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                --priv->tx_outstanding;
-               ipoib_dma_unmap_tx(priv->ca, tx_req);
+               ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
@@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
-                               ipoib_dma_unmap_tx(priv->ca, tx_req);
+                               ipoib_dma_unmap_tx(priv, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                                --priv->tx_outstanding;
@@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
 }
 
 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
-                               enum ipoib_flush_level level)
+                               enum ipoib_flush_level level,
+                               int nesting)
 {
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
        int result;
 
-       down_read(&priv->vlan_rwsem);
+       down_read_nested(&priv->vlan_rwsem, nesting);
 
        /*
         * Flush any child interfaces too -- they might be up even if
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
-               __ipoib_ib_dev_flush(cpriv, level);
+               __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
 
        up_read(&priv->vlan_rwsem);
 
@@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_light);
 
-       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
+       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
 }
 
 void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_normal);
 
-       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
+       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
 }
 
 void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_heavy);
 
-       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
+       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
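
__ipoib_ib_dev_flush() recurses into child interfaces while still holding the parent's vlan_rwsem; threading the recursion depth through to down_read_nested() gives each tree level a distinct lockdep subclass, so the nested read locks are not reported as a self-deadlock. The shape of the idiom:

        /* Each level locks its own rwsem at subclass `nesting`; lockdep
         * supports only a few subclasses (MAX_LOCKDEP_SUBCLASSES is 8),
         * which bounds the usable tree depth in practice.
         */
        static void flush_tree(struct ipoib_dev_priv *priv, int nesting)
        {
                struct ipoib_dev_priv *cpriv;

                down_read_nested(&priv->vlan_rwsem, nesting);
                list_for_each_entry(cpriv, &priv->child_intfs, list)
                        flush_tree(cpriv, nesting + 1);
                up_read(&priv->vlan_rwsem);
        }
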
index da149c278cb8149a7541169c7b05147be82f8ed5..b2943c84a5dda0aecdd8904917f2ebbb02b9b013 100644 (file)
@@ -190,7 +190,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
        if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
-               features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+               features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 
        return features;
 }
@@ -232,6 +232,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
                ipoib_warn(priv, "enabling connected mode "
                           "will cause multicast packet drops\n");
                netdev_update_features(dev);
+               dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
                rtnl_unlock();
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
 
@@ -1577,7 +1578,8 @@ static struct net_device *ipoib_add_port(const char *format,
        SET_NETDEV_DEV(priv->dev, hca->dma_device);
        priv->dev->dev_id = port - 1;
 
-       if (!ib_query_port(hca, port, &attr))
+       result = ib_query_port(hca, port, &attr);
+       if (!result)
                priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
        else {
                printk(KERN_WARNING "%s: ib_query_port %d failed\n",
@@ -1598,7 +1600,8 @@ static struct net_device *ipoib_add_port(const char *format,
                goto device_init_failed;
        }
 
-       if (ipoib_set_dev_features(priv, hca))
+       result = ipoib_set_dev_features(priv, hca);
+       if (result)
                goto device_init_failed;
 
        /*
@@ -1684,7 +1687,7 @@ static void ipoib_add_one(struct ib_device *device)
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
-       int s, e, p;
+       int p;
        int count = 0;
 
        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
@@ -1693,15 +1696,7 @@ static void ipoib_add_one(struct ib_device *device)
 
        INIT_LIST_HEAD(dev_list);
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
-               s = 0;
-               e = 0;
-       } else {
-               s = 1;
-               e = device->phys_port_cnt;
-       }
-
-       for (p = s; p <= e; ++p) {
+       for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
                if (!rdma_protocol_ib(device, p))
                        continue;
                dev = ipoib_add_port("ib%d", device, p);
index 267dc4f7550236e89fae58ff80c4a3953cff877f..31a20b462266611299aeeae5cd51fd19b69b635e 100644 (file)
@@ -161,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp)
 {
        int tmo, res;
 
-       if (strncmp(val, "off", 3) != 0) {
-               res = kstrtoint(val, 0, &tmo);
-               if (res)
-                       goto out;
-       } else {
-               tmo = -1;
-       }
+       res = srp_parse_tmo(&tmo, val);
+       if (res)
+               goto out;
+
        if (kp->arg == &srp_reconnect_delay)
                res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
                                    srp_dev_loss_tmo);
@@ -3379,7 +3376,7 @@ static void srp_add_one(struct ib_device *device)
        struct srp_device *srp_dev;
        struct ib_device_attr *dev_attr;
        struct srp_host *host;
-       int mr_page_shift, s, e, p;
+       int mr_page_shift, p;
        u64 max_pages_per_mr;
 
        dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
@@ -3443,15 +3440,7 @@ static void srp_add_one(struct ib_device *device)
        if (IS_ERR(srp_dev->mr))
                goto err_pd;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
-               s = 0;
-               e = 0;
-       } else {
-               s = 1;
-               e = device->phys_port_cnt;
-       }
-
-       for (p = s; p <= e; ++p) {
+       for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
                host = srp_add_port(srp_dev, p);
                if (host)
                        list_add_tail(&host->list, &srp_dev->dev_list);
index 82897ca17f32349df3e3cc332b9b0204bbc524a3..60ff0a2390e5f02f7cffabb9a5154cc0c4b734e7 100644 (file)
@@ -302,7 +302,7 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
        int i;
 
        ioui = (struct ib_dm_iou_info *)mad->data;
-       ioui->change_id = __constant_cpu_to_be16(1);
+       ioui->change_id = cpu_to_be16(1);
        ioui->max_controllers = 16;
 
        /* set present for slot 1 and empty for the rest */
@@ -330,13 +330,13 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
 
        if (!slot || slot > 16) {
                mad->mad_hdr.status
-                       = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+                       = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
                return;
        }
 
        if (slot > 2) {
                mad->mad_hdr.status
-                       = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+                       = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
                return;
        }
 
@@ -348,10 +348,10 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
        iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
        iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
        iocp->subsys_device_id = 0x0;
-       iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
-       iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
-       iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
-       iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+       iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+       iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
+       iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
+       iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
        iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
        iocp->rdma_read_depth = 4;
        iocp->send_size = cpu_to_be32(srp_max_req_size);
@@ -379,13 +379,13 @@ static void srpt_get_svc_entries(u64 ioc_guid,
 
        if (!slot || slot > 16) {
                mad->mad_hdr.status
-                       = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+                       = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
                return;
        }
 
        if (slot > 2 || lo > hi || hi > 1) {
                mad->mad_hdr.status
-                       = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+                       = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
                return;
        }
 
@@ -436,7 +436,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
                break;
        default:
                rsp_mad->mad_hdr.status =
-                   __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+                   cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
                break;
        }
 }
@@ -493,11 +493,11 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
                break;
        case IB_MGMT_METHOD_SET:
                dm_mad->mad_hdr.status =
-                   __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+                   cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
                break;
        default:
                dm_mad->mad_hdr.status =
-                   __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+                   cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
                break;
        }
 
@@ -1535,7 +1535,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
        memset(srp_rsp, 0, sizeof *srp_rsp);
        srp_rsp->opcode = SRP_RSP;
        srp_rsp->req_lim_delta =
-               __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+               cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
        srp_rsp->tag = tag;
        srp_rsp->status = status;
 
@@ -1585,8 +1585,8 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
        memset(srp_rsp, 0, sizeof *srp_rsp);
 
        srp_rsp->opcode = SRP_RSP;
-       srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
-                                   + atomic_xchg(&ch->req_lim_delta, 0));
+       srp_rsp->req_lim_delta =
+               cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
        srp_rsp->tag = tag;
 
        srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
@@ -1630,7 +1630,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
        switch (len) {
        case 8:
                if ((*((__be64 *)lun) &
-                    __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+                    cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
                        goto out_err;
                break;
        case 4:
@@ -2449,8 +2449,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        }
 
        if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
                ret = -EINVAL;
                pr_err("rejected SRP_LOGIN_REQ because its"
                       " length (%d bytes) is out of range (%d .. %d)\n",
@@ -2459,8 +2459,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        }
 
        if (!sport->enabled) {
-               rej->reason = __constant_cpu_to_be32(
-                            SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                ret = -EINVAL;
                pr_err("rejected SRP_LOGIN_REQ because the target port"
                       " has not yet been enabled\n");
@@ -2505,8 +2505,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
            || *(__be64 *)(req->target_port_id + 8) !=
               cpu_to_be64(srpt_service_guid)) {
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
                ret = -ENOMEM;
                pr_err("rejected SRP_LOGIN_REQ because it"
                       " has an invalid target port identifier.\n");
@@ -2515,8 +2515,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 
        ch = kzalloc(sizeof *ch, GFP_KERNEL);
        if (!ch) {
-               rej->reason = __constant_cpu_to_be32(
-                                       SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
                ret = -ENOMEM;
                goto reject;
@@ -2552,8 +2552,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 
        ret = srpt_create_ch_ib(ch);
        if (ret) {
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_err("rejected SRP_LOGIN_REQ because creating"
                       " a new RDMA channel failed.\n");
                goto free_ring;
@@ -2561,8 +2561,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 
        ret = srpt_ch_qp_rtr(ch, ch->qp);
        if (ret) {
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_err("rejected SRP_LOGIN_REQ because enabling"
                       " RTR failed (error code = %d)\n", ret);
                goto destroy_ib;
@@ -2580,15 +2579,15 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        if (!nacl) {
                pr_info("Rejected login because no ACL has been"
                        " configured yet for initiator %s.\n", ch->sess_name);
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
                goto destroy_ib;
        }
 
        ch->sess = transport_init_session(TARGET_PROT_NORMAL);
        if (IS_ERR(ch->sess)) {
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_debug("Failed to create session\n");
                goto deregister_session;
        }
@@ -2604,8 +2603,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        rsp->max_it_iu_len = req->req_it_iu_len;
        rsp->max_ti_iu_len = req->req_it_iu_len;
        ch->max_ti_iu_len = it_iu_len;
-       rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
-                                             | SRP_BUF_FORMAT_INDIRECT);
+       rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+                                  | SRP_BUF_FORMAT_INDIRECT);
        rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
        atomic_set(&ch->req_lim, ch->rq_size);
        atomic_set(&ch->req_lim_delta, 0);
@@ -2655,8 +2654,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 reject:
        rej->opcode = SRP_LOGIN_REJ;
        rej->tag = req->tag;
-       rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
-                                             | SRP_BUF_FORMAT_INDIRECT);
+       rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+                                  | SRP_BUF_FORMAT_INDIRECT);
 
        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
                             (void *)rej, sizeof *rej);
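
The srpt churn from __constant_cpu_to_be16() to cpu_to_be16() is safe because the plain conversion macros already fold constant arguments at compile time, leaving the __constant_ spellings redundant in ordinary code. Simplified from include/uapi/linux/swab.h, the dispatch looks roughly like:

        #define ___constant_swab16(x) ((__u16)(                 \
                (((__u16)(x) & (__u16)0x00ffU) << 8) |          \
                (((__u16)(x) & (__u16)0xff00U) >> 8)))

        /* Constant argument -> compile-time fold; otherwise the
         * out-of-line (or arch-optimized) byte swap.
         */
        #define __swab16(x)                                     \
                (__builtin_constant_p((__u16)(x)) ?             \
                 ___constant_swab16(x) :                        \
                 __fswab16(x))
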
index 1aec8ff0b58743214ff2ad520dea009d0a1f3f96..f73d2f579a7ef26f053eb5d314dd71e54ed60b82 100644 (file)
@@ -1862,6 +1862,33 @@ static void __dasd_device_check_expire(struct dasd_device *device)
        }
 }
 
+/*
+ * Return 1 when the device is not eligible for I/O.
+ */
+static int __dasd_device_is_unusable(struct dasd_device *device,
+                                    struct dasd_ccw_req *cqr)
+{
+       int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
+
+       if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+               /* dasd is being set offline. */
+               return 1;
+       }
+       if (device->stopped) {
+               if (device->stopped & mask) {
+                       /* stopped and CQR will not change that. */
+                       return 1;
+               }
+               if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+                       /* CQR is not able to change device to
+                        * operational. */
+                       return 1;
+               }
+               /* CQR required to get device operational. */
+       }
+       return 0;
+}
+
 /*
  * Take a look at the first request on the ccw queue and check
  * if it needs to be started.
@@ -1876,13 +1903,8 @@ static void __dasd_device_start_head(struct dasd_device *device)
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        if (cqr->status != DASD_CQR_QUEUED)
                return;
-       /* when device is stopped, return request to previous layer
-        * exception: only the disconnect or unresumed bits are set and the
-        * cqr is a path verification request
-        */
-       if (device->stopped &&
-           !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
-             && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+       /* if the device is not usable, return the request to the upper layer */
+       if (__dasd_device_is_unusable(device, cqr)) {
                cqr->intrc = -EAGAIN;
                cqr->status = DASD_CQR_CLEARED;
                dasd_schedule_device_bh(device);
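
Factoring the eligibility test into __dasd_device_is_unusable() also widens it: the helper newly refuses requests once DASD_FLAG_OFFLINE is set, which the old inline condition never checked. For the stopped-device part the two forms are equivalent; with mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM):

        /* old:  stopped && !( !(stopped & mask) && verify_path )
         *
         * distributes (De Morgan) to
         *
         * new:  stopped && ( (stopped & mask) || !verify_path )
         *
         * i.e. exactly the two early returns inside the helper's
         * "if (device->stopped)" branch.
         */
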
index a2597e683e790237d45db2dfd4b4a5ad655d85e9..ee3a6faae22a0b07a8eb8c40684e9d36c825bcfe 100644 (file)
@@ -699,7 +699,8 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
                                               struct dasd_device, alias_list);
        spin_unlock_irqrestore(&lcu->lock, flags);
        alias_priv = (struct dasd_eckd_private *) alias_device->private;
-       if ((alias_priv->count < private->count) && !alias_device->stopped)
+       if ((alias_priv->count < private->count) && !alias_device->stopped &&
+           !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
                return alias_device;
        else
                return NULL;
index aeed7969fd792ba35f89cbf3b2f26e357b20de16..7bc6df3100efa8e507641172491b9d0039b9d6dd 100644 (file)
@@ -7,6 +7,7 @@
 #define KMSG_COMPONENT "sclp_early"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/errno.h>
 #include <asm/ctl_reg.h>
 #include <asm/sclp.h>
 #include <asm/ipl.h>
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 08f1830cbfc4020e5901b9ae1f12fae518f29ac5..01bf1f5cf2e95a7f40f722f51e4df87ea257e724 100644
@@ -54,6 +54,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
                   "Copyright IBM Corp. 2001, 2012");
 MODULE_LICENSE("GPL");
 
+static int zcrypt_hwrng_seed = 1;
+module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
+MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
+
 static DEFINE_SPINLOCK(zcrypt_device_lock);
 static LIST_HEAD(zcrypt_device_list);
 static int zcrypt_device_count = 0;
@@ -1373,6 +1377,7 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
 static struct hwrng zcrypt_rng_dev = {
        .name           = "zcrypt",
        .data_read      = zcrypt_rng_data_read,
+       .quality        = 990,
 };
 
 static int zcrypt_rng_device_add(void)
@@ -1387,6 +1392,8 @@ static int zcrypt_rng_device_add(void)
                        goto out;
                }
                zcrypt_rng_buffer_index = 0;
+               if (!zcrypt_hwrng_seed)
+                       zcrypt_rng_dev.quality = 0;
                rc = hwrng_register(&zcrypt_rng_dev);
                if (rc)
                        goto out_free;
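
Together these hunks let the zcrypt RNG feed the kernel's entropy pool by default: a nonzero .quality (990) makes the hwrng core treat the device as a seed source, while loading with hwrng_seed=0 zeroes the quality before registration so only explicit /dev/hwrng reads consume it. A simplified model of that gating decision (an illustration, not the hwrng core's actual code):

    #include <stdbool.h>
    #include <stdio.h>

    struct rng_model {
            const char     *name;
            unsigned short  quality;        /* 0 == do not auto-seed */
    };

    /* Simplified: the core only starts its seeding thread for devices
     * advertising nonzero quality; userspace reads are unaffected. */
    static bool auto_seeds(const struct rng_model *r)
    {
            return r->quality > 0;
    }

    int main(void)
    {
            struct rng_model seeded   = { "zcrypt", 990 };
            struct rng_model unseeded = { "zcrypt", 0 };

            printf("seeded=%d unseeded=%d\n",
                   auto_seeds(&seeded), auto_seeds(&unseeded));
            return 0;
    }
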
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index a85292b1d09d090261f89f2f6eed018f6277abb9..e3cd3ece44121c7d8ee3806a40a42acf069f6958 100644
@@ -203,7 +203,7 @@ static ssize_t srp_show_tmo(char *buf, int tmo)
        return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
 }
 
-static int srp_parse_tmo(int *tmo, const char *buf)
+int srp_parse_tmo(int *tmo, const char *buf)
 {
        int res = 0;
 
@@ -214,6 +214,7 @@ static int srp_parse_tmo(int *tmo, const char *buf)
 
        return res;
 }
+EXPORT_SYMBOL(srp_parse_tmo);
 
 static ssize_t show_reconnect_delay(struct device *dev,
                                    struct device_attribute *attr, char *buf)
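
srp_parse_tmo() loses its static and gains an export so SRP initiator drivers can share one parser for the sysfs timeout attributes. Its body is mostly elided by the hunk context, but srp_show_tmo() above implies the convention: negative means "off". A userspace analogue under that assumption, with strtol() standing in for the kernel's integer parsing:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* "off" disables the timeout (negative value); else parse an integer. */
    static int parse_tmo(int *tmo, const char *buf)
    {
            char *end;
            long v;

            if (!strncmp(buf, "off", 3)) {
                    *tmo = -1;
                    return 0;
            }
            errno = 0;
            v = strtol(buf, &end, 10);
            if (errno || end == buf)
                    return -EINVAL;
            *tmo = (int)v;
            return 0;
    }

    static void show_tmo(int tmo)
    {
            tmo >= 0 ? printf("%d\n", tmo) : printf("off\n");
    }

    int main(void)
    {
            int tmo;

            if (parse_tmo(&tmo, "off") == 0)
                    show_tmo(tmo);          /* prints "off" */
            if (parse_tmo(&tmo, "60") == 0)
                    show_tmo(tmo);          /* prints "60" */
            return 0;
    }
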
diff --git a/fs/locks.c b/fs/locks.c
index 653faabb07f46b70f4d414519ae6093e2d52e5cf..d3d558ba4da7966de9699aeb98b2630bd7c6854e 100644
@@ -862,12 +862,11 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
  * whether or not a lock was successfully freed by testing the return
  * value for -ENOENT.
  */
-static int flock_lock_file(struct file *filp, struct file_lock *request)
+static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 {
        struct file_lock *new_fl = NULL;
        struct file_lock *fl;
        struct file_lock_context *ctx;
-       struct inode *inode = file_inode(filp);
        int error = 0;
        bool found = false;
        LIST_HEAD(dispose);
@@ -890,7 +889,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
                goto find_conflict;
 
        list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
-               if (filp != fl->fl_file)
+               if (request->fl_file != fl->fl_file)
                        continue;
                if (request->fl_type == fl->fl_type)
                        goto out;
@@ -1164,20 +1163,19 @@ int posix_lock_file(struct file *filp, struct file_lock *fl,
 EXPORT_SYMBOL(posix_lock_file);
 
 /**
- * posix_lock_file_wait - Apply a POSIX-style lock to a file
- * @filp: The file to apply the lock to
+ * posix_lock_inode_wait - Apply a POSIX-style lock to a file
+ * @inode: inode of file to which lock request should be applied
  * @fl: The lock to be applied
  *
- * Add a POSIX style lock to a file.
- * We merge adjacent & overlapping locks whenever possible.
- * POSIX locks are sorted by owner task, then by starting address
+ * Variant of posix_lock_file_wait that does not take a filp, and so can be
+ * used after the filp has already been torn down.
  */
-int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
 {
        int error;
        might_sleep ();
        for (;;) {
-               error = posix_lock_file(filp, fl, NULL);
+               error = __posix_lock_file(inode, fl, NULL);
                if (error != FILE_LOCK_DEFERRED)
                        break;
                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1189,7 +1187,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
        }
        return error;
 }
-EXPORT_SYMBOL(posix_lock_file_wait);
+EXPORT_SYMBOL(posix_lock_inode_wait);
 
 /**
  * locks_mandatory_locked - Check for an active lock
@@ -1851,18 +1849,18 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 }
 
 /**
- * flock_lock_file_wait - Apply a FLOCK-style lock to a file
- * @filp: The file to apply the lock to
+ * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
+ * @inode: inode of the file to apply the lock to
  * @fl: The lock to be applied
  *
- * Add a FLOCK style lock to a file.
+ * Apply a FLOCK style lock request to an inode.
  */
-int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
 {
        int error;
        might_sleep();
        for (;;) {
-               error = flock_lock_file(filp, fl);
+               error = flock_lock_inode(inode, fl);
                if (error != FILE_LOCK_DEFERRED)
                        break;
                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1874,8 +1872,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
        }
        return error;
 }
-
-EXPORT_SYMBOL(flock_lock_file_wait);
+EXPORT_SYMBOL(flock_lock_inode_wait);
 
 /**
  *     sys_flock: - flock() system call.
@@ -2401,7 +2398,8 @@ locks_remove_flock(struct file *filp)
                .fl_type = F_UNLCK,
                .fl_end = OFFSET_MAX,
        };
-       struct file_lock_context *flctx = file_inode(filp)->i_flctx;
+       struct inode *inode = file_inode(filp);
+       struct file_lock_context *flctx = inode->i_flctx;
 
        if (list_empty(&flctx->flc_flock))
                return;
@@ -2409,7 +2407,7 @@ locks_remove_flock(struct file *filp)
        if (filp->f_op->flock)
                filp->f_op->flock(filp, F_SETLKW, &fl);
        else
-               flock_lock_file(filp, &fl);
+               flock_lock_inode(inode, &fl);
 
        if (fl.fl_ops && fl.fl_ops->fl_release_private)
                fl.fl_ops->fl_release_private(&fl);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6f228b5af819ea576240c40869c1da74d823e460..8bee93469617eeddffa0e56038deed9051221420 100644
@@ -5439,15 +5439,15 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
        return err;
 }
 
-static int do_vfs_lock(struct file *file, struct file_lock *fl)
+static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
 {
        int res = 0;
        switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
                case FL_POSIX:
-                       res = posix_lock_file_wait(file, fl);
+                       res = posix_lock_inode_wait(inode, fl);
                        break;
                case FL_FLOCK:
-                       res = flock_lock_file_wait(file, fl);
+                       res = flock_lock_inode_wait(inode, fl);
                        break;
                default:
                        BUG();
@@ -5484,7 +5484,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
        atomic_inc(&lsp->ls_count);
        /* Ensure we don't close file until we're done freeing locks! */
        p->ctx = get_nfs_open_context(ctx);
-       get_file(fl->fl_file);
        memcpy(&p->fl, fl, sizeof(p->fl));
        p->server = NFS_SERVER(inode);
        return p;
@@ -5496,7 +5495,6 @@ static void nfs4_locku_release_calldata(void *data)
        nfs_free_seqid(calldata->arg.seqid);
        nfs4_put_lock_state(calldata->lsp);
        put_nfs_open_context(calldata->ctx);
-       fput(calldata->fl.fl_file);
        kfree(calldata);
 }
 
@@ -5509,7 +5507,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
        switch (task->tk_status) {
                case 0:
                        renew_lease(calldata->server, calldata->timestamp);
-                       do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
+                       do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
                        if (nfs4_update_lock_stateid(calldata->lsp,
                                        &calldata->res.stateid))
                                break;
@@ -5617,7 +5615,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
        mutex_lock(&sp->so_delegreturn_mutex);
        /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
        down_read(&nfsi->rwsem);
-       if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
+       if (do_vfs_lock(inode, request) == -ENOENT) {
                up_read(&nfsi->rwsem);
                mutex_unlock(&sp->so_delegreturn_mutex);
                goto out;
@@ -5758,7 +5756,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
                                data->timestamp);
                if (data->arg.new_lock) {
                        data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
-                       if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
+                       if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
                                rpc_restart_call_prepare(task);
                                break;
                        }
@@ -6000,7 +5998,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
        if (status != 0)
                goto out;
        request->fl_flags |= FL_ACCESS;
-       status = do_vfs_lock(request->fl_file, request);
+       status = do_vfs_lock(state->inode, request);
        if (status < 0)
                goto out;
        down_read(&nfsi->rwsem);
@@ -6008,7 +6006,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
                /* Yes: cache locks! */
                /* ...but avoid races with delegation recall... */
                request->fl_flags = fl_flags & ~FL_SLEEP;
-               status = do_vfs_lock(request->fl_file, request);
+               status = do_vfs_lock(state->inode, request);
                up_read(&nfsi->rwsem);
                goto out;
        }
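
The get_file()/fput() pair becomes unnecessary because every do_vfs_lock() call in these paths now resolves its inode from the lock/open state, which is pinned for the RPC's lifetime, rather than from fl->fl_file; an unlock can therefore complete even after the last filp reference is gone. A hypothetical kernel-style sketch of the resulting convention:

    /* Hypothetical helper: only the (pinned) inode is needed, so this is
     * safe to run from an RPC callback after the filp was torn down. */
    static int sketch_unlock(struct inode *inode, struct file_lock *fl)
    {
            fl->fl_type = F_UNLCK;
            return do_vfs_lock(inode, fl);
    }
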
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a0653e560c2679a2eea870035a55cd3282e47894..cc008c338f5a9bcb66076da96e929d6104697373 100644
@@ -1046,12 +1046,12 @@ extern void locks_remove_file(struct file *);
 extern void locks_release_private(struct file_lock *);
 extern void posix_test_lock(struct file *, struct file_lock *);
 extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_file_wait(struct file *, struct file_lock *);
+extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
 extern int posix_unblock_lock(struct file_lock *);
 extern int vfs_test_lock(struct file *, struct file_lock *);
 extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
 extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
 extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
 extern void lease_get_mtime(struct inode *, struct timespec *time);
 extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
@@ -1137,7 +1137,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
        return -ENOLCK;
 }
 
-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+static inline int posix_lock_inode_wait(struct inode *inode,
+                                       struct file_lock *fl)
 {
        return -ENOLCK;
 }
@@ -1163,8 +1164,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
        return 0;
 }
 
-static inline int flock_lock_file_wait(struct file *filp,
-                                      struct file_lock *request)
+static inline int flock_lock_inode_wait(struct inode *inode,
+                                       struct file_lock *request)
 {
        return -ENOLCK;
 }
@@ -1202,6 +1203,20 @@ static inline void show_fd_locks(struct seq_file *f,
                        struct file *filp, struct files_struct *files) {}
 #endif /* !CONFIG_FILE_LOCKING */
 
+static inline struct inode *file_inode(const struct file *f)
+{
+       return f->f_inode;
+}
+
+static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+       return posix_lock_inode_wait(file_inode(filp), fl);
+}
+
+static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+       return flock_lock_inode_wait(file_inode(filp), fl);
+}
 
 struct fasync_struct {
        spinlock_t              fa_lock;
@@ -2011,11 +2026,6 @@ extern void ihold(struct inode * inode);
 extern void iput(struct inode *);
 extern int generic_update_time(struct inode *, struct timespec *, int);
 
-static inline struct inode *file_inode(const struct file *f)
-{
-       return f->f_inode;
-}
-
 /* /sys/fs */
 extern struct kobject *fs_kobj;
 
diff --git a/include/linux/init.h b/include/linux/init.h
index 7c68c36d3fd88788f043447c5bed889cdd408bb6..b449f378f995ae647077f521d9f7af3af9480a70 100644
@@ -282,68 +282,8 @@ void __init parse_early_param(void);
 void __init parse_early_options(char *cmdline);
 #endif /* __ASSEMBLY__ */
 
-/**
- * module_init() - driver initialization entry point
- * @x: function to be run at kernel boot time or module insertion
- * 
- * module_init() will either be called during do_initcalls() (if
- * builtin) or at module insertion time (if a module).  There can only
- * be one per module.
- */
-#define module_init(x) __initcall(x);
-
-/**
- * module_exit() - driver exit entry point
- * @x: function to be run when driver is removed
- * 
- * module_exit() will wrap the driver clean-up code
- * with cleanup_module() when used with rmmod when
- * the driver is a module.  If the driver is statically
- * compiled into the kernel, module_exit() has no effect.
- * There can only be one per module.
- */
-#define module_exit(x) __exitcall(x);
-
 #else /* MODULE */
 
-/*
- * In most cases loadable modules do not need custom
- * initcall levels. There are still some valid cases where
- * a driver may be needed early if built in, and does not
- * matter when built as a loadable module. Like bus
- * snooping debug drivers.
- */
-#define early_initcall(fn)             module_init(fn)
-#define core_initcall(fn)              module_init(fn)
-#define core_initcall_sync(fn)         module_init(fn)
-#define postcore_initcall(fn)          module_init(fn)
-#define postcore_initcall_sync(fn)     module_init(fn)
-#define arch_initcall(fn)              module_init(fn)
-#define subsys_initcall(fn)            module_init(fn)
-#define subsys_initcall_sync(fn)       module_init(fn)
-#define fs_initcall(fn)                        module_init(fn)
-#define fs_initcall_sync(fn)           module_init(fn)
-#define rootfs_initcall(fn)            module_init(fn)
-#define device_initcall(fn)            module_init(fn)
-#define device_initcall_sync(fn)       module_init(fn)
-#define late_initcall(fn)              module_init(fn)
-#define late_initcall_sync(fn)         module_init(fn)
-
-#define console_initcall(fn)           module_init(fn)
-#define security_initcall(fn)          module_init(fn)
-
-/* Each module must use one module_init(). */
-#define module_init(initfn)                                    \
-       static inline initcall_t __inittest(void)               \
-       { return initfn; }                                      \
-       int init_module(void) __attribute__((alias(#initfn)));
-
-/* This is only required if you want to be unloadable. */
-#define module_exit(exitfn)                                    \
-       static inline exitcall_t __exittest(void)               \
-       { return exitfn; }                                      \
-       void cleanup_module(void) __attribute__((alias(#exitfn)));
-
 #define __setup_param(str, unique_id, fn)      /* nothing */
 #define __setup(str, func)                     /* nothing */
 #endif
@@ -351,24 +291,6 @@ void __init parse_early_options(char *cmdline);
 /* Data marked not to be saved by software suspend */
 #define __nosavedata __section(.data..nosave)
 
-/* This means "can be init if no module support, otherwise module load
-   may call it." */
-#ifdef CONFIG_MODULES
-#define __init_or_module
-#define __initdata_or_module
-#define __initconst_or_module
-#define __INIT_OR_MODULE       .text
-#define __INITDATA_OR_MODULE   .data
-#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
-#else
-#define __init_or_module __init
-#define __initdata_or_module __initdata
-#define __initconst_or_module __initconst
-#define __INIT_OR_MODULE __INIT
-#define __INITDATA_OR_MODULE __INITDATA
-#define __INITRODATA_OR_MODULE __INITRODATA
-#endif /*CONFIG_MODULES*/
-
 #ifdef MODULE
 #define __exit_p(x) x
 #else
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9564fd78c547b6128ddf8304639e2215b6190e3f..05e99b8ef465bcc10ca979b26b68277191951d86 100644
@@ -734,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
        return false;
 }
 #endif
+#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+void kvm_arch_start_assignment(struct kvm *kvm);
+void kvm_arch_end_assignment(struct kvm *kvm);
+bool kvm_arch_has_assigned_device(struct kvm *kvm);
+#else
+static inline void kvm_arch_start_assignment(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_end_assignment(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+       return false;
+}
+#endif
 
 static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 {
diff --git a/include/linux/module.h b/include/linux/module.h
index d67b1932cc59869cd5c3dc5d24efa5994b368386..3a19c79918e02d37c3e77ed32a88eb9f36bb16c3 100644
@@ -11,6 +11,7 @@
 #include <linux/compiler.h>
 #include <linux/cache.h>
 #include <linux/kmod.h>
+#include <linux/init.h>
 #include <linux/elf.h>
 #include <linux/stringify.h>
 #include <linux/kobject.h>
@@ -71,6 +72,89 @@ extern struct module_attribute module_uevent;
 extern int init_module(void);
 extern void cleanup_module(void);
 
+#ifndef MODULE
+/**
+ * module_init() - driver initialization entry point
+ * @x: function to be run at kernel boot time or module insertion
+ *
+ * module_init() will either be called during do_initcalls() (if
+ * builtin) or at module insertion time (if a module).  There can only
+ * be one per module.
+ */
+#define module_init(x) __initcall(x);
+
+/**
+ * module_exit() - driver exit entry point
+ * @x: function to be run when driver is removed
+ *
+ * module_exit() will wrap the driver clean-up code
+ * with cleanup_module() when used with rmmod when
+ * the driver is a module.  If the driver is statically
+ * compiled into the kernel, module_exit() has no effect.
+ * There can only be one per module.
+ */
+#define module_exit(x) __exitcall(x);
+
+#else /* MODULE */
+
+/*
+ * In most cases loadable modules do not need custom
+ * initcall levels. There are still some valid cases where
+ * a driver may be needed early if built in, and does not
+ * matter when built as a loadable module. Like bus
+ * snooping debug drivers.
+ */
+#define early_initcall(fn)             module_init(fn)
+#define core_initcall(fn)              module_init(fn)
+#define core_initcall_sync(fn)         module_init(fn)
+#define postcore_initcall(fn)          module_init(fn)
+#define postcore_initcall_sync(fn)     module_init(fn)
+#define arch_initcall(fn)              module_init(fn)
+#define subsys_initcall(fn)            module_init(fn)
+#define subsys_initcall_sync(fn)       module_init(fn)
+#define fs_initcall(fn)                        module_init(fn)
+#define fs_initcall_sync(fn)           module_init(fn)
+#define rootfs_initcall(fn)            module_init(fn)
+#define device_initcall(fn)            module_init(fn)
+#define device_initcall_sync(fn)       module_init(fn)
+#define late_initcall(fn)              module_init(fn)
+#define late_initcall_sync(fn)         module_init(fn)
+
+#define console_initcall(fn)           module_init(fn)
+#define security_initcall(fn)          module_init(fn)
+
+/* Each module must use one module_init(). */
+#define module_init(initfn)                                    \
+       static inline initcall_t __inittest(void)               \
+       { return initfn; }                                      \
+       int init_module(void) __attribute__((alias(#initfn)));
+
+/* This is only required if you want to be unloadable. */
+#define module_exit(exitfn)                                    \
+       static inline exitcall_t __exittest(void)               \
+       { return exitfn; }                                      \
+       void cleanup_module(void) __attribute__((alias(#exitfn)));
+
+#endif
+
+/* This means "can be init if no module support, otherwise module load
+   may call it." */
+#ifdef CONFIG_MODULES
+#define __init_or_module
+#define __initdata_or_module
+#define __initconst_or_module
+#define __INIT_OR_MODULE       .text
+#define __INITDATA_OR_MODULE   .data
+#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
+#else
+#define __init_or_module __init
+#define __initdata_or_module __initdata
+#define __initconst_or_module __initconst
+#define __INIT_OR_MODULE __INIT
+#define __INITDATA_OR_MODULE __INITDATA
+#define __INITRODATA_OR_MODULE __INITRODATA
+#endif /*CONFIG_MODULES*/
+
 /* Archs provide a method of finding the correct exception table. */
 struct exception_table_entry;
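
The relocated macros preserve two long-standing tricks worth noting: __inittest() exists purely so the compiler type-checks the init function against initcall_t, and the alias attribute makes that same function double as the module's init_module() entry point. Expanded by hand for a hypothetical driver:

    typedef int (*initcall_t)(void);

    static int mydrv_init(void)
    {
            return 0;
    }

    /* module_init(mydrv_init) expands, roughly, to: */
    static inline initcall_t __inittest(void)
    { return mydrv_init; }                  /* compile-time type check only */
    int init_module(void) __attribute__((alias("mydrv_init")));
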
 
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 986fddb085796035a44c69e48779ec84393415f8..b0f898e3b2e733307100cd4cf80159bc88972b6d 100644
@@ -1745,6 +1745,7 @@ struct ib_device {
        char                         node_desc[64];
        __be64                       node_guid;
        u32                          local_dma_lkey;
+       u16                          is_switch:1;
        u8                           node_type;
        u8                           phys_port_cnt;
 
@@ -1823,6 +1824,20 @@ int ib_query_port(struct ib_device *device,
 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
                                               u8 port_num);
 
+/**
+ * rdma_cap_ib_switch - Check if the device is an IB switch
+ * @device: Device to check
+ *
+ * The device driver is responsible for setting the is_switch bit in
+ * the ib_device structure at init time.
+ *
+ * Return: true if the device is an IB switch.
+ */
+static inline bool rdma_cap_ib_switch(const struct ib_device *device)
+{
+       return device->is_switch;
+}
+
 /**
  * rdma_start_port - Return the first valid port number for the device
  * specified
@@ -1833,7 +1848,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
  */
 static inline u8 rdma_start_port(const struct ib_device *device)
 {
-       return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
+       return rdma_cap_ib_switch(device) ? 0 : 1;
 }
 
 /**
@@ -1846,8 +1861,7 @@ static inline u8 rdma_start_port(const struct ib_device *device)
  */
 static inline u8 rdma_end_port(const struct ib_device *device)
 {
-       return (device->node_type == RDMA_NODE_IB_SWITCH) ?
-               0 : device->phys_port_cnt;
+       return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
 }
 
 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
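
rdma_cap_ib_switch() centralizes what used to be scattered node_type comparisons: a switch exposes only management port 0, while an HCA numbers its ports 1..phys_port_cnt. That is exactly what makes the usual port-iteration idiom work for both device types (a sketch; the helper name is hypothetical):

    /* Visits port 0 once on a switch, ports 1..phys_port_cnt on an HCA. */
    static void sketch_for_each_port(struct ib_device *dev,
                                     void (*fn)(struct ib_device *, u8))
    {
            u8 p;

            for (p = rdma_start_port(dev); p <= rdma_end_port(dev); p++)
                    fn(dev, p);
    }
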
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index cdb05dd1d4401134fcef914cefaa1338eb629437..d40d3ef25707bd7979a36d9e6e094a394d9f915e 100644
@@ -119,6 +119,7 @@ extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
 extern void srp_rport_del(struct srp_rport *);
 extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
                         int dev_loss_tmo);
+int srp_parse_tmo(int *tmo, const char *buf);
 extern int srp_reconnect_rport(struct srp_rport *rport);
 extern void srp_start_tl_fail_timers(struct srp_rport *rport);
 extern void srp_remove_host(struct Scsi_Host *);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f060716b02ae25b45494b0f350df6fe2fc0f369b..74bde81601a902759e5714b04a5a987ddbde2d15 100644
@@ -444,6 +444,7 @@ enum {
 
        TRACE_CONTROL_BIT,
 
+       TRACE_BRANCH_BIT,
 /*
  * Abuse of the trace_recursion.
  * As we need a way to maintain state if we are tracing the function
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index a87b43f49eb448afa3f1cc2bc44763d71f06dd52..e2e12ad3186f440f7841819a9f260509d5afde43 100644
@@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        struct trace_branch *entry;
        struct ring_buffer *buffer;
        unsigned long flags;
-       int cpu, pc;
+       int pc;
        const char *p;
 
+       if (current->trace_recursion & TRACE_BRANCH_BIT)
+               return;
+
        /*
         * I would love to save just the ftrace_likely_data pointer, but
         * this code can also be used by modules. Ugly things can happen
@@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        if (unlikely(!tr))
                return;
 
-       local_irq_save(flags);
-       cpu = raw_smp_processor_id();
-       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
-       if (atomic_inc_return(&data->disabled) != 1)
+       raw_local_irq_save(flags);
+       current->trace_recursion |= TRACE_BRANCH_BIT;
+       data = this_cpu_ptr(tr->trace_buffer.data);
+       if (atomic_read(&data->disabled))
                goto out;
 
        pc = preempt_count();
@@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
                __buffer_unlock_commit(buffer, event);
 
  out:
-       atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+       current->trace_recursion &= ~TRACE_BRANCH_BIT;
+       raw_local_irq_restore(flags);
 }
 
 static inline
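
Replacing the per-CPU disabled counter with a per-task recursion bit makes re-entry explicit: the probe's own work (ring-buffer code, for instance) contains likely/unlikely annotations that would call back into the probe, and the bit simply rejects that inner call instead of juggling a counter (the raw_ irq variants likewise keep the irq on/off transitions themselves out of the trace). A runnable userspace sketch of the guard:

    #include <stdbool.h>
    #include <stdio.h>

    static _Thread_local bool in_probe;     /* per-task TRACE_BRANCH_BIT */

    static void record_event(void);

    static void probe(void)
    {
            if (in_probe)
                    return;                 /* re-entered: drop the event */
            in_probe = true;
            record_event();                 /* may call back into probe() */
            in_probe = false;
    }

    static void record_event(void)
    {
            probe();                        /* rejected by the flag above */
            puts("event recorded once");
    }

    int main(void)
    {
            probe();
            return 0;
    }
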
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 273b8bff6ba448aa013932f5ac7c9f929f49aa70..657ba9f5d30862a1f8add1302e8364bcd4ad44dc 100644
@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
        }
 
        ibmr = rds_ib_alloc_fmr(rds_ibdev);
-       if (IS_ERR(ibmr))
+       if (IS_ERR(ibmr)) {
+               rds_ib_dev_put(rds_ibdev);
                return ibmr;
+       }
 
        ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
        if (ret == 0)
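
rds_ib_get_mr() takes a reference on the device earlier in the function; the fix adds the missing put on the FMR-allocation error path so the count stays balanced. The invariant, as a small standalone sketch:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct dev { atomic_int ref; };

    static void dev_get(struct dev *d) { atomic_fetch_add(&d->ref, 1); }
    static void dev_put(struct dev *d) { atomic_fetch_sub(&d->ref, 1); }

    /* Every exit after dev_get() must be paired with a dev_put(); the
     * bug was an early error return that skipped the put. */
    static void *get_mr(struct dev *d, int fail_alloc)
    {
            dev_get(d);
            if (fail_alloc) {
                    dev_put(d);             /* the fix: drop ref on error */
                    return NULL;
            }
            return d;                       /* ref dropped when MR is freed */
    }

    int main(void)
    {
            struct dev d = { 0 };
            void *mr = get_mr(&d, 1);

            assert(mr == NULL);
            assert(atomic_load(&d.ref) == 0);   /* balanced on error path */
            return 0;
    }
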
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 620e37f741b868a231a414a71511cd8872704a3c..1dd087da6f31ae2f38c70042213ee6e5159cee10 100644
@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
                list_add_tail(&kvg->node, &kv->group_list);
                kvg->vfio_group = vfio_group;
 
+               kvm_arch_start_assignment(dev->kvm);
+
                mutex_unlock(&kv->lock);
 
                kvm_vfio_update_coherency(dev);
@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
                        break;
                }
 
+               kvm_arch_end_assignment(dev->kvm);
+
                mutex_unlock(&kv->lock);
 
                kvm_vfio_group_put_external_user(vfio_group);
@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
                kvm_vfio_group_put_external_user(kvg->vfio_group);
                list_del(&kvg->node);
                kfree(kvg);
+               kvm_arch_end_assignment(dev->kvm);
        }
 
        kvm_vfio_update_coherency(dev);
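
With these hooks, kvm_vfio_set_group() brackets each VFIO group's membership in the VM, and kvm_vfio_destroy() closes out whatever is left, so an architecture can keep an accurate count of assigned devices (kvm_host.h above supplies the no-op fallbacks when __KVM_HAVE_ARCH_ASSIGNED_DEVICE is unset). A sketch of what an arch implementation plausibly maintains; the counter field is an assumption here, only the hook names come from this diff:

    /* Hypothetical per-VM assigned-device counter behind the hooks. */
    void kvm_arch_start_assignment(struct kvm *kvm)
    {
            atomic_inc(&kvm->arch.assigned_device_count);
    }

    void kvm_arch_end_assignment(struct kvm *kvm)
    {
            atomic_dec(&kvm->arch.assigned_device_count);
    }

    bool kvm_arch_has_assigned_device(struct kvm *kvm)
    {
            return atomic_read(&kvm->arch.assigned_device_count) != 0;
    }
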