asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge tag 'regulator-fix-v4.2-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator
author    Linus Torvalds <torvalds@linux-foundation.org>  Fri, 24 Jul 2015 20:14:06 +0000 (13:14 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>  Fri, 24 Jul 2015 20:14:06 +0000 (13:14 -0700)
Pull regulator fixes from Mark Brown:
 "As well as some driver specific fixes there's several fixes here for
  the core support for regulators supplying other regulators fixing both
  an issue with ACPI support (which had never been tested before) and
  some error handling and device removal issues that Javier noticed"

* tag 'regulator-fix-v4.2-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator:
  regulator: core: Fix memory leak in regulator_resolve_supply()
  regulator: core: Increase refcount for regulator supply's module
  regulator: core: Handle full constraints systems when resolving supplies
  regulator: 88pm800: fix LDO vsel_mask value
  regulator: max8973: Fix up control flag option for bias control
  regulator: s2mps11: Fix GPIO suspend enable shift wrapping bug

147 files changed:
Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
MAINTAINERS
arch/arm/net/bpf_jit_32.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/irq.c
arch/avr32/kernel/time.c
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry.S
arch/s390/kernel/traps.c
arch/tile/kernel/setup.c
arch/x86/include/uapi/asm/kvm.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mtrr.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.h
drivers/bluetooth/btbcm.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/intel-iommu.c
drivers/isdn/gigaset/ser-gigaset.c
drivers/net/bonding/bond_main.c
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/cc770/cc770.c
drivers/net/can/flexcan.c
drivers/net/can/grcan.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/slcan.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/can/usb/usb_8dev.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ipvlan/ipvlan.h
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/phy/dp83867.c
drivers/net/phy/mdio_bus.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/iwlwifi/iwl-fh.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/xen-netback/netback.c
drivers/pinctrl/bcm/pinctrl-bcm2835.c
drivers/pinctrl/freescale/pinctrl-imx1-core.c
drivers/pinctrl/nomadik/pinctrl-abx500.c
drivers/pinctrl/pinctrl-lpc18xx.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/samsung/pinctrl-samsung.c
drivers/pinctrl/sh-pfc/sh_pfc.h
drivers/s390/Makefile
drivers/s390/virtio/Makefile [moved from drivers/s390/kvm/Makefile with 100% similarity]
drivers/s390/virtio/kvm_virtio.c [moved from drivers/s390/kvm/kvm_virtio.c with 100% similarity]
drivers/s390/virtio/virtio_ccw.c [moved from drivers/s390/kvm/virtio_ccw.c with 100% similarity]
drivers/scsi/virtio_scsi.c
drivers/spi/Kconfig
drivers/spi/spi-img-spfi.c
drivers/spi/spi-imx.c
drivers/spi/spi-zynqmp-gqspi.c
drivers/spi/spidev.c
drivers/vhost/vhost.c
fs/namespace.c
fs/notify/mark.c
fs/pnode.h
fs/udf/inode.c
include/net/cfg80211.h
include/net/ip.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/i915_drm.h
include/uapi/linux/virtio_net.h
include/uapi/linux/virtio_pci.h
include/uapi/linux/virtio_ring.h
net/9p/trans_virtio.c
net/ax25/ax25_subr.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/caif/caif_socket.c
net/core/datagram.c
net/core/dst.c
net/core/rtnetlink.c
net/ipv4/datagram.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_fragment.c
net/ipv4/tcp_input.c
net/ipv6/datagram.c
net/ipv6/ip6_offload.c
net/mac80211/debugfs_netdev.c
net/mac80211/iface.c
net/mac80211/mesh_plink.c
net/mac80211/pm.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/netlink/af_netlink.c
net/openvswitch/flow_table.c
net/sched/act_bpf.c
net/sched/cls_bpf.c
net/sched/cls_flow.c
net/sched/cls_flower.c
net/sched/sch_fq_codel.c
net/sched/sch_sfq.c
net/wireless/chan.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/trace.h
samples/trace_events/trace-events-sample.h
sound/core/pcm_native.c
sound/hda/hdac_i915.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/sparc/amd7930.c

diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index c03eec1168721bdb1c851f7c42eba34ec48f4ba7..3443e0f838dfc8a53e548527a05cf9892f2c2a92 100644
@@ -35,3 +35,6 @@ the PCIe specification.
 
                       NOTE: this only applies to the SMMU itself, not
                       masters connected upstream of the SMMU.
+
+- hisilicon,broken-prefetch-cmd
+                    : Avoid sending CMD_PREFETCH_* commands to the SMMU.
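
For reference, a minimal device-tree fragment showing where the new property
would sit. The node name, unit address and reg value below are made up for
illustration; they are not taken from this patch:

    smmu@2b400000 {
            compatible = "arm,smmu-v3";
            reg = <0x0 0x2b400000 0x0 0x20000>;     /* hypothetical */
            /* interrupt properties omitted */
            hisilicon,broken-prefetch-cmd;
    };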
diff --git a/MAINTAINERS b/MAINTAINERS
index a2264167791acd0d309b1097937ad1ba8aa36cb8..2eb627252239f6d71590636ab0068fe42d87f311 100644
@@ -5899,7 +5899,6 @@ S:        Supported
 F:     Documentation/s390/kvm.txt
 F:     arch/s390/include/asm/kvm*
 F:     arch/s390/kvm/
-F:     drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:     Christoffer Dall <christoffer.dall@linaro.org>
@@ -10896,6 +10895,15 @@ F:     drivers/block/virtio_blk.c
 F:     include/linux/virtio_*.h
 F:     include/uapi/linux/virtio_*.h
 
+VIRTIO DRIVERS FOR S390
+M:     Christian Borntraeger <borntraeger@de.ibm.com>
+M:     Cornelia Huck <cornelia.huck@de.ibm.com>
+L:     linux-s390@vger.kernel.org
+L:     virtualization@lists.linux-foundation.org
+L:     kvm@vger.kernel.org
+S:     Supported
+F:     drivers/s390/virtio/
+
 VIRTIO GPU DRIVER
 M:     David Airlie <airlied@linux.ie>
 M:     Gerd Hoffmann <kraxel@redhat.com>
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 4550d247e308be128b439b0735d853f4745f3618..c011e2296cb1e681863338972c6ce79f836bb6be 100644
@@ -74,32 +74,52 @@ struct jit_ctx {
 
 int bpf_jit_enable __read_mostly;
 
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+                     unsigned int size)
+{
+       void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+       if (!ptr)
+               return -EFAULT;
+       memcpy(ret, ptr, size);
+       return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
 {
        u8 ret;
        int err;
 
-       err = skb_copy_bits(skb, offset, &ret, 1);
+       if (offset < 0)
+               err = call_neg_helper(skb, offset, &ret, 1);
+       else
+               err = skb_copy_bits(skb, offset, &ret, 1);
 
        return (u64)err << 32 | ret;
 }
 
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 {
        u16 ret;
        int err;
 
-       err = skb_copy_bits(skb, offset, &ret, 2);
+       if (offset < 0)
+               err = call_neg_helper(skb, offset, &ret, 2);
+       else
+               err = skb_copy_bits(skb, offset, &ret, 2);
 
        return (u64)err << 32 | ntohs(ret);
 }
 
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 {
        u32 ret;
        int err;
 
-       err = skb_copy_bits(skb, offset, &ret, 4);
+       if (offset < 0)
+               err = call_neg_helper(skb, offset, &ret, 4);
+       else
+               err = skb_copy_bits(skb, offset, &ret, 4);
 
        return (u64)err << 32 | ntohl(ret);
 }
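
For context: in classic BPF, negative load offsets are not packet offsets but
the special address spaces SKF_AD_OFF, SKF_LL_OFF and SKF_NET_OFF from
include/uapi/linux/filter.h, which skb_copy_bits() cannot serve; hence the new
call_neg_helper() wrapper around bpf_internal_load_pointer_neg_helper(). A
sketch of a filter instruction that previously made the ARM JIT bail out:

    #include <linux/filter.h>

    /* Load one byte from link-layer offset 0. The negative constant
     * (SKF_LL_OFF) used to make build_body() return -ENOTSUPP on ARM. */
    struct sock_filter ld_ll_byte =
            BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 0);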
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
                case BPF_LD | BPF_B | BPF_ABS:
                        load_order = 0;
 load:
-                       /* the interpreter will deal with the negative K */
-                       if ((int)k < 0)
-                               return -ENOTSUPP;
                        emit_mov_i(r_off, k, ctx);
 load_common:
                        ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ static int build_body(struct jit_ctx *ctx)
                                emit(ARM_SUB_I(r_scratch, r_skb_hl,
                                               1 << load_order), ctx);
                                emit(ARM_CMP_R(r_scratch, r_off), ctx);
-                               condt = ARM_COND_HS;
+                               condt = ARM_COND_GE;
                        } else {
                                emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
                                condt = ARM_COND_HI;
                        }
 
+                       /*
+                        * test for negative offset, only if we are
+                        * currently scheduled to take the fast
+                        * path. this will update the flags so that
+                        * the slowpath instructions are ignored if the
+                        * offset is negative.
+                        *
+                        * for load_order == 0 the HI condition will
+                        * make loads at offset 0 take the slow path too.
+                        */
+                       _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+
                        _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
                              ctx);
 
@@ -860,9 +889,11 @@ static int build_body(struct jit_ctx *ctx)
                        off = offsetof(struct sk_buff, vlan_tci);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-                               OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
-                       else
-                               OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+                               OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
+                       else {
+                               OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
+                               OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
+                       }
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        ctx->seen |= SEEN_SKB;
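
The VLAN hunk above also changes what the two ancillary loads return.
Assuming this era's internal vlan_tci encoding, where VLAN_TAG_PRESENT is
bit 12, the emitted LSR/AND sequence is equivalent to:

    /* tci = skb->vlan_tci */
    u16 tag     = tci & ~VLAN_TAG_PRESENT;  /* SKF_AD_VLAN_TAG         */
    u16 present = (tci >> 12) & 0x1;        /* SKF_AD_VLAN_TAG_PRESENT */

which matches what the in-kernel interpreter returns for these loads.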
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f860bfda454afb9de7007684ba60eaf7f375e130..e16351819fed9ada8575f117c4777232da0d72b9 100644
@@ -585,7 +585,8 @@ ENDPROC(el0_irq)
  *
  */
 ENTRY(cpu_switch_to)
-       add     x8, x0, #THREAD_CPU_CONTEXT
+       mov     x10, #THREAD_CPU_CONTEXT
+       add     x8, x0, x10
        mov     x9, sp
        stp     x19, x20, [x8], #16             // store callee-saved registers
        stp     x21, x22, [x8], #16
@@ -594,7 +595,7 @@ ENTRY(cpu_switch_to)
        stp     x27, x28, [x8], #16
        stp     x29, x9, [x8], #16
        str     lr, [x8]
-       add     x8, x1, #THREAD_CPU_CONTEXT
+       add     x8, x1, x10
        ldp     x19, x20, [x8], #16             // restore callee-saved registers
        ldp     x21, x22, [x8], #16
        ldp     x23, x24, [x8], #16
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 240b75c0e94fdc435a672b740b08d4a848915295..463fa2e7e34c10ba56f2cd4a10e208062feff3d9 100644
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
 static bool migrate_one_irq(struct irq_desc *desc)
 {
        struct irq_data *d = irq_desc_get_irq_data(desc);
-       const struct cpumask *affinity = d->affinity;
+       const struct cpumask *affinity = irq_data_get_affinity_mask(d);
        struct irq_chip *c;
        bool ret = false;
 
@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
        else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-               cpumask_copy(d->affinity, affinity);
+               cpumask_copy(irq_data_get_affinity_mask(d), affinity);
 
        return ret;
 }
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index d0f771be9e96eda02c1045bbb5702cc9b9f74b8d..a124c55733dbf7dc19e61aacbf74671ff149de83 100644
@@ -18,6 +18,7 @@
 
 #include <mach/pm.h>
 
+static bool disable_cpu_idle_poll;
 
 static cycle_t read_cycle_count(struct clocksource *cs)
 {
@@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta,
        return 0;
 }
 
-static void comparator_mode(enum clock_event_mode mode,
-               struct clock_event_device *evdev)
+static int comparator_shutdown(struct clock_event_device *evdev)
 {
-       switch (mode) {
-       case CLOCK_EVT_MODE_ONESHOT:
-               pr_debug("%s: start\n", evdev->name);
-               /* FALLTHROUGH */
-       case CLOCK_EVT_MODE_RESUME:
+       pr_debug("%s: %s\n", __func__, evdev->name);
+       sysreg_write(COMPARE, 0);
+
+       if (disable_cpu_idle_poll) {
+               disable_cpu_idle_poll = false;
                /*
-                * If we're using the COUNT and COMPARE registers we
-                * need to force idle poll.
+                * Only disable idle poll if we have forced that
+                * in a previous call.
                 */
-               cpu_idle_poll_ctrl(true);
-               break;
-       case CLOCK_EVT_MODE_UNUSED:
-       case CLOCK_EVT_MODE_SHUTDOWN:
-               sysreg_write(COMPARE, 0);
-               pr_debug("%s: stop\n", evdev->name);
-               if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
-                   evdev->mode == CLOCK_EVT_MODE_RESUME) {
-                       /*
-                        * Only disable idle poll if we have forced that
-                        * in a previous call.
-                        */
-                       cpu_idle_poll_ctrl(false);
-               }
-               break;
-       default:
-               BUG();
+               cpu_idle_poll_ctrl(false);
        }
+       return 0;
+}
+
+static int comparator_set_oneshot(struct clock_event_device *evdev)
+{
+       pr_debug("%s: %s\n", __func__, evdev->name);
+
+       disable_cpu_idle_poll = true;
+       /*
+        * If we're using the COUNT and COMPARE registers we
+        * need to force idle poll.
+        */
+       cpu_idle_poll_ctrl(true);
+
+       return 0;
 }
 
 static struct clock_event_device comparator = {
-       .name           = "avr32_comparator",
-       .features       = CLOCK_EVT_FEAT_ONESHOT,
-       .shift          = 16,
-       .rating         = 50,
-       .set_next_event = comparator_next_event,
-       .set_mode       = comparator_mode,
+       .name                   = "avr32_comparator",
+       .features               = CLOCK_EVT_FEAT_ONESHOT,
+       .shift                  = 16,
+       .rating                 = 50,
+       .set_next_event         = comparator_next_event,
+       .set_state_shutdown     = comparator_shutdown,
+       .set_state_oneshot      = comparator_set_oneshot,
+       .tick_resume            = comparator_set_oneshot,
 };
 
 void read_persistent_clock(struct timespec *ts)
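
The avr32 hunk is part of the tree-wide clockevents conversion from the
multiplexed set_mode callback to per-state hooks; the mapping, as inferred
from the removed code:

    /* CLOCK_EVT_MODE_SHUTDOWN, _UNUSED  ->  .set_state_shutdown  */
    /* CLOCK_EVT_MODE_ONESHOT            ->  .set_state_oneshot   */
    /* CLOCK_EVT_MODE_RESUME             ->  .tick_resume         */
    /* Each new hook returns int (0 on success) instead of void.  */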
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d0901147ba359fa03e2e4e026699413fa2..a2da259d932741614c4f6b78642c491b3829b223 100644
 
 int main(void)
 {
-       DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
-       DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
-       DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
-       BLANK();
+       DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
+       DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
        DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
        BLANK();
-       DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
-       DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
-       DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
+       DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
+       DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
+       DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
+       DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
+       DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
        BLANK();
        DEFINE(__TI_task, offsetof(struct thread_info, task));
        DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
        DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
        DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
        DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
-       DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
        DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
        DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
        DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3238893c9d4ff66d492ce2a04421b85fa0cc0c34..84062e7a77dad75c50fa60822255ee2b20b34181 100644
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
  */
 ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
-       stg     %r15,__THREAD_ksp(%r2)          # store kernel stack of prev
-       lg      %r4,__THREAD_info(%r2)          # get thread_info of prev
-       lg      %r5,__THREAD_info(%r3)          # get thread_info of next
+       lgr     %r1,%r2
+       aghi    %r1,__TASK_thread               # thread_struct of prev task
+       lg      %r4,__TASK_thread_info(%r2)     # get thread_info of prev
+       lg      %r5,__TASK_thread_info(%r3)     # get thread_info of next
+       stg     %r15,__THREAD_ksp(%r1)          # store kernel stack of prev
+       lgr     %r1,%r3
+       aghi    %r1,__TASK_thread               # thread_struct of next task
        lgr     %r15,%r5
        aghi    %r15,STACK_INIT                 # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r5,__LC_THREAD_INFO            # store thread info of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
+       lg      %r15,__THREAD_ksp(%r1)          # load kernel stack of next
        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
        mvc     __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
-       lg      %r15,__THREAD_ksp(%r3)          # load kernel stack of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        br      %r14
 
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
        LAST_BREAK %r14
        lg      %r15,__LC_KERNEL_STACK
        lg      %r14,__TI_task(%r12)
+       aghi    %r14,__TASK_thread      # pointer to thread_struct
        lghi    %r13,__LC_PGM_TDB
        tm      __LC_PGM_ILC+2,0x02     # check for transaction abort
        jz      2f
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f5345538471cea4cd62620439c88a7f867..7bea81d8a3635025b0f372c3f908ade71b188e1b 100644
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
        }
 
        /* get vector interrupt code from fpc */
-       asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+       asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
        vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
        switch (vic) {
        case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
 
        location = get_trap_ip(regs);
 
-       asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+       asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
        /* Check for vector register enablement */
        if (MACHINE_HAS_VX && !current->thread.vxrs &&
            (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 99c9ff87e0187502179f8012afa5bd54309d2eb9..6b755d125783f45b05de02e3fe5f7b5e32d1af45 100644
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
 
 void __init free_initrd_mem(unsigned long begin, unsigned long end)
 {
-       free_bootmem(__pa(begin), end - begin);
+       free_bootmem_late(__pa(begin), end - begin);
 }
 
 static int __init setup_initrd(char *str)
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a4ae82eb82aa8485146aee83376ba26fb9306afe..cd54147cb365fb3622121c782eee8e7fb3ddcc77 100644
@@ -354,7 +354,7 @@ struct kvm_xcrs {
 struct kvm_sync_regs {
 };
 
-#define KVM_QUIRK_LINT0_REENABLED      (1 << 0)
-#define KVM_QUIRK_CD_NW_CLEARED                (1 << 1)
+#define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
 
 #endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e38bf9861d4d6349eb721264e8cb18..2a5ca97c263bb48092ea80f88c7d30120ea63b6e 100644
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        for (i = 0; i < APIC_LVT_NUM; i++)
                apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
        apic_update_lvtt(apic);
-       if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED))
+       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
                apic_set_reg(apic, APIC_LVT0,
                             SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
        apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index de1d2d8062e24048232af909d684f0c2ed9e21bc..dc0a84a6f3094ac997701de74c868763e6843229 100644
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
        return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
 }
 
+static u8 mtrr_disabled_type(void)
+{
+       /*
+        * Intel SDM 11.11.2.2: all MTRRs are disabled when
+        * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
+        * memory type is applied to all of physical memory.
+        */
+       return MTRR_TYPE_UNCACHABLE;
+}
+
 /*
 * Three terms are used in the following code:
 * - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
 
        /* output fields. */
        int mem_type;
+       /* mtrr is completely disabled? */
+       bool mtrr_disabled;
        /* [start, end) is not fully covered in MTRRs? */
        bool partial_map;
 
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
 static void mtrr_lookup_start(struct mtrr_iter *iter)
 {
        if (!mtrr_is_enabled(iter->mtrr_state)) {
-               iter->partial_map = true;
+               iter->mtrr_disabled = true;
                return;
        }
 
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
        iter->mtrr_state = mtrr_state;
        iter->start = start;
        iter->end = end;
+       iter->mtrr_disabled = false;
        iter->partial_map = false;
        iter->fixed = false;
        iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
                return MTRR_TYPE_WRBACK;
        }
 
-       /* It is not covered by MTRRs. */
-       if (iter.partial_map) {
-               /*
-                * We just check one page, partially covered by MTRRs is
-                * impossible.
-                */
-               WARN_ON(type != -1);
-               type = mtrr_default_type(mtrr_state);
-       }
+       if (iter.mtrr_disabled)
+               return mtrr_disabled_type();
+
+       /*
+        * We just check one page, partially covered by MTRRs is
+        * impossible.
+        */
+       WARN_ON(iter.partial_map);
+
+       /* not contained in any MTRRs. */
+       if (type == -1)
+               return mtrr_default_type(mtrr_state);
+
        return type;
 }
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                        return false;
        }
 
+       if (iter.mtrr_disabled)
+               return true;
+
        if (!iter.partial_map)
                return true;
 
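The net effect of the new mtrr_disabled flag, sketched in C: when the guest
clears IA32_MTRR_DEF_TYPE.E, a lookup now returns UC instead of whatever the
(then meaningless) default-type field contains:

    /* guest has MTRRs disabled: IA32_MTRR_DEF_TYPE.E == 0 */
    u8 type = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
    /* before: mtrr_default_type(mtrr_state), i.e. the deftype field */
    /* after:  MTRR_TYPE_UNCACHABLE, per Intel SDM 11.11.2.2         */
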
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index bbc678a66b18719287b091787db96cf1f9f98981..8e0c0844c6b9681e31e64bdeba0f1822108bec3e 100644
@@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         * does not do it - this results in some delay at
         * reboot
         */
-       if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED))
+       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
                cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
        mark_dirty(svm->vmcb, VMCB_CR);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5b4e9384717a17257ea20ef47031dd5f158273a2..83b7b5cd75d52dd67976274da3c11807f1c35490 100644
@@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 
        if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
                ipat = VMX_EPT_IPAT_BIT;
-               cache = MTRR_TYPE_UNCACHABLE;
+               if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+                       cache = MTRR_TYPE_WRBACK;
+               else
+                       cache = MTRR_TYPE_UNCACHABLE;
                goto exit;
        }
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index edc8cdcd786b00627a1029c8bb3b2aedcb291d09..0ca2f3e4803c2a5846be297a51eb5c1cf67aff4a 100644
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
        return kvm_register_write(vcpu, reg, val);
 }
 
+static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
+{
+       return !(kvm->arch.disabled_quirks & quirk);
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
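
Note the double negative in kvm_check_has_quirk(): quirks are active by
default and userspace opts out by setting bits in disabled_quirks, so the
helper answers "is this quirk still in effect?". A quick sketch:

    kvm->arch.disabled_quirks = KVM_X86_QUIRK_LINT0_REENABLED;

    kvm_check_has_quirk(kvm, KVM_X86_QUIRK_LINT0_REENABLED); /* false */
    kvm_check_has_quirk(kvm, KVM_X86_QUIRK_CD_NW_CLEARED);   /* true  */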
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1e1a4323a71fd304d410e421b488758d2300b34d..9ceb8ac68fdca2518c953cb6a9a36c6fd5b5f268 100644
@@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
 
        /* Read Verbose Config Version Info */
        skb = btbcm_read_verbose_config(hdev);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
-
-       BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
-               get_unaligned_le16(skb->data + 5));
-       kfree_skb(skb);
+       if (!IS_ERR(skb)) {
+               BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+                       get_unaligned_le16(skb->data + 5));
+               kfree_skb(skb);
+       }
 
        set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01657830b470a49e8209fd39fa829d4a1fbb3610..e9fde72cf038a991dae93cd00223daa307ed585b 100644
@@ -1614,6 +1614,9 @@ struct amdgpu_uvd {
 #define AMDGPU_MAX_VCE_HANDLES 16
 #define AMDGPU_VCE_FIRMWARE_OFFSET 256
 
+#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
+#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
+
 struct amdgpu_vce {
        struct amdgpu_bo        *vcpu_bo;
        uint64_t                gpu_addr;
@@ -1626,6 +1629,7 @@ struct amdgpu_vce {
        const struct firmware   *fw;    /* VCE firmware */
        struct amdgpu_ring      ring[AMDGPU_MAX_VCE_RINGS];
        struct amdgpu_irq_src   irq;
+       unsigned                harvest_config;
 };
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5533434c7a8fad8dd22bab138c84236befe13167..31ad444c6386d9f07c505889d98ab476ca34e6cb 100644
@@ -459,6 +459,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
                dev_info.vram_type = adev->mc.vram_type;
                dev_info.vram_bit_width = adev->mc.vram_width;
+               dev_info.vce_harvest_config = adev->vce.harvest_config;
 
                return copy_to_user(out, &dev_info,
                                    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 1a2d419cbf164e8939cf7e584480dc17c0b93ecb..ace870afc7d45154a6bb6013cb45b773a8e63512 100644
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
        amdgpu_free_extended_power_table(adev);
 }
 
+#define ixSMUSVI_NB_CURRENTVID 0xD8230044
+#define CURRENT_NB_VID_MASK 0xff000000
+#define CURRENT_NB_VID__SHIFT 24
+#define ixSMUSVI_GFX_CURRENTVID  0xD8230048
+#define CURRENT_GFX_VID_MASK 0xff000000
+#define CURRENT_GFX_VID__SHIFT 24
+
 static void
 cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
                                               struct seq_file *m)
 {
+       struct cz_power_info *pi = cz_get_pi(adev);
        struct amdgpu_clock_voltage_dependency_table *table =
                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-       u32 current_index =
-               (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
-               TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
-               TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
-       u32 sclk, tmp;
-       u16 vddc;
-
-       if (current_index >= NUM_SCLK_LEVELS) {
-               seq_printf(m, "invalid dpm profile %d\n", current_index);
+       struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+               &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+       struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+               &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+       u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
+                                      TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+       u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+                                     TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+       u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+                                     TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+       u32 sclk, vclk, dclk, ecclk, tmp;
+       u16 vddnb, vddgfx;
+
+       if (sclk_index >= NUM_SCLK_LEVELS) {
+               seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
        } else {
-               sclk = table->entries[current_index].clk;
-               tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
-                       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
-                       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
-               vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
-               seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
-                          current_index, sclk, vddc);
+               sclk = table->entries[sclk_index].clk;
+               seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
+       }
+
+       tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
+              CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+       vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+       tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
+              CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+       vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+       seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
+
+       seq_printf(m, "uvd    %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+       if (!pi->uvd_power_gated) {
+               if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+                       seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
+               } else {
+                       vclk = uvd_table->entries[uvd_index].vclk;
+                       dclk = uvd_table->entries[uvd_index].dclk;
+                       seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
+               }
+       }
+
+       seq_printf(m, "vce    %sabled\n", pi->vce_power_gated ? "dis" : "en");
+       if (!pi->vce_power_gated) {
+               if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+                       seq_printf(m, "invalid vce dpm level %d\n", vce_index);
+               } else {
+                       ecclk = vce_table->entries[vce_index].ecclk;
+                       seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
+               }
        }
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 6e77964f1b640d6841ca216ab2c7f5eed73077f5..e70a26f587a03f7dde6788f88add8f32a25ad764 100644
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       unsigned type;
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v10_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v10_0_vga_enable(crtc, false);
+               /* Make sure VBLANK interrupt is still enabled */
+               type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+               amdgpu_irq_update(adev, &adev->crtc_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v10_0_crtc_load_lut(crtc);
                break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7f7abb0e0be53026d98e074d4c0a756484eb3e1b..dcb402ee048a602ecf03aade115a872212781811 100644
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       unsigned type;
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v11_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v11_0_vga_enable(crtc, false);
+               /* Make sure VBLANK interrupt is still enabled */
+               type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+               amdgpu_irq_update(adev, &adev->crtc_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v11_0_crtc_load_lut(crtc);
                break;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d62c4002e39cc7acc5a3427d0aed92bedf12c984..d1064ca3670ec31376293950c76452414e94ff23 100644
@@ -35,6 +35,8 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 #include "gca/gfx_8_0_d.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT    0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK      0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
+
+               if (adev->vce.harvest_config & (1 << idx))
+                       continue;
+
                if(idx == 0)
                        WREG32_P(mmGRBM_GFX_INDEX, 0,
                                ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        return 0;
 }
 
+#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
+#define VCE_HARVEST_FUSE_MACRO__SHIFT       27
+#define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
+
+static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+{
+       u32 tmp;
+       unsigned ret;
+
+       if (adev->flags & AMDGPU_IS_APU)
+               tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
+                      VCE_HARVEST_FUSE_MACRO__MASK) >>
+                       VCE_HARVEST_FUSE_MACRO__SHIFT;
+       else
+               tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
+                      CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
+                       CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
+
+       switch (tmp) {
+       case 1:
+               ret = AMDGPU_VCE_HARVEST_VCE0;
+               break;
+       case 2:
+               ret = AMDGPU_VCE_HARVEST_VCE1;
+               break;
+       case 3:
+               ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
+               break;
+       default:
+               ret = 0;
+       }
+
+       return ret;
+}
+
 static int vce_v3_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
+
+       if ((adev->vce.harvest_config &
+            (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
+           (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
+               return -ENOENT;
+
        vce_v3_0_set_ring_funcs(adev);
        vce_v3_0_set_irq_funcs(adev);
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index f69b92535505b5ae1c899d6f9b08f76501851fa7..5ae5c69231280a5e1c23882289388f9ceb75a701 100644
@@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
                planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
 
        drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
+       drm_crtc_vblank_reset(&crtc->base);
 
        dc->crtc = &crtc->base;
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 60b0c13d7ff5cc6f4c338c9ed3d7f423d684c84e..6fad1f9648f38870b2162cb74a6320f50c34aabc 100644
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 
        pm_runtime_enable(dev->dev);
 
-       ret = atmel_hlcdc_dc_modeset_init(dev);
+       ret = drm_vblank_init(dev, 1);
        if (ret < 0) {
-               dev_err(dev->dev, "failed to initialize mode setting\n");
+               dev_err(dev->dev, "failed to initialize vblank\n");
                goto err_periph_clk_disable;
        }
 
-       drm_mode_config_reset(dev);
-
-       ret = drm_vblank_init(dev, 1);
+       ret = atmel_hlcdc_dc_modeset_init(dev);
        if (ret < 0) {
-               dev_err(dev->dev, "failed to initialize vblank\n");
+               dev_err(dev->dev, "failed to initialize mode setting\n");
                goto err_periph_clk_disable;
        }
 
+       drm_mode_config_reset(dev);
+
        pm_runtime_get_sync(dev->dev);
        ret = drm_irq_install(dev, dc->hlcdc->irq);
        pm_runtime_put_sync(dev->dev);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 357bd04a173b6aa51b896394a2e2f9f5754f7a21..fed748311b928cc534f781505a37686ec7a46ba3 100644
@@ -5398,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev)
                if (encoder->funcs->reset)
                        encoder->funcs->reset(encoder);
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               connector->status = connector_status_unknown;
-
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
                if (connector->funcs->reset)
                        connector->funcs->reset(connector);
-       }
 }
 EXPORT_SYMBOL(drm_mode_config_reset);
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a6d8a3ee7750adecb552415c8ffab06d599f46ff..260389acfb7752d01ce0ebd59597f06a671b2ea1 100644
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
+       unsigned size;
+       u64 offset;
        int i, ret = 0;
 
        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
-               if (entry->offset == reg->offset &&
+               if (entry->offset == (reg->offset & -entry->size) &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;
 
+       /* We use the low bits to encode extra flags as the register should
+        * be naturally aligned (and those that are not so aligned merely
+        * limit the available flags for that register).
+        */
+       offset = entry->offset;
+       size = entry->size;
+       size |= reg->offset ^ offset;
+
        intel_runtime_pm_get(dev_priv);
 
-       switch (entry->size) {
+       switch (size) {
+       case 8 | 1:
+               reg->val = I915_READ64_2x32(offset, offset+4);
+               break;
        case 8:
-               reg->val = I915_READ64(reg->offset);
+               reg->val = I915_READ64(offset);
                break;
        case 4:
-               reg->val = I915_READ(reg->offset);
+               reg->val = I915_READ(offset);
                break;
        case 2:
-               reg->val = I915_READ16(reg->offset);
+               reg->val = I915_READ16(offset);
                break;
        case 1:
-               reg->val = I915_READ8(reg->offset);
+               reg->val = I915_READ8(offset);
                break;
        default:
-               MISSING_CASE(entry->size);
                ret = -EINVAL;
                goto out;
        }
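
A worked example of the low-bit flag encoding above, using a hypothetical
whitelist entry at offset 0x2358 with size 8 (the whitelist table itself is
not shown in this diff):

    /* userspace passes reg->offset = 0x2358 | 1 to request a 2x32 read */
    /* match: 0x2359 & -8 == 0x2358   (& -entry->size masks flag bits)  */
    /* flags: size = 8 | (0x2359 ^ 0x2358) == (8 | 1)                   */
    /*        -> taken by the new "case 8 | 1:" via I915_READ64_2x32()  */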
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 882cccdad27249c8e3afa992419fb0fb16bec97b..ac6fe40b99f753b267a5e78647c0bdb2e078c5bd 100644
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
 #endif
-#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+    defined(__powerpc__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 3318de690e00666bf7da60c84189a1fc54a0a544..a2dbbbe0d8d7e81b06ac6d646737413fe4d1d357 100644
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
        struct cp2112_force_read_report report;
        int ret;
 
+       if (size > sizeof(dev->read_data))
+               size = sizeof(dev->read_data);
        report.report = CP2112_DATA_READ_FORCE_SEND;
        report.length = cpu_to_be16(size);
 
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6a9b05b328a9d40e06b09ed8f8c6e6010669374b..7c811252c1cefebb2418944a7f391117a8a92b6d 100644
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        /*
         * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
         * for the stylus.
+        * The check for mt_report_id ensures we don't process
+        * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical
+        * collection, but within the report ID.
         */
        if (field->physical == HID_DG_STYLUS)
                return 0;
+       else if ((field->physical == 0) &&
+                (field->report->id != td->mt_report_id) &&
+                (td->mt_report_id != -1))
+               return 0;
 
        if (field->application == HID_DG_TOUCHSCREEN ||
            field->application == HID_DG_TOUCHPAD)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 53e7de7cb9e25e6861f1acb5e3438e590b2e70ad..20f9a653444c21d0ef89f4561d4d68544dff4bf0 100644
@@ -87,6 +87,9 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 4c0ffca97befd61cf3cdd947138d14070c4e7895..44958d79d598dfc3a7e6938a2babbf3e1fdc2188 100644
@@ -1271,11 +1271,13 @@ static int wacom_register_inputs(struct wacom *wacom)
        pad_input_dev = NULL;
        wacom_wac->pad_registered = false;
 fail_register_pad_input:
-       input_unregister_device(touch_input_dev);
+       if (touch_input_dev)
+               input_unregister_device(touch_input_dev);
        wacom_wac->touch_input = NULL;
        wacom_wac->touch_registered = false;
 fail_register_touch_input:
-       input_unregister_device(pen_input_dev);
+       if (pen_input_dev)
+               input_unregister_device(pen_input_dev);
        wacom_wac->pen_input = NULL;
        wacom_wac->pen_registered = false;
 fail_register_pen_input:
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 232da89f4e886fe02b82d452c1a0868f0b65b967..0d244239e55def103f786254f3f617a84b2f0ba2 100644
@@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
                        features->x_max = 4096;
                        features->y_max = 4096;
                }
+               else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
+                       features->device_type |= WACOM_DEVICETYPE_PAD;
+               }
        }
 
        /*
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8e9ec81ce4bbd85473d6d6a35e7c3567569187ee..da902baaa7946aac569b7ebe8a316c647dfd8187 100644
  * Stream table.
  *
  * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
- * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus)
+ * 2lvl: 128k L1 entries,
+ *       256 lazy entries per table (each table covers a PCI bus)
  */
-#define STRTAB_L1_SZ_SHIFT             16
+#define STRTAB_L1_SZ_SHIFT             20
 #define STRTAB_SPLIT                   8
 
 #define STRTAB_L1_DESC_DWORDS          1
 #define ARM64_TCR_TG0_SHIFT            14
 #define ARM64_TCR_TG0_MASK             0x3UL
 #define CTXDESC_CD_0_TCR_IRGN0_SHIFT   8
-#define ARM64_TCR_IRGN0_SHIFT          24
+#define ARM64_TCR_IRGN0_SHIFT          8
 #define ARM64_TCR_IRGN0_MASK           0x3UL
 #define CTXDESC_CD_0_TCR_ORGN0_SHIFT   10
-#define ARM64_TCR_ORGN0_SHIFT          26
+#define ARM64_TCR_ORGN0_SHIFT          10
 #define ARM64_TCR_ORGN0_MASK           0x3UL
 #define CTXDESC_CD_0_TCR_SH0_SHIFT     12
 #define ARM64_TCR_SH0_SHIFT            12
@@ -542,6 +543,9 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_HYP              (1 << 12)
        u32                             features;
 
+#define ARM_SMMU_OPT_SKIP_PREFETCH     (1 << 0)
+       u32                             options;
+
        struct arm_smmu_cmdq            cmdq;
        struct arm_smmu_evtq            evtq;
        struct arm_smmu_priq            priq;
@@ -602,11 +606,35 @@ struct arm_smmu_domain {
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
 
+struct arm_smmu_option_prop {
+       u32 opt;
+       const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+       { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
+       { 0, NULL},
+};
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
        return container_of(dom, struct arm_smmu_domain, domain);
 }
 
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+       int i = 0;
+
+       do {
+               if (of_property_read_bool(smmu->dev->of_node,
+                                               arm_smmu_options[i].prop)) {
+                       smmu->options |= arm_smmu_options[i].opt;
+                       dev_notice(smmu->dev, "option %s\n",
+                               arm_smmu_options[i].prop);
+               }
+       } while (arm_smmu_options[++i].opt);
+}
+
 /* Low-level queue manipulation functions */
 static bool queue_full(struct arm_smmu_queue *q)
 {
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
        arm_smmu_sync_ste_for_sid(smmu, sid);
 
        /* It's likely that we'll want to use the new STE soon */
-       arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+       if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
+               arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
 }
 
 static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
                return 0;
 
        size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
-       strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS];
+       strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
 
        desc->span = STRTAB_SPLIT + 1;
        desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 {
        void *strtab;
        u64 reg;
-       u32 size;
+       u32 size, l1size;
        int ret;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
        /* Calculate the L1 size, capped to the SIDSIZE */
        size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
        size = min(size, smmu->sid_bits - STRTAB_SPLIT);
-       if (size + STRTAB_SPLIT < smmu->sid_bits)
+       cfg->num_l1_ents = 1 << size;
+
+       size += STRTAB_SPLIT;
+       if (size < smmu->sid_bits)
                dev_warn(smmu->dev,
                         "2-level strtab only covers %u/%u bits of SID\n",
-                        size + STRTAB_SPLIT, smmu->sid_bits);
+                        size, smmu->sid_bits);
 
-       cfg->num_l1_ents = 1 << size;
-       size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
-       strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
+       l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
+       strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
                                     GFP_KERNEL);
        if (!strtab) {
                dev_err(smmu->dev,
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
        ret = arm_smmu_init_l1_strtab(smmu);
        if (ret)
                dma_free_coherent(smmu->dev,
-                                 cfg->num_l1_ents *
-                                 (STRTAB_L1_DESC_DWORDS << 3),
+                                 l1size,
                                  strtab,
                                  cfg->strtab_dma);
        return ret;
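
Checking the arithmetic behind the new "128k L1 entries" comment: with
STRTAB_L1_SZ_SHIFT = 20 and STRTAB_L1_DESC_DWORDS = 1,

    size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3)
         = 20 - (0 + 3) = 17        /* 1 << 17 = 128Ki L1 entries */
    /* Each L1 entry spans 1 << STRTAB_SPLIT = 256 STEs, so the table
     * covers 17 + 8 = 25 bits of SID before the IDR1.SIDSIZE cap. */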
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
        if (irq > 0)
                smmu->gerr_irq = irq;
 
+       parse_driver_options(smmu);
+
        /* Probe the h/w */
        ret = arm_smmu_device_probe(smmu);
        if (ret)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1dec2cb2f53319df8a49abcf8e645..0649b94f59584ca5b885cd0ecad595a84af0d89d 100644
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
 static void domain_exit(struct dmar_domain *domain)
 {
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
        struct page *freelist = NULL;
-       int i;
 
        /* Domain 0 is reserved, so dont process it */
        if (!domain)
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain)
 
        /* clear attached or cached domains */
        rcu_read_lock();
-       for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
-               iommu_detach_domain(domain, g_iommus[i]);
+       for_each_active_iommu(iommu, drhd)
+               if (domain_type_is_vm(domain) ||
+                   test_bit(iommu->seq_id, domain->iommu_bmp))
+                       iommu_detach_domain(domain, iommu);
        rcu_read_unlock();
 
        dma_free_pagelist(freelist);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8c91fd5eb6fdd4696f8b6d298eef8684525fc94e..375be509e95f5bd302da79446cb8867f8ecb8ec7 100644
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
        cs->hw.ser->tty = tty;
        atomic_set(&cs->hw.ser->refcnt, 1);
        init_completion(&cs->hw.ser->dead_cmp);
-
        tty->disc_data = cs;
 
+       /* Set the amount of data we're willing to receive per call
+        * from the hardware driver to half of the input buffer size
+        * to leave some reserve.
+        * Note: We don't do flow control towards the hardware driver.
+        * If more data is received than will fit into the input buffer,
+        * it will be dropped and an error will be logged. This should
+        * never happen as the device is slow and the buffer size ample.
+        */
+       tty->receive_room = RBUFSIZE/2;
+
        /* OK.. Initialization of the datastructures and the HW is done.. Now
         * startup system and notify the LL that we are ready to run
         */
@@ -597,28 +606,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty)
        return 0;
 }
 
-/*
- * Read on the tty.
- * Unused, received data goes only to the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_read(struct tty_struct *tty, struct file *file,
-                unsigned char __user *buf, size_t count)
-{
-       return -EAGAIN;
-}
-
-/*
- * Write on the tty.
- * Unused, transmit data comes only from the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_write(struct tty_struct *tty, struct file *file,
-                 const unsigned char *buf, size_t count)
-{
-       return -EAGAIN;
-}
-
 /*
  * Ioctl on the tty.
  * Called in process context only.
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = {
        .open           = gigaset_tty_open,
        .close          = gigaset_tty_close,
        .hangup         = gigaset_tty_hangup,
-       .read           = gigaset_tty_read,
-       .write          = gigaset_tty_write,
        .ioctl          = gigaset_tty_ioctl,
        .receive_buf    = gigaset_tty_receive,
        .write_wakeup   = gigaset_tty_wakeup,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 317a49480475d2c87fb5a4aa56716844ce21005a..e1ccefce9a9de629505344f9fc22042ab09b9684 100644
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
        call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
 }
 
+static struct slave *bond_get_old_active(struct bonding *bond,
+                                        struct slave *new_active)
+{
+       struct slave *slave;
+       struct list_head *iter;
+
+       bond_for_each_slave(bond, slave, iter) {
+               if (slave == new_active)
+                       continue;
+
+               if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
+                       return slave;
+       }
+
+       return NULL;
+}
+
 /* bond_do_fail_over_mac
  *
  * Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
                if (!new_active)
                        return;
 
+               if (!old_active)
+                       old_active = bond_get_old_active(bond, new_active);
+
                if (old_active) {
                        ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
                        ether_addr_copy(saddr.sa_data,
@@ -1725,9 +1745,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
 err_undo_flags:
        /* Enslave of first slave has failed and we need to fix master's mac */
-       if (!bond_has_slaves(bond) &&
-           ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
-               eth_hw_addr_random(bond_dev);
+       if (!bond_has_slaves(bond)) {
+               if (ether_addr_equal_64bits(bond_dev->dev_addr,
+                                           slave_dev->dev_addr))
+                       eth_hw_addr_random(bond_dev);
+               if (bond_dev->type != ARPHRD_ETHER) {
+                       ether_setup(bond_dev);
+                       bond_dev->flags |= IFF_MASTER;
+                       bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+               }
+       }
 
        return res;
 }
@@ -1916,6 +1943,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
                netdev_info(bond_dev, "Destroying bond %s\n",
                            bond_dev->name);
+               bond_remove_proc_entry(bond);
                unregister_netdevice(bond_dev);
        }
        return ret;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f4e40aa4d2a21db2be91eb39f46d1e410ac6c0e0..945c0955a9675198a8b0945ddc49dd6668380838 100644
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
 
        cf->can_id |= CAN_ERR_CRTL;
        cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-       netif_receive_skb(skb);
 
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 }
 
 /**
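This hunk opens a long series of identical reorderings across the CAN drivers
below (bfin, cc770, flexcan, grcan, sja1000, slcan, ti_hecc and the USB
adapters): the can_frame lives inside the skb's data, so its length must be
read for the statistics before the skb is handed to the stack, which may free
it immediately. The pattern, as a sketch:

    /* cf points into the skb; do not touch it after the hand-off */
    stats->rx_packets++;
    stats->rx_bytes += cf->can_dlc;   /* read while the skb is still ours */
    netif_rx(skb);                    /* stack may free the skb right away */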
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
        }
 
        at91_read_mb(dev, mb, cf);
-       netif_receive_skb(skb);
 
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        can_led_event(dev, CAN_LED_EVENT_RX);
 }
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
                return 0;
 
        at91_poll_err_frame(dev, cf, reg_sr);
-       netif_receive_skb(skb);
 
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        return 1;
 }
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
                return;
 
        at91_irq_err_state(dev, cf, new_state);
-       netif_rx(skb);
 
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        priv->can.state = new_state;
 }
index 27ad312e7abf34bdf94a86ce66fa5ae900da1aed..57dadd52b428a536d71f2364c006641e5e765083 100644 (file)
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
                cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
        }
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
 
        priv->can.state = state;
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
index c11d4498403617e4dddcea42d085e08355c9a5d2..70a8cbb29e75844a02bea49a937bf0c05a200910 100644 (file)
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
                for (i = 0; i < cf->can_dlc; i++)
                        cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
        }
-       netif_rx(skb);
 
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 static int cc770_err(struct net_device *dev, u8 status)
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status)
                }
        }
 
-       netif_rx(skb);
 
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
index 6201c5a1a8845f2e3e68f3921a2fcae8e4469217..b1e8d729851cbb5173c1bbec2b34c4893b2ff595 100644 (file)
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
                return 0;
 
        do_bus_err(dev, cf, reg_esr);
-       netif_receive_skb(skb);
 
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        return 1;
 }
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
        if (unlikely(new_state == CAN_STATE_BUS_OFF))
                can_bus_off(dev);
 
-       netif_receive_skb(skb);
-
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        return 1;
 }
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev)
        }
 
        flexcan_read_fifo(dev, cf);
-       netif_receive_skb(skb);
 
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        can_led_event(dev, CAN_LED_EVENT_RX);
 
index e3d7e22a4fa080504544a245d9e21adb80aea036..db9538d4b3586e7357ae8d46e4db9f25a27cd469 100644 (file)
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget)
                                cf->data[i] = (u8)(slot[j] >> shift);
                        }
                }
-               netif_receive_skb(skb);
 
                /* Update statistics and read pointer */
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
+               netif_receive_skb(skb);
+
                rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
        }
 
index 32bd7f451aa42b53f0a479af667861db6693d619..7b92e911a6168badb3e30f8fc55b2e6fdd0f61dc 100644 (file)
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev)
        /* release receive buffer */
        sja1000_write_cmdreg(priv, CMD_RRB);
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        can_led_event(dev, CAN_LED_EVENT_RX);
 }
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
                        can_bus_off(dev);
        }
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
index a23a7af8eb9a0ccfdffc39518ee9ee7cd43485d1..9a3f15cb7ef4883abebe6bac8ffc5909077d475f 100644 (file)
@@ -218,10 +218,10 @@ static void slc_bump(struct slcan *sl)
 
        memcpy(skb_put(skb, sizeof(struct can_frame)),
               &cf, sizeof(struct can_frame));
-       netif_rx_ni(skb);
 
        sl->dev->stats.rx_packets++;
        sl->dev->stats.rx_bytes += cf.can_dlc;
+       netif_rx_ni(skb);
 }
 
 /* parse tty input stream */
index c1a95a34d62ea682d8a2582ddc068ca52c7e592d..b7e83c2120235e133141422ebe037a3bf43e7a15 100644 (file)
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
        if (ret)
                goto out_clk;
 
-       priv->power = devm_regulator_get(&spi->dev, "vdd");
-       priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+       priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
+       priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
        if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
            (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
                ret = -EPROBE_DEFER;
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
        struct spi_device *spi = to_spi_device(dev);
        struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
-       if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+       if (priv->after_suspend & AFTER_SUSPEND_POWER)
                mcp251x_power_enable(priv->power, 1);
+
+       if (priv->after_suspend & AFTER_SUSPEND_UP) {
+               mcp251x_power_enable(priv->transceiver, 1);
                queue_work(priv->wq, &priv->restart_work);
        } else {
-               if (priv->after_suspend & AFTER_SUSPEND_UP) {
-                       mcp251x_power_enable(priv->transceiver, 1);
-                       queue_work(priv->wq, &priv->restart_work);
-               } else {
-                       priv->after_suspend = 0;
-               }
+               priv->after_suspend = 0;
        }
+
        priv->force_quit = 0;
        enable_irq(spi->irq);
        return 0;
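The flattened resume path is not purely cosmetic; as I read the before and
after, the flag combinations now behave as follows:

    /* Sketch of the rewritten branch logic:
     *   POWER|UP -> power on, transceiver on, restart_work queued
     *   POWER    -> power on, state cleared (old code queued a restart
     *               here but left the transceiver off)
     *   UP       -> transceiver on, restart_work queued
     *   neither  -> state cleared
     */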
index e95a9e1a889f19c4673d9735e5e932eb83340767..cf345cbfe8198ef23ee328fc2eb67f5841751ee1 100644 (file)
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
                }
        }
 
-       netif_rx(skb);
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
index 866bac0ae7e966855d1085f5a8735988832a1bb7..2d390384ef3bb3d3845fcf6102bef70715f1dd21 100644 (file)
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
                        cf->data[i] = msg->msg.can_msg.msg[i];
        }
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
                stats->rx_errors++;
        }
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 /*
index 411c1af92c62c1c93985e4adf7232d1b6b78d8bc..0e5a4493ba4fee6d3c4fb5626a676f18802a6ef3 100644 (file)
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
                        cf->data[7] = rxerr;
                }
 
-               netif_rx(skb);
-
                priv->bec.txerr = txerr;
                priv->bec.rxerr = rxerr;
 
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
+               netif_rx(skb);
        }
 }
 
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
                                cf->data[i] = msg->msg.rx.data[i];
                }
 
-               netif_rx(skb);
-
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
+               netif_rx(skb);
        }
 
        return;
index 72427f21edffaa9fed0874085e277aa4c83783b7..6b94007ae05221c94d3dedce77a412e732834337 100644 (file)
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
                hwts->hwtstamp = timeval_to_ktime(tv);
        }
 
-       netif_rx(skb);
        mc->netdev->stats.rx_packets++;
        mc->netdev->stats.rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
        hwts = skb_hwtstamps(skb);
        hwts->hwtstamp = timeval_to_ktime(tv);
 
-       /* push the skb */
-       netif_rx(skb);
-
        /* update statistics */
        mc->netdev->stats.rx_packets++;
        mc->netdev->stats.rx_bytes += cf->can_dlc;
+       /* push the skb */
+       netif_rx(skb);
 
        return 0;
 
index dec51717635e900aafc91b771e5bfa0a2b594fae..7d61b3279798b936f4a3238afb57c73437ba22f2 100644 (file)
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
        hwts = skb_hwtstamps(skb);
        hwts->hwtstamp = timeval_to_ktime(tv);
 
-       netif_rx(skb);
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += can_frame->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
        peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
        hwts = skb_hwtstamps(skb);
        hwts->hwtstamp = timeval_to_ktime(tv);
-       netif_rx(skb);
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += can_frame->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
index dd52c7a4c80d9f26faa74ece8d109ee21045ba47..de95b1ccba3e3b6d4d00e313acb280cd178a000d 100644 (file)
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
        priv->bec.txerr = txerr;
        priv->bec.rxerr = rxerr;
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 /* Read data and status frames */
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
                else
                        memcpy(cf->data, msg->data, cf->can_dlc);
 
-               netif_rx(skb);
-
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
+               netif_rx(skb);
 
                can_led_event(priv->netdev, CAN_LED_EVENT_RX);
        } else {
index 972982f8bea7af16f253b58d3540cc0b9aa6682b..079897b3a9554b55918c97a94f5ba718c314da38 100644 (file)
@@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
        }
 
        /* Include the pseudo-PHY address and the broadcast PHY address to
-        * divert reads towards our workaround
+        * divert reads towards our workaround. This is only required for
+        * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
+        * that we can use the regular SWITCH_MDIO master controller instead.
+        *
+        * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
+        * to have a 1:1 mapping between Port address and PHY address in order
+        * to utilize the slave_mii_bus instance to read from Port PHYs. This is
+        * not what we want here, so we initialize phys_mii_mask to 0 to always
+        * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
         */
-       ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+       if (of_machine_is_compatible("brcm,bcm7445d0"))
+               ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+       else
+               ds->phys_mii_mask = 0;
 
        rev = reg_readl(priv, REG_SWITCH_REVISION);
        priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
index fd8547c2b79d46786b10807a0c62f338b6a60e27..561342466076c57888bcd92b4f463756f4d10a49 100644 (file)
@@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
 
        newfid = __ffs(ps->fid_mask);
        ps->fid[port] = newfid;
-       ps->fid_mask &= (1 << newfid);
+       ps->fid_mask &= ~(1 << newfid);
        ps->bridge_mask[fid] &= ~(1 << port);
        ps->bridge_mask[newfid] = 1 << port;
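The one-character change fixes a classic bit-clear bug in the free-FID
bookkeeping. A worked example on a 4-bit mask:

    /* newfid = __ffs(fid_mask) = 1:
     *   fid_mask            = 0b0110
     *   fid_mask &  (1<<1)  = 0b0010   old: newfid stays marked free and
     *                                  every other free FID is forgotten
     *   fid_mask & ~(1<<1)  = 0b0100   fixed: only FID 1 marked in use
     */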
 
index 42e20e5385acb553b528ee8a6b275791e9ad1b44..1f89c59b43535f9b65e946c7468cb1fcb13a2022 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/pm_runtime.h>
 #include <linux/ptrace.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -78,7 +77,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
 #define FEC_ENET_RAEM_V        0x8
 #define FEC_ENET_RAFL_V        0x8
 #define FEC_ENET_OPD_V 0xFFF0
-#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
 
 static struct platform_device_id fec_devtype[] = {
        {
@@ -1769,13 +1767,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
        struct fec_enet_private *fep = bus->priv;
-       struct device *dev = &fep->pdev->dev;
        unsigned long time_left;
-       int ret = 0;
-
-       ret = pm_runtime_get_sync(dev);
-       if (IS_ERR_VALUE(ret))
-               return ret;
 
        fep->mii_timeout = 0;
        init_completion(&fep->mdio_done);
@@ -1791,30 +1783,18 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
        if (time_left == 0) {
                fep->mii_timeout = 1;
                netdev_err(fep->netdev, "MDIO read timeout\n");
-               ret = -ETIMEDOUT;
-               goto out;
+               return -ETIMEDOUT;
        }
 
-       ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
-
-out:
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
-
-       return ret;
+       /* return value */
+       return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
 }
 
 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                           u16 value)
 {
        struct fec_enet_private *fep = bus->priv;
-       struct device *dev = &fep->pdev->dev;
        unsigned long time_left;
-       int ret = 0;
-
-       ret = pm_runtime_get_sync(dev);
-       if (IS_ERR_VALUE(ret))
-               return ret;
 
        fep->mii_timeout = 0;
        init_completion(&fep->mdio_done);
@@ -1831,13 +1811,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        if (time_left == 0) {
                fep->mii_timeout = 1;
                netdev_err(fep->netdev, "MDIO write timeout\n");
-               ret  = -ETIMEDOUT;
+               return -ETIMEDOUT;
        }
 
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
-
-       return ret;
+       return 0;
 }
 
 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
@@ -1849,6 +1826,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
                ret = clk_prepare_enable(fep->clk_ahb);
                if (ret)
                        return ret;
+               ret = clk_prepare_enable(fep->clk_ipg);
+               if (ret)
+                       goto failed_clk_ipg;
                if (fep->clk_enet_out) {
                        ret = clk_prepare_enable(fep->clk_enet_out);
                        if (ret)
@@ -1872,6 +1852,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
                }
        } else {
                clk_disable_unprepare(fep->clk_ahb);
+               clk_disable_unprepare(fep->clk_ipg);
                if (fep->clk_enet_out)
                        clk_disable_unprepare(fep->clk_enet_out);
                if (fep->clk_ptp) {
@@ -1893,6 +1874,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
        if (fep->clk_enet_out)
                clk_disable_unprepare(fep->clk_enet_out);
 failed_clk_enet_out:
+               clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
                clk_disable_unprepare(fep->clk_ahb);
 
        return ret;
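With runtime PM backed out, clk_ipg is again enabled and disabled in lock-step
with the other clocks here, and the restored error labels unwind in reverse
acquisition order. The idiom, reduced to a sketch:

    ret = clk_prepare_enable(clk_ahb);
    if (ret)
        return ret;
    ret = clk_prepare_enable(clk_ipg);
    if (ret)
        goto failed_clk_ipg;        /* undo only what succeeded */
    return 0;

    failed_clk_ipg:
    clk_disable_unprepare(clk_ahb);
    return ret;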
@@ -2864,14 +2847,10 @@ fec_enet_open(struct net_device *ndev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        int ret;
 
-       ret = pm_runtime_get_sync(&fep->pdev->dev);
-       if (IS_ERR_VALUE(ret))
-               return ret;
-
        pinctrl_pm_select_default_state(&fep->pdev->dev);
        ret = fec_enet_clk_enable(ndev, true);
        if (ret)
-               goto clk_enable;
+               return ret;
 
        /* I should reset the ring buffers here, but I don't yet know
         * a simple way to do that.
@@ -2902,9 +2881,6 @@ fec_enet_open(struct net_device *ndev)
        fec_enet_free_buffers(ndev);
 err_enet_alloc:
        fec_enet_clk_enable(ndev, false);
-clk_enable:
-       pm_runtime_mark_last_busy(&fep->pdev->dev);
-       pm_runtime_put_autosuspend(&fep->pdev->dev);
        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
        return ret;
 }
@@ -2927,9 +2903,6 @@ fec_enet_close(struct net_device *ndev)
 
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
-       pm_runtime_mark_last_busy(&fep->pdev->dev);
-       pm_runtime_put_autosuspend(&fep->pdev->dev);
-
        fec_enet_free_buffers(ndev);
 
        return 0;
@@ -3415,10 +3388,6 @@ fec_probe(struct platform_device *pdev)
        if (ret)
                goto failed_clk;
 
-       ret = clk_prepare_enable(fep->clk_ipg);
-       if (ret)
-               goto failed_clk_ipg;
-
        fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
                ret = regulator_enable(fep->reg_phy);
@@ -3465,8 +3434,6 @@ fec_probe(struct platform_device *pdev)
        netif_carrier_off(ndev);
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&pdev->dev);
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
 
        ret = register_netdev(ndev);
        if (ret)
@@ -3480,12 +3447,6 @@ fec_probe(struct platform_device *pdev)
 
        fep->rx_copybreak = COPYBREAK_DEFAULT;
        INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
-
-       pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
-       pm_runtime_use_autosuspend(&pdev->dev);
-       pm_runtime_mark_last_busy(&pdev->dev);
-       pm_runtime_put_autosuspend(&pdev->dev);
-
        return 0;
 
 failed_register:
@@ -3496,8 +3457,6 @@ fec_probe(struct platform_device *pdev)
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
 failed_regulator:
-       clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
        fec_enet_clk_enable(ndev, false);
 failed_clk:
 failed_phy:
@@ -3609,28 +3568,7 @@ static int __maybe_unused fec_resume(struct device *dev)
        return ret;
 }
 
-static int __maybe_unused fec_runtime_suspend(struct device *dev)
-{
-       struct net_device *ndev = dev_get_drvdata(dev);
-       struct fec_enet_private *fep = netdev_priv(ndev);
-
-       clk_disable_unprepare(fep->clk_ipg);
-
-       return 0;
-}
-
-static int __maybe_unused fec_runtime_resume(struct device *dev)
-{
-       struct net_device *ndev = dev_get_drvdata(dev);
-       struct fec_enet_private *fep = netdev_priv(ndev);
-
-       return clk_prepare_enable(fep->clk_ipg);
-}
-
-static const struct dev_pm_ops fec_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
-       SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
-};
+static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
 
 static struct platform_driver fec_driver = {
        .driver = {
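Dropping the runtime hooks also shrinks the dev_pm_ops to the system-sleep
callbacks. A sketch of what SIMPLE_DEV_PM_OPS expands to here, assuming the
usual definition of the macro in CONFIG_PM_SLEEP builds:

    static const struct dev_pm_ops fec_pm_ops = {
        .suspend  = fec_suspend, .resume  = fec_resume,
        .freeze   = fec_suspend, .thaw    = fec_resume,
        .poweroff = fec_suspend, .restore = fec_resume,
    };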
index 370e20ed224c5c76eaca92954be5800d09d81ada..62e48bc0cb23ba98ed2cb2c447af288bd1c3906d 100644 (file)
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                     struct mvneta_rx_queue *rxq)
 {
        struct net_device *dev = pp->dev;
-       int rx_done, rx_filled;
+       int rx_done;
        u32 rcvd_pkts = 0;
        u32 rcvd_bytes = 0;
 
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                rx_todo = rx_done;
 
        rx_done = 0;
-       rx_filled = 0;
 
        /* Fairness NAPI loop */
        while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                int rx_bytes, err;
 
                rx_done++;
-               rx_filled++;
                rx_status = rx_desc->status;
                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
                data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                        continue;
                }
 
+               /* Refill processing */
+               err = mvneta_rx_refill(pp, rx_desc);
+               if (err) {
+                       netdev_err(dev, "Linux processing - Can't refill\n");
+                       rxq->missed++;
+                       goto err_drop_frame;
+               }
+
                skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
                if (!skb)
                        goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                mvneta_rx_csum(pp, rx_status, skb);
 
                napi_gro_receive(&pp->napi, skb);
-
-               /* Refill processing */
-               err = mvneta_rx_refill(pp, rx_desc);
-               if (err) {
-                       netdev_err(dev, "Linux processing - Can't refill\n");
-                       rxq->missed++;
-                       rx_filled--;
-               }
        }
 
        if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
        }
 
        /* Update rxq management counters */
-       mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+       mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
 
        return rx_done;
 }
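Refilling before build_skb() means an allocation failure now drops the frame
and recycles the descriptor's current buffer, rather than leaving a hole in
the ring; with every processed descriptor guaranteed refilled, rx_done doubles
as the refill count and rx_filled goes away. A sketch of the resulting order:

    err = mvneta_rx_refill(pp, rx_desc);
    if (err)
        goto err_drop_frame;            /* old buffer stays on the ring */
    skb = build_skb(data, frag_size);   /* now safe to hand `data` away */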
index fd9745714d903fa5945956709491be934ea02c67..78849dd4ef8e9463c2420c84cc583a927492df58 100644 (file)
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
        struct ravb_desc *desc = NULL;
        int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
        int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
-       struct sk_buff *skb;
        dma_addr_t dma_addr;
-       void *buffer;
        int i;
 
        priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
        memset(priv->rx_ring[q], 0, rx_ring_size);
        /* Build RX ring buffer */
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
-               priv->rx_skb[q][i] = NULL;
-               skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
-               if (!skb)
-                       break;
-               ravb_set_buffer_align(skb);
                /* RX descriptor */
                rx_desc = &priv->rx_ring[q][i];
                /* The size of the buffer should be on 16-byte boundary. */
                rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-               dma_addr = dma_map_single(&ndev->dev, skb->data,
+               dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
                                          ALIGN(PKT_BUF_SZ, 16),
                                          DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, dma_addr)) {
-                       dev_kfree_skb(skb);
-                       break;
-               }
-               priv->rx_skb[q][i] = skb;
+               /* We just set the data size to 0 for a failed mapping which
+                * should prevent DMA from happening...
+                */
+               if (dma_mapping_error(&ndev->dev, dma_addr))
+                       rx_desc->ds_cc = cpu_to_le16(0);
                rx_desc->dptr = cpu_to_le32(dma_addr);
                rx_desc->die_dt = DT_FEMPTY;
        }
        rx_desc = &priv->rx_ring[q][i];
        rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
        rx_desc->die_dt = DT_LINKFIX; /* type */
-       priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
 
        memset(priv->tx_ring[q], 0, tx_ring_size);
        /* Build TX ring buffer */
        for (i = 0; i < priv->num_tx_ring[q]; i++) {
-               priv->tx_skb[q][i] = NULL;
-               priv->tx_buffers[q][i] = NULL;
-               buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-               if (!buffer)
-                       break;
-               /* Aligned TX buffer */
-               priv->tx_buffers[q][i] = buffer;
                tx_desc = &priv->tx_ring[q][i];
                tx_desc->die_dt = DT_EEMPTY;
        }
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
        struct ravb_private *priv = netdev_priv(ndev);
+       struct sk_buff *skb;
        int ring_size;
+       void *buffer;
+       int i;
 
        /* Allocate RX and TX skb rings */
        priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
        if (!priv->rx_skb[q] || !priv->tx_skb[q])
                goto error;
 
+       for (i = 0; i < priv->num_rx_ring[q]; i++) {
+               skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+               if (!skb)
+                       goto error;
+               ravb_set_buffer_align(skb);
+               priv->rx_skb[q][i] = skb;
+       }
+
        /* Allocate rings for the aligned buffers */
        priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
                                      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
        if (!priv->tx_buffers[q])
                goto error;
 
+       for (i = 0; i < priv->num_tx_ring[q]; i++) {
+               buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+               if (!buffer)
+                       goto error;
+               /* Aligned TX buffer */
+               priv->tx_buffers[q][i] = buffer;
+       }
+
        /* Allocate all RX descriptors. */
        ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
        priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
                if (--boguscnt < 0)
                        break;
 
+               /* We use 0-byte descriptors to mark the DMA mapping errors */
+               if (!pkt_len)
+                       continue;
+
                if (desc_status & MSC_MC)
                        stats->multicast++;
 
@@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 
                        skb = priv->rx_skb[q][entry];
                        priv->rx_skb[q][entry] = NULL;
-                       dma_sync_single_for_cpu(&ndev->dev,
-                                               le32_to_cpu(desc->dptr),
-                                               ALIGN(PKT_BUF_SZ, 16),
-                                               DMA_FROM_DEVICE);
+                       dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+                                        ALIGN(PKT_BUF_SZ, 16),
+                                        DMA_FROM_DEVICE);
                        get_ts &= (q == RAVB_NC) ?
                                        RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
                                        ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
                        if (!skb)
                                break;  /* Better luck next round. */
                        ravb_set_buffer_align(skb);
-                       dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
-                                        ALIGN(PKT_BUF_SZ, 16),
-                                        DMA_FROM_DEVICE);
                        dma_addr = dma_map_single(&ndev->dev, skb->data,
                                                  le16_to_cpu(desc->ds_cc),
                                                  DMA_FROM_DEVICE);
                        skb_checksum_none_assert(skb);
-                       if (dma_mapping_error(&ndev->dev, dma_addr)) {
-                               dev_kfree_skb_any(skb);
-                               break;
-                       }
+                       /* We just set the data size to 0 for a failed mapping
+                        * which should prevent DMA from happening...
+                        */
+                       if (dma_mapping_error(&ndev->dev, dma_addr))
+                               desc->ds_cc = cpu_to_le16(0);
                        desc->dptr = cpu_to_le32(dma_addr);
                        priv->rx_skb[q][entry] = skb;
                }
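Both hunks in this file adopt one convention for DMA mapping failures: instead
of freeing the skb and bailing out, the descriptor is given a zero length, so
the hardware has nowhere to write and the rx loop can recognize and skip the
slot. In sketch form:

    /* producer side: poison the descriptor instead of bailing out */
    if (dma_mapping_error(&ndev->dev, dma_addr))
        desc->ds_cc = cpu_to_le16(0);   /* zero size: no DMA happens */

    /* consumer side: a zero pkt_len marks the poisoned slot */
    if (!pkt_len)
        continue;                       /* remap on a later pass */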
@@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        u32 dma_addr;
        void *buffer;
        u32 entry;
-       u32 tccr;
 
        spin_lock_irqsave(&priv->lock, flags);
        if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
@@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        dma_wmb();
        desc->die_dt = DT_FSINGLE;
 
-       tccr = ravb_read(ndev, TCCR);
-       if (!(tccr & (TCCR_TSRQ0 << q)))
-               ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+       ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 
        priv->cur_tx[q]++;
        if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
index 50f7a7a26821c7a40cb73294a4844406ea9caee5..864b476f7fd5a33b81ac2f6ea9b08e0cf99d299d 100644 (file)
@@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device,
        if (res->mac)
                memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
 
-       dev_set_drvdata(device, priv);
+       dev_set_drvdata(device, priv->dev);
 
        /* Verify driver arguments */
        stmmac_verify_args();
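The drvdata fix matters because the power-management callbacks read the
pointer back as a net_device. A sketch of the consumer side, assuming
stmmac's suspend path of this era:

    struct net_device *ndev = dev_get_drvdata(dev);
    struct stmmac_priv *priv = netdev_priv(ndev);  /* bogus if drvdata
                                                      had stored priv */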
index f335bf119ab57d3e209a81e28cc7cbce5f9208c7..d155bf2573cd0ef00abc133074831cccc7f937b1 100644 (file)
@@ -793,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 static int cpsw_poll(struct napi_struct *napi, int budget)
 {
        struct cpsw_priv        *priv = napi_to_priv(napi);
-       int                     num_tx, num_rx;
-
-       num_tx = cpdma_chan_process(priv->txch, 128);
+       int                     num_rx;
 
        num_rx = cpdma_chan_process(priv->rxch, budget);
        if (num_rx < budget) {
@@ -810,9 +808,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
                }
        }
 
-       if (num_rx || num_tx)
-               cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
-                        num_rx, num_tx);
+       if (num_rx)
+               cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
 
        return num_rx;
 }
index 5ec4ed3f6c8def7a6a6cf527cd9ac7ca73c09844..ec8ed30196f3b8bd8e0ecf670d35a80c93ad4639 100644 (file)
@@ -1617,11 +1617,11 @@ static int netcp_ndo_open(struct net_device *ndev)
        }
        mutex_unlock(&netcp_modules_lock);
 
-       netcp_rxpool_refill(netcp);
        napi_enable(&netcp->rx_napi);
        napi_enable(&netcp->tx_napi);
        knav_queue_enable_notify(netcp->tx_compl_q);
        knav_queue_enable_notify(netcp->rx_queue);
+       netcp_rxpool_refill(netcp);
        netif_tx_wake_all_queues(ndev);
        dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
        return 0;
index 953a97492fabf46eda9986ad713e4cc9ec4275dd..9542b7bac61afab0f4537d91f8cc004160521f85 100644 (file)
@@ -67,8 +67,6 @@ struct ipvl_dev {
        struct ipvl_port        *port;
        struct net_device       *phy_dev;
        struct list_head        addrs;
-       int                     ipv4cnt;
-       int                     ipv6cnt;
        struct ipvl_pcpu_stats  __percpu *pcpu_stats;
        DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
        netdev_features_t       sfeatures;
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
        return rcu_dereference(d->rx_handler_data);
 }
 
+static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
+{
+       return rcu_dereference_bh(d->rx_handler_data);
+}
+
 static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
 {
        return rtnl_dereference(d->rx_handler_data);
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
 bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
 struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
                                        const void *iaddr, bool is_v6);
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
+void ipvlan_ht_addr_del(struct ipvl_addr *addr);
 #endif /* __IPVLAN_H */
index 8afbedad620d9ed27576dc6426878ae858979a51..207f62e8de9a93415cc76eb5fd75f987b3de53b6 100644 (file)
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
                hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
 }
 
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
+void ipvlan_ht_addr_del(struct ipvl_addr *addr)
 {
        hlist_del_init_rcu(&addr->hlnode);
-       if (sync)
-               synchronize_rcu();
 }
 
 struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
-       struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);
+       struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
 
        if (!port)
                goto out;
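The xmit path runs with bottom halves disabled: dev_queue_xmit() takes
rcu_read_lock_bh(), not rcu_read_lock(), so a plain rcu_dereference() here
trips lockdep's "suspicious RCU usage" check. The _bh accessor asserts the
matching critical section, as a sketch:

    rcu_read_lock_bh();                      /* done by dev_queue_xmit() */
    port = ipvlan_port_get_rcu_bh(phy_dev);  /* rcu_dereference_bh() inside */
    rcu_read_unlock_bh();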
index 1acc283160d924e0754f3da99fc120c638c257a2..20b58bdecf7540100edc5522e74804e6a7544d95 100644 (file)
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev)
        else
                dev->flags &= ~IFF_NOARP;
 
-       if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
-               list_for_each_entry(addr, &ipvlan->addrs, anode)
-                       ipvlan_ht_addr_add(ipvlan, addr);
-       }
+       list_for_each_entry(addr, &ipvlan->addrs, anode)
+               ipvlan_ht_addr_add(ipvlan, addr);
+
        return dev_uc_add(phy_dev, phy_dev->dev_addr);
 }
 
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev)
 
        dev_uc_del(phy_dev, phy_dev->dev_addr);
 
-       if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
-               list_for_each_entry(addr, &ipvlan->addrs, anode)
-                       ipvlan_ht_addr_del(addr, !dev->dismantle);
-       }
+       list_for_each_entry(addr, &ipvlan->addrs, anode)
+               ipvlan_ht_addr_del(addr);
+
        return 0;
 }
 
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
        ipvlan->port = port;
        ipvlan->sfeatures = IPVLAN_FEATURES;
        INIT_LIST_HEAD(&ipvlan->addrs);
-       ipvlan->ipv4cnt = 0;
-       ipvlan->ipv6cnt = 0;
 
        /* TODO Probably put random address here to be presented to the
         * world but keep using the physical-dev address for the outgoing
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_addr *addr, *next;
 
-       if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
-               list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
-                       ipvlan_ht_addr_del(addr, !dev->dismantle);
-                       list_del(&addr->anode);
-               }
+       list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
+               ipvlan_ht_addr_del(addr);
+               list_del(&addr->anode);
+               kfree_rcu(addr, rcu);
        }
+
        list_del_rcu(&ipvlan->pnode);
        unregister_netdevice_queue(dev, head);
        netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
@@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
        memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
        addr->atype = IPVL_IPV6;
        list_add_tail(&addr->anode, &ipvlan->addrs);
-       ipvlan->ipv6cnt++;
+
        /* If the interface is not up, the address will be added to the hash
         * list by ipvlan_open.
         */
@@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
        if (!addr)
                return;
 
-       ipvlan_ht_addr_del(addr, true);
+       ipvlan_ht_addr_del(addr);
        list_del(&addr->anode);
-       ipvlan->ipv6cnt--;
-       WARN_ON(ipvlan->ipv6cnt < 0);
        kfree_rcu(addr, rcu);
 
        return;
@@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
        struct net_device *dev = (struct net_device *)if6->idev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);
 
+       /* FIXME IPv6 autoconf calls us from bh without RTNL */
+       if (in_softirq())
+               return NOTIFY_DONE;
+
        if (!netif_is_ipvlan(dev))
                return NOTIFY_DONE;
 
@@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
        memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
        addr->atype = IPVL_IPV4;
        list_add_tail(&addr->anode, &ipvlan->addrs);
-       ipvlan->ipv4cnt++;
+
        /* If the interface is not up, the address will be added to the hash
         * list by ipvlan_open.
         */
@@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
        if (!addr)
                return;
 
-       ipvlan_ht_addr_del(addr, true);
+       ipvlan_ht_addr_del(addr);
        list_del(&addr->anode);
-       ipvlan->ipv4cnt--;
-       WARN_ON(ipvlan->ipv4cnt < 0);
        kfree_rcu(addr, rcu);
 
        return;
index c7a12e2e07b7670a55a682ea2988cc4cca86d7b7..8a3bf546989212734aa9a5a5a5b58948ecf264da 100644 (file)
@@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev)
                        return ret;
        }
 
-       if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) ||
+       if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
            (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
                val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
                                            DP83867_DEVADDR, phydev->addr);
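The operator fix turns an always-true test into a range check: with ||, every
interface mode at or below RGMII_RXID matched (as did everything at or above
RGMII_ID), so the RGMII delay setup also ran for non-RGMII modes. Illustrative
numbers (not the real enum values):

    /* say RGMII_ID = 7 and RGMII_RXID = 9, mode = 2 (MII):
     *   (2 >= 7) || (2 <= 9)  -> true   old: wrongly configured MII
     *   (2 >= 7) && (2 <= 9)  -> false  fixed: RGMII range only
     */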
index 095ef3fe369af5ebe08254384abc38176df1aef1..46a14cbb021541095a22016a7ad712d79db9307a 100644 (file)
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
 {
        struct phy_device *phydev = to_phy_device(dev);
        struct phy_driver *phydrv = to_phy_driver(drv);
+       const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
+       int i;
 
        if (of_driver_match_device(dev, drv))
                return 1;
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
        if (phydrv->match_phy_device)
                return phydrv->match_phy_device(phydev);
 
-       return (phydrv->phy_id & phydrv->phy_id_mask) ==
-               (phydev->phy_id & phydrv->phy_id_mask);
+       if (phydev->is_c45) {
+               for (i = 1; i < num_ids; i++) {
+                       if (!(phydev->c45_ids.devices_in_package & (1 << i)))
+                               continue;
+
+                       if ((phydrv->phy_id & phydrv->phy_id_mask) ==
+                           (phydev->c45_ids.device_ids[i] &
+                            phydrv->phy_id_mask))
+                               return 1;
+               }
+               return 0;
+       } else {
+               return (phydrv->phy_id & phydrv->phy_id_mask) ==
+                       (phydev->phy_id & phydrv->phy_id_mask);
+       }
 }
 
 #ifdef CONFIG_PM
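Clause-45 PHYs have no single phy_id: each MMD present in the package reports
its own device identifier, with presence flagged bit-per-MMD in
devices_in_package (the loop starts at 1, skipping the reserved entry 0).
A worked example with invented identifiers:

    /* C45 PHY exposing PMA/PMD (MMD 1) and PCS (MMD 3):
     *   devices_in_package = BIT(1) | BIT(3);
     *   device_ids[1] = 0x002b09fe;  device_ids[3] = 0x002b09ff;
     * A driver whose masked phy_id equals 0x002b09fe matches via MMD 1.
     */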
index f603f362504bce0c1cb2656e1d29232eb05db846..9d43460ce3c71f0b54c69b84fa5a0ec8b7341d5f 100644 (file)
@@ -757,6 +757,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
        {QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
        {QMI_FIXED_INTF(0x1199, 0x9041, 8)},    /* Sierra Wireless MC7305/MC7355 */
+       {QMI_FIXED_INTF(0x1199, 0x9041, 10)},   /* Sierra Wireless MC7305/MC7355 */
        {QMI_FIXED_INTF(0x1199, 0x9051, 8)},    /* Netgear AirCard 340U */
        {QMI_FIXED_INTF(0x1199, 0x9053, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9054, 8)},    /* Sierra Wireless Modem */
index 63c7810e1545a357eda7578af862ed18322de933..7fbca37a1adffe5d46d3603e9fd44d4dbd16d331 100644 (file)
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        else
                vi->hdr_len = sizeof(struct virtio_net_hdr);
 
-       if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
+       if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
+           virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                vi->any_header_sg = true;
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
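Virtio 1.0 made the flexible buffer layout that ANY_LAYOUT used to advertise
mandatory, so a VERSION_1 device implies it even when the legacy feature bit
is absent. The condition, restated as a sketch:

    vi->any_header_sg = virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
                        virtio_has_feature(vdev, VIRTIO_F_VERSION_1);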
index 5e15e8e10ed39f0b605783176fe260faa387d083..a31a6804dc34eff8174b06e15d8ee14bb5405888 100644 (file)
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
                return;
        case AR9300_DEVID_QCA956X:
                ah->hw_version.macVersion = AR_SREV_VERSION_9561;
+               return;
        }
 
        val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
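The added return plugs a switch fall-through: without it, control reached the
AR_SREV read below and the derived value clobbered the macVersion just set for
QCA956x. The hazard in generic form (names invented for the sketch):

    switch (devid) {
    case DEVID_FOO:
        version = VERSION_FOO;
        return;                 /* without this, the read below wins */
    }
    version = read_srev_reg();  /* generic fallback for older parts */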
index d56064861a9c353dfb9fcf1720e1abde6c3fcf9d..d45dc021cda2c0715b8d7e740ff90b46589ae141 100644 (file)
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
 
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
 /**
  * struct iwl_rb_status - reserve buffer status
  *     host memory mapped FH registers
index 80fefe7d7b8cb3b46581b32f299fed425e878980..3b8e85e51002560a0a860db0999ec947c29fb0d2 100644 (file)
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                hw_addr = (const u8 *)(mac_override +
                                 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
 
-               /* The byte order is little endian 16 bit, meaning 214365 */
-               data->hw_addr[0] = hw_addr[1];
-               data->hw_addr[1] = hw_addr[0];
-               data->hw_addr[2] = hw_addr[3];
-               data->hw_addr[3] = hw_addr[2];
-               data->hw_addr[4] = hw_addr[5];
-               data->hw_addr[5] = hw_addr[4];
+               /*
+                * Store the MAC address from the MAO section.
+                * No byte swapping is required in the MAO section.
+                */
+               memcpy(data->hw_addr, hw_addr, ETH_ALEN);
 
                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
index 5e4cbdb44c607ec8399bae489f28990dc907eaeb..737774a01c74a500b3934928449531889701d7f5 100644 (file)
@@ -660,7 +660,8 @@ struct iwl_scan_config {
  * iwl_umac_scan_flags
  *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
  *     can be preempted by other scan requests with higher priority.
- *     The low priority scan is aborted.
+ *     The low priority scan will be resumed when the higher priority scan is
+ *     completed.
  *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
  *     when scan starts.
  */
index 5de144968723d4f2a5446f0d6f9fc0607baf2efb..5000bfcded617f32ca177ae7a0711f13a3ce65d3 100644 (file)
@@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        cmd->uid = cpu_to_le32(uid);
        cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
 
+       if (type == IWL_MVM_SCAN_SCHED)
+               cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
        if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
                cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
                                     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
index d68dc697a4a06ef2b9c786de0fc98f3e5fe35544..26f076e821491e09d7805c5f37d229d01480229c 100644 (file)
@@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        u8 sta_id;
        int ret;
+       static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 end:
        IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
                      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
-                     sta->addr, ret);
+                     sta ? sta->addr : zero_addr, ret);
        return ret;
 }
 
index d24b6a83e68cfcd4281301c907cb62a24661da96..e472729e5f149a451fd7128d45fa54856c8a77ce 100644 (file)
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
 {
        lockdep_assert_held(&mvm->time_event_lock);
 
-       if (te_data->id == TE_MAX)
+       if (!te_data->vif)
                return;
 
        list_del(&te_data->list);
index 7ba7a118ff5ca28615f5ceeb745f577999ea52dc..89116864d2a0ec346941ec8e4fded14691612ff3 100644 (file)
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 
        if (info->band == IEEE80211_BAND_2GHZ &&
            !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
-               rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
+               rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
        else
                rate_flags =
                        BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
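non_shared_ant is already an antenna bitmask in this driver family
(ANT_A == BIT(0), as I read it), so wrapping it in BIT() selected the wrong
antenna; mgmt_last_antenna_idx, by contrast, is an index and still needs
BIT(). A worked example:

    /* assuming ANT_A == BIT(0) == 0x1:
     *   old: BIT(non_shared_ant) << POS  ->  0x2 << POS  (ANT_B)
     *   new:     non_shared_ant  << POS  ->  0x1 << POS  (ANT_A)
     */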
index 2ed1e4d2774da83f1cd609983c8383c48d37a128..9f65c1cff1b1958057ab3bedf385604b34a3323e 100644 (file)
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* 3165 Series */
        {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
 
 /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
index 31f72a61cc3fe06b6d9189f718cc3315677625ad..376b84e54ad7e8bbb48d039d354c03748665451c 100644 (file)
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
-/*
- * RX related structures and functions
- */
-#define RX_NUM_QUEUES 1
-#define RX_POST_REQ_ALLOC 2
-#define RX_CLAIM_REQ_ALLOC 8
-#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
-#define RX_LOW_WATERMARK 8
-
 struct iwl_host_cmd;
 
 /*This file includes the declaration that are internal to the
@@ -86,29 +77,29 @@ struct isr_statistics {
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
- * @used_count: Number of RBDs handled to allocator to use for allocation
  * @write_actual:
- * @rx_free: list of RBDs with allocated RB ready for use
- * @rx_used: list of RBDs with no RB attached
+ * @rx_free: list of free SKBs for use
+ * @rx_used: list of Rx buffers with no SKB
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
- * @pool: initial pool of iwl_rx_mem_buffer for the queue
- * @queue: actual rx queue
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
        __le32 *bd;
        dma_addr_t bd_dma;
+       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
        u32 read;
        u32 write;
        u32 free_count;
-       u32 used_count;
        u32 write_actual;
        struct list_head rx_free;
        struct list_head rx_used;
@@ -116,32 +107,6 @@ struct iwl_rxq {
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
-       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
-       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-};
-
-/**
- * struct iwl_rb_allocator - Rx allocator
- * @pool: initial pool of allocator
- * @req_pending: number of requests the allcator had not processed yet
- * @req_ready: number of requests honored and ready for claiming
- * @rbd_allocated: RBDs with pages allocated and ready to be handled to
- *     the queue. This is a list of &struct iwl_rx_mem_buffer
- * @rbd_empty: RBDs with no page attached for allocator use. This is a list
- *     of &struct iwl_rx_mem_buffer
- * @lock: protects the rbd_allocated and rbd_empty lists
- * @alloc_wq: work queue for background calls
- * @rx_alloc: work struct for background calls
- */
-struct iwl_rb_allocator {
-       struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
-       atomic_t req_pending;
-       atomic_t req_ready;
-       struct list_head rbd_allocated;
-       struct list_head rbd_empty;
-       spinlock_t lock;
-       struct workqueue_struct *alloc_wq;
-       struct work_struct rx_alloc;
 };
 
 struct iwl_dma_ptr {
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rba: allocator for RX replenishing
+ * @rx_replenish: work that will be called when buffers need to be allocated
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  */
 struct iwl_trans_pcie {
        struct iwl_rxq rxq;
-       struct iwl_rb_allocator rba;
+       struct work_struct rx_replenish;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
 
index a3fbaa0ef5e04de7d1032c79ca35e82364dac7ea..adad8d0fae7f2766812826377c46d29f1f77d4ed 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
- *   When the interrupt handler is called, the request is processed.
- *   The page is either stolen - transferred to the upper layer
- *   or reused - added immediately to the iwl->rxq->rx_free list.
- * + When the page is stolen - the driver updates the matching queue's used
- *   count, detaches the RBD and transfers it to the queue used list.
- *   When there are two used RBDs - they are transferred to the allocator empty
- *   list. Work is then scheduled for the allocator to start allocating
- *   eight buffers.
- *   When there are another 6 used RBDs - they are transferred to the allocator
- *   empty list and the driver tries to claim the pre-allocated buffers and
- *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
- *   until ready.
- *   When there are 8+ buffers in the free list - either from allocation or from
- *   8 reused unstolen pages - restock is called to update the FW and indexes.
- * + In order to make sure the allocator always has RBDs to use for allocation
- *   the allocator has initial pool in the size of num_queues*(8-2) - the
- *   maximum missing RBDs per allocation request (request posted with 2
- *    empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
- *   The queues supplies the recycle of the rest of the RBDs.
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
+ *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the iwl->rxq->rx_free.
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   iwl->rxq is replenished and the READ INDEX is updated (updating the
+ *   'processed' and 'read' driver indexes as well)
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + If there are no allocated buffers in iwl->rxq->rx_free,
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock.
- *                            Used only during initialization.
+ *                            iwl_pcie_rxq_restock
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.
- * iwl_pcie_rx_allocator()     Background work for allocating pages.
+ *                            the WRITE index.  If insufficient rx_free buffers
+ *                            are available, schedules iwl_pcie_rx_replenish
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
- *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
- *
- * RBD life-cycle:
- *
- * Init:
- * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
- *
- * Regular Receive interrupt:
- * Page Stolen:
- * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
- * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
- * Page not Stolen:
- * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  *
  */
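
To make the watermark hand-off described above concrete, here is a minimal userspace sketch of the scheduling rule. The types and names (rxq_model, RX_LOW_WATERMARK_MODEL) are illustrative stand-ins, not the driver's own, and schedule_work() is modeled as a plain flag.

/* Minimal userspace model of the watermark scheme described above.
 * All names are illustrative, not the driver's real types. */
#include <stdbool.h>
#include <stdio.h>

#define RX_LOW_WATERMARK_MODEL 8

struct rxq_model {
    int free_count;           /* buffers ready to hand to the device */
    bool replenish_scheduled; /* stands in for schedule_work() */
};

/* Called after buffers are moved from rx_free to the hardware queue. */
static void restock_model(struct rxq_model *q, int moved)
{
    q->free_count -= moved;
    /* Mirror of the check added in iwl_pcie_rxq_restock(): once the
     * pool drops to the watermark, background refill is scheduled. */
    if (q->free_count <= RX_LOW_WATERMARK_MODEL)
        q->replenish_scheduled = true;
}

int main(void)
{
    struct rxq_model q = { .free_count = 12 };

    restock_model(&q, 3); /* 9 left: above watermark, no work */
    printf("after 3: scheduled=%d\n", q.replenish_scheduled);
    restock_model(&q, 2); /* 7 left: at/below watermark, schedule */
    printf("after 2: scheduled=%d\n", q.replenish_scheduled);
    return 0;
}
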
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);
+       /* If the pre-allocated buffer pool is dropping low, schedule
+        * work to refill it */
+       if (rxq->free_count <= RX_LOW_WATERMARK)
+               schedule_work(&trans_pcie->rx_replenish);
 
        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
@@ -277,44 +254,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
        }
 }
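
The write-pointer comment above glosses a small detail worth seeing in isolation: the index reported to the device is always rounded down to a multiple of 8. A minimal sketch, assuming nothing beyond the rounding itself; the register write is the real driver's job:

#include <stdio.h>

/* Round a write index down to a multiple of 8 before reporting it. */
static unsigned int round_down_to_8(unsigned int write)
{
    return write & ~7u; /* clear the low three bits */
}

int main(void)
{
    /* e.g. a write index of 29 is reported to the device as 24 */
    printf("%u -> %u\n", 29u, round_down_to_8(29u));
    return 0;
}
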
 
-/*
- * iwl_pcie_rx_alloc_page - allocates and returns a page.
- *
- */
-static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
-       struct page *page;
-       gfp_t gfp_mask = GFP_KERNEL;
-
-       if (rxq->free_count > RX_LOW_WATERMARK)
-               gfp_mask |= __GFP_NOWARN;
-
-       if (trans_pcie->rx_page_order > 0)
-               gfp_mask |= __GFP_COMP;
-
-       /* Alloc a new receive buffer */
-       page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-       if (!page) {
-               if (net_ratelimit())
-                       IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
-                                      trans_pcie->rx_page_order);
-               /* Issue an error if the hardware has consumed more than half
-                * of its free buffer list and we don't have enough
-                * pre-allocated buffers.
-                */
-               if (rxq->free_count <= RX_LOW_WATERMARK &&
-                   iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
-                   net_ratelimit())
-                       IWL_CRIT(trans,
-                                "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
-                                rxq->free_count);
-               return NULL;
-       }
-       return page;
-}
-
 /*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
+       gfp_t gfp_mask = priority;
 
        while (1) {
                spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
                }
                spin_unlock(&rxq->lock);
 
+               if (rxq->free_count > RX_LOW_WATERMARK)
+                       gfp_mask |= __GFP_NOWARN;
+
+               if (trans_pcie->rx_page_order > 0)
+                       gfp_mask |= __GFP_COMP;
+
                /* Alloc a new receive buffer */
-               page = iwl_pcie_rx_alloc_page(trans);
-               if (!page)
+               page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+               if (!page) {
+                       if (net_ratelimit())
+                               IWL_DEBUG_INFO(trans, "alloc_pages failed, "
+                                          "order: %d\n",
+                                          trans_pcie->rx_page_order);
+
+                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+                           net_ratelimit())
+                               IWL_CRIT(trans, "Failed to alloc_pages with %s. "
+                                        "Only %u free buffers remaining.\n",
+                                        priority == GFP_ATOMIC ?
+                                        "GFP_ATOMIC" : "GFP_KERNEL",
+                                        rxq->free_count);
+                       /* We don't reschedule replenish work here -- we will
+                        * call the restock method, and if it still needs
+                        * more buffers it will schedule replenish */
                        return;
+               }
 
                spin_lock(&rxq->lock);
 
@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 
        lockdep_assert_held(&rxq->lock);
 
-       for (i = 0; i < RX_QUEUE_SIZE; i++) {
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                if (!rxq->pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  * When moving to rx_free a page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called only during initialization
+ * This is called as a scheduled work item (except during initialization)
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
 {
-       iwl_pcie_rxq_alloc_rbs(trans);
+       iwl_pcie_rxq_alloc_rbs(trans, gfp);
 
        iwl_pcie_rxq_restock(trans);
 }
 
-/*
- * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
- *
- * Allocates for each received request 8 pages
- * Called as a scheduled work item.
- */
-static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
-       while (atomic_read(&rba->req_pending)) {
-               int i;
-               struct list_head local_empty;
-               struct list_head local_allocated;
-
-               INIT_LIST_HEAD(&local_allocated);
-               spin_lock(&rba->lock);
-               /* swap out the entire rba->rbd_empty to a local list */
-               list_replace_init(&rba->rbd_empty, &local_empty);
-               spin_unlock(&rba->lock);
-
-               for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
-                       struct iwl_rx_mem_buffer *rxb;
-                       struct page *page;
-
-                       /* List should never be empty - each reused RBD is
-                        * returned to the list, and initial pool covers any
-                        * possible gap between the time the page is allocated
-                        * to the time the RBD is added.
-                        */
-                       BUG_ON(list_empty(&local_empty));
-                       /* Get the first rxb from the rbd list */
-                       rxb = list_first_entry(&local_empty,
-                                              struct iwl_rx_mem_buffer, list);
-                       BUG_ON(rxb->page);
-
-                       /* Alloc a new receive buffer */
-                       page = iwl_pcie_rx_alloc_page(trans);
-                       if (!page)
-                               continue;
-                       rxb->page = page;
-
-                       /* Get physical address of the RB */
-                       rxb->page_dma = dma_map_page(trans->dev, page, 0,
-                                       PAGE_SIZE << trans_pcie->rx_page_order,
-                                       DMA_FROM_DEVICE);
-                       if (dma_mapping_error(trans->dev, rxb->page_dma)) {
-                               rxb->page = NULL;
-                               __free_pages(page, trans_pcie->rx_page_order);
-                               continue;
-                       }
-                       /* dma address must be no more than 36 bits */
-                       BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-                       /* and also 256 byte aligned! */
-                       BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
-                       /* move the allocated entry to the out list */
-                       list_move(&rxb->list, &local_allocated);
-                       i++;
-               }
-
-               spin_lock(&rba->lock);
-               /* add the allocated rbds to the allocator allocated list */
-               list_splice_tail(&local_allocated, &rba->rbd_allocated);
-               /* add the unused rbds back to the allocator empty list */
-               list_splice_tail(&local_empty, &rba->rbd_empty);
-               spin_unlock(&rba->lock);
-
-               atomic_dec(&rba->req_pending);
-               atomic_inc(&rba->req_ready);
-       }
-}
-
-/*
- * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
- *
- * Called by queue when the queue posted allocation request and
- * has freed 8 RBDs in order to restock itself.
- */
-static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
-                                    struct iwl_rx_mem_buffer
-                                    *out[RX_CLAIM_REQ_ALLOC])
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-       int i;
-
-       if (atomic_dec_return(&rba->req_ready) < 0) {
-               atomic_inc(&rba->req_ready);
-               IWL_DEBUG_RX(trans,
-                            "Allocation request not ready, pending requests = %d\n",
-                            atomic_read(&rba->req_pending));
-               return -ENOMEM;
-       }
-
-       spin_lock(&rba->lock);
-       for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
-               /* Get next free Rx buffer, remove it from free list */
-               out[i] = list_first_entry(&rba->rbd_allocated,
-                              struct iwl_rx_mem_buffer, list);
-               list_del(&out[i]->list);
-       }
-       spin_unlock(&rba->lock);
-
-       return 0;
-}
-
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
 {
-       struct iwl_rb_allocator *rba_p =
-               container_of(data, struct iwl_rb_allocator, rx_alloc);
        struct iwl_trans_pcie *trans_pcie =
-               container_of(rba_p, struct iwl_trans_pcie, rba);
+           container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-       iwl_pcie_rx_allocator(trans_pcie->trans);
+       iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct device *dev = trans->dev;
 
        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
        spin_lock_init(&rxq->lock);
-       spin_lock_init(&rba->lock);
 
        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;
-       rxq->used_count = 0;
 
-       for (i = 0; i < RX_QUEUE_SIZE; i++)
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
                list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
-static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
-{
-       int i;
-
-       lockdep_assert_held(&rba->lock);
-
-       INIT_LIST_HEAD(&rba->rbd_allocated);
-       INIT_LIST_HEAD(&rba->rbd_empty);
-
-       for (i = 0; i < RX_POOL_SIZE; i++)
-               list_add(&rba->pool[i].list, &rba->rbd_empty);
-}
-
-static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-       int i;
-
-       lockdep_assert_held(&rba->lock);
-
-       for (i = 0; i < RX_POOL_SIZE; i++) {
-               if (!rba->pool[i].page)
-                       continue;
-               dma_unmap_page(trans->dev, rba->pool[i].page_dma,
-                              PAGE_SIZE << trans_pcie->rx_page_order,
-                              DMA_FROM_DEVICE);
-               __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
-               rba->pool[i].page = NULL;
-       }
-}
-
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, err;
 
        if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
                if (err)
                        return err;
        }
-       if (!rba->alloc_wq)
-               rba->alloc_wq = alloc_workqueue("rb_allocator",
-                                               WQ_HIGHPRI | WQ_UNBOUND, 1);
-       INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
-
-       spin_lock(&rba->lock);
-       atomic_set(&rba->req_pending, 0);
-       atomic_set(&rba->req_ready, 0);
-       /* free all first - we might be reconfigured for a different size */
-       iwl_pcie_rx_free_rba(trans);
-       iwl_pcie_rx_init_rba(rba);
-       spin_unlock(&rba->lock);
 
        spin_lock(&rxq->lock);
 
+       INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+
        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_rxq_free_rbs(trans);
        iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock(&rxq->lock);
 
-       iwl_pcie_rx_replenish(trans);
+       iwl_pcie_rx_replenish(trans, GFP_KERNEL);
 
        iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
        /*if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
                return;
        }
 
-       cancel_work_sync(&rba->rx_alloc);
-       if (rba->alloc_wq) {
-               destroy_workqueue(rba->alloc_wq);
-               rba->alloc_wq = NULL;
-       }
-
-       spin_lock(&rba->lock);
-       iwl_pcie_rx_free_rba(trans);
-       spin_unlock(&rba->lock);
+       cancel_work_sync(&trans_pcie->rx_replenish);
 
        spin_lock(&rxq->lock);
        iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
        rxq->rb_stts = NULL;
 }
 
-/*
- * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
- *
- * Called when a RBD can be reused. The RBD is transferred to the allocator.
- * When there are 2 empty RBDs - a request for allocation is posted
- */
-static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
-                                 struct iwl_rx_mem_buffer *rxb,
-                                 struct iwl_rxq *rxq)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
-       /* Count the used RBDs */
-       rxq->used_count++;
-
-       /* Move the RBD to the used list, will be moved to allocator in batches
-        * before claiming or posting a request*/
-       list_add_tail(&rxb->list, &rxq->rx_used);
-
-       /* If we have RX_POST_REQ_ALLOC new released rx buffers -
-        * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
-        * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
-        * after but we still need to post another request.
-        */
-       if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
-               /* Move the 2 RBDs to the allocator ownership.
-                Allocator has another 6 from pool for the request completion*/
-               spin_lock(&rba->lock);
-               list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-               spin_unlock(&rba->lock);
-
-               atomic_inc(&rba->req_pending);
-               queue_work(rba->alloc_wq, &rba->rx_alloc);
-       }
-}
-
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                struct iwl_rx_mem_buffer *rxb)
 {
@@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
-                       iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+                       list_add_tail(&rxb->list, &rxq->rx_used);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
-               iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+               list_add_tail(&rxb->list, &rxq->rx_used);
 }
 
 /*
@@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       u32 r, i, j;
+       u32 r, i;
+       u8 fill_rx = 0;
+       u32 count = 8;
+       int total_empty;
 
 restart:
        spin_lock(&rxq->lock);
@@ -957,6 +720,14 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
+       /* calculate how many frames need to be restocked after handling RX */
+       total_empty = r - rxq->write_actual;
+       if (total_empty < 0)
+               total_empty += RX_QUEUE_SIZE;
+
+       if (total_empty > (RX_QUEUE_SIZE / 2))
+               fill_rx = 1;
+
        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;
 
@@ -968,48 +739,29 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
                iwl_pcie_rx_handle_rb(trans, rxb);
 
                i = (i + 1) & RX_QUEUE_MASK;
-
-               /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
-                * try to claim the pre-allocated buffers from the allocator */
-               if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
-                       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-                       struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
-
-                       /* Add the remaining 6 empty RBDs for allocator use */
-                       spin_lock(&rba->lock);
-                       list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-                       spin_unlock(&rba->lock);
-
-                       /* If not ready - continue, will try to reclaim later.
-                       * No need to reschedule work - allocator exits only on
-                       * success */
-                       if (!iwl_pcie_rx_allocator_get(trans, out)) {
-                               /* If success - then RX_CLAIM_REQ_ALLOC
-                                * buffers were retrieved and should be added
-                                * to free list */
-                               rxq->used_count -= RX_CLAIM_REQ_ALLOC;
-                               for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
-                                       list_add_tail(&out[j]->list,
-                                                     &rxq->rx_free);
-                                       rxq->free_count++;
-                               }
+               /* If there are a lot of unused frames,
+                * restock the Rx queue so the ucode won't assert. */
+               if (fill_rx) {
+                       count++;
+                       if (count >= 8) {
+                               rxq->read = i;
+                               spin_unlock(&rxq->lock);
+                               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+                               count = 0;
+                               goto restart;
                        }
                }
-               /* handle restock for two cases:
-               * - we just pulled buffers from the allocator
-               * - we have 8+ unstolen pages accumulated */
-               if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
-                       rxq->read = i;
-                       spin_unlock(&rxq->lock);
-                       iwl_pcie_rxq_restock(trans);
-                       goto restart;
-               }
        }
 
        /* Backtrack one entry */
        rxq->read = i;
        spin_unlock(&rxq->lock);
 
+       if (fill_rx)
+               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+       else
+               iwl_pcie_rxq_restock(trans);
+
        if (trans_pcie->napi.poll)
                napi_gro_flush(&trans_pcie->napi, false);
 }
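
The total_empty computation in this hunk is standard ring-buffer distance arithmetic: the gap from the last reported write back up to the hardware read pointer, corrected for wrap-around. A self-contained sketch with an illustrative queue size:

#include <stdio.h>

#define RX_QUEUE_SIZE_MODEL 256 /* illustrative; the driver uses RX_QUEUE_SIZE */

/* Entries still to be restocked between the hardware read pointer r
 * and the last write the driver reported (write_actual). */
static int total_empty_model(int r, int write_actual)
{
    int total = r - write_actual;
    if (total < 0) /* the read pointer wrapped past the end */
        total += RX_QUEUE_SIZE_MODEL;
    return total;
}

int main(void)
{
    printf("%d\n", total_empty_model(200, 40)); /* 160: no wrap */
    printf("%d\n", total_empty_model(10, 240)); /* 26: wrapped */
    return 0;
}
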
index 43ae658af6ec56506f9f0e8ed00022b890c3b9fb..6203c4ad9bba5d8ce3bb2f46f832c3bb4ebe85fc 100644 (file)
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
 
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
-       if (!trans->cfg->apmg_not_supported)
+       if (trans->cfg->apmg_not_supported)
                return;
 
        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -2459,7 +2459,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
        u16 pci_cmd;
-       int err;
+       int ret;
 
        trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
                                &pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2474,8 +2474,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        spin_lock_init(&trans_pcie->ref_lock);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
-       err = pci_enable_device(pdev);
-       if (err)
+       ret = pci_enable_device(pdev);
+       if (ret)
                goto out_no_pci;
 
        if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2491,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
-       if (!err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
-       if (err) {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (!err)
-                       err = pci_set_consistent_dma_mask(pdev,
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (!ret)
+               ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (ret) {
+               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (!ret)
+                       ret = pci_set_consistent_dma_mask(pdev,
                                                          DMA_BIT_MASK(32));
                /* both attempts failed: */
-               if (err) {
+               if (ret) {
                        dev_err(&pdev->dev, "No suitable DMA available\n");
                        goto out_pci_disable_device;
                }
        }
 
-       err = pci_request_regions(pdev, DRV_NAME);
-       if (err) {
+       ret = pci_request_regions(pdev, DRV_NAME);
+       if (ret) {
                dev_err(&pdev->dev, "pci_request_regions failed\n");
                goto out_pci_disable_device;
        }
@@ -2515,7 +2515,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
        if (!trans_pcie->hw_base) {
                dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
-               err = -ENODEV;
+               ret = -ENODEV;
                goto out_pci_release_regions;
        }
 
@@ -2527,9 +2527,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        trans_pcie->pci_dev = pdev;
        iwl_disable_interrupts(trans);
 
-       err = pci_enable_msi(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+       ret = pci_enable_msi(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
                /* enable rfkill interrupt: hw bug w/a */
                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2547,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         */
        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
                unsigned long flags;
-               int ret;
 
                trans->hw_rev = (trans->hw_rev & 0xfff0) |
                                (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
 
+               ret = iwl_pcie_prepare_card_hw(trans);
+               if (ret) {
+                       IWL_WARN(trans, "Exit HW not ready\n");
+                       goto out_pci_disable_msi;
+               }
+
                /*
                 * in-order to recognize C step driver should read chip version
                 * id located at the AUX bus MISC address space.
@@ -2591,13 +2596,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);
 
-       if (iwl_pcie_alloc_ict(trans))
+       ret = iwl_pcie_alloc_ict(trans);
+       if (ret)
                goto out_pci_disable_msi;
 
-       err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+       ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
                                   iwl_pcie_irq_handler,
                                   IRQF_SHARED, DRV_NAME, trans);
-       if (err) {
+       if (ret) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
                goto out_free_ict;
        }
@@ -2617,5 +2623,5 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        pci_disable_device(pdev);
 out_no_pci:
        iwl_trans_free(trans);
-       return ERR_PTR(err);
+       return ERR_PTR(ret);
 }
index 880d0d63e872e5725d76fe998db0282c749f45d6..7d50711476fe1e88debca95beb790d770261f036 100644 (file)
@@ -1566,13 +1566,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
                smp_rmb();
 
                while (dc != dp) {
-                       BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
+                       BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
                        pending_idx =
                                queue->dealloc_ring[pending_index(dc++)];
 
-                       pending_idx_release[gop-queue->tx_unmap_ops] =
+                       pending_idx_release[gop - queue->tx_unmap_ops] =
                                pending_idx;
-                       queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+                       queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
                                queue->mmap_pages[pending_idx];
                        gnttab_set_unmap_op(gop,
                                            idx_to_kaddr(queue, pending_idx),
index efcf2a2b3975c2cc35fda3806223f0578900d4c2..6177315ab74e5eb43cc92fa9d57c892fadce4b40 100644 (file)
@@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
 
        spin_lock_irqsave(&pc->irq_lock[bank], flags);
        bcm2835_gpio_irq_config(pc, gpio, false);
+       /* Clear events that were latched prior to clearing event sources */
+       bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
        clear_bit(offset, &pc->enabled_irq_map[bank]);
        spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
 }
index 5fd4437cee1584a6cc7a5251269d1078a0b62ddf..88a7fac11bd499f72c831b91f0f6c05bd29b19f7 100644 (file)
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
                             unsigned num_configs)
 {
        struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
-       const struct imx1_pinctrl_soc_info *info = ipctl->info;
        int i;
 
        for (i = 0; i != num_configs; ++i) {
                imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
 
                dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
-                       info->pins[pin_id].name);
+                       pin_desc_get(pctldev, pin_id)->name);
        }
 
        return 0;
index 557d0f2a3031b1a0295fe60796ec17ea7817fc52..97681fac082e71f449874a4a5a21a68b07e27fc2 100644 (file)
@@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = {
        .set_mux = abx500_pmx_set,
        .gpio_request_enable = abx500_gpio_request_enable,
        .gpio_disable_free = abx500_gpio_disable_free,
-       .strict = true,
 };
 
 static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
index ef0b697639a71e397ec545d17f6fe9ae9cec445d..347c763a6a78762302c89559a5ff92b64ec55872 100644 (file)
@@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
                break;
 
        case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
-               if (param)
+               if (param_val)
                        *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift);
                else
                        *reg |= (LPC18XX_SCU_I2C0_ZIF << shift);
@@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
                break;
 
        case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
-               if (param)
+               if (param_val)
                        *reg &= ~LPC18XX_SCU_PIN_ZIF;
                else
                        *reg |= LPC18XX_SCU_PIN_ZIF;
index b2de09d3b1a0c8e14b03e23fcb0131e6f41022ee..0b8d480171a3dfea7af0aa8b03e346d56b80f6bb 100644 (file)
@@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
                int res;
 
                res = request_irq(pcs_soc->irq, pcs_irq_handler,
-                                 IRQF_SHARED | IRQF_NO_SUSPEND,
+                                 IRQF_SHARED | IRQF_NO_SUSPEND |
+                                 IRQF_NO_THREAD,
                                  name, pcs_soc);
                if (res) {
                        pcs_soc->irq = -1;
index 3dd5a3b2ac62344a19713e92d9e7ad8fa5cf2461..c760bf43d116cfaa83370d640deefd38abb9f407 100644 (file)
 #include "../core.h"
 #include "pinctrl-samsung.h"
 
-#define GROUP_SUFFIX           "-grp"
-#define GSUFFIX_LEN            sizeof(GROUP_SUFFIX)
-#define FUNCTION_SUFFIX                "-mux"
-#define FSUFFIX_LEN            sizeof(FUNCTION_SUFFIX)
-
 /* list of all possible config options supported */
 static struct pin_config {
        const char *property;
index c7508d5f688613b26e6eb226628f61ba9cd38f66..0874cfee6889afa149fc18895de55c12ac2e5ff0 100644 (file)
@@ -224,7 +224,7 @@ struct sh_pfc_soc_info {
 
 /* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
 #define _GP_GPIO(bank, _pin, _name, sfx)                               \
-       [(bank * 32) + _pin] = {                                        \
+       {                                                               \
                .pin = (bank * 32) + _pin,                              \
                .name = __stringify(_name),                             \
                .enum_id = _name##_DATA,                                \
index 95bccfd3f169a8a3a167368f972cbbd08de7c869..e5225ad9c5b12fdd9d723704d66eb9ec2ff5b734 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the S/390 specific device drivers
 #
 
-obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
 
 drivers-y += drivers/s390/built-in.o
 
index 285f77544c36391a89b243001c3fc93d0703f46f..7dbbb29d24c6cf5290cd06a7a97df570a8860f18 100644 (file)
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 {
        struct Scsi_Host *shost;
        struct virtio_scsi *vscsi;
-       int err, host_prot;
+       int err;
        u32 sg_elems, num_targets;
        u32 cmd_per_lun;
        u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+               int host_prot;
+
                host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
                            SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
                            SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
index 0cae1694014dfbbaba171187fdbd4fc835dc1546..b0f30fb68914220ea75b401dd52b7d537c00687e 100644 (file)
@@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA
 
 config SPI_ZYNQMP_GQSPI
        tristate "Xilinx ZynqMP GQSPI controller"
-       depends on SPI_MASTER
+       depends on SPI_MASTER && HAS_DMA
        help
          Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
 
index 788e2b176a4f7707051bcc325538e7d2a6d599f4..acce90ac7371d58d94b47dc3cb1da029969fb95f 100644 (file)
@@ -40,6 +40,7 @@
 #define SPFI_CONTROL_SOFT_RESET                        BIT(11)
 #define SPFI_CONTROL_SEND_DMA                  BIT(10)
 #define SPFI_CONTROL_GET_DMA                   BIT(9)
+#define SPFI_CONTROL_SE                        BIT(8)
 #define SPFI_CONTROL_TMODE_SHIFT               5
 #define SPFI_CONTROL_TMODE_MASK                        0x7
 #define SPFI_CONTROL_TMODE_SINGLE              0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
        else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
                 xfer->rx_nbits == SPI_NBITS_QUAD)
                val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
+       val |= SPFI_CONTROL_SE;
        spfi_writel(spfi, val, SPFI_CONTROL);
 }
 
index eb7d3a6fb14c0694b55e5286933256c5962043ce..f9deb84e4e551bdd9c42c8ac204c80c18f0f8579 100644 (file)
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 {
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
 
-       if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
-           && (transfer->len > spi_imx->tx_wml))
+       if (spi_imx->dma_is_inited
+           && transfer->len > spi_imx->rx_wml * sizeof(u32)
+           && transfer->len > spi_imx->tx_wml * sizeof(u32))
                return true;
        return false;
 }
index 87b20a511a6ba66cc4ba084df028a3c073691aa2..f23f36ebaf3dc700447da2e4da6e9b1b28e8ed82 100644 (file)
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
        case GQSPI_SELECT_FLASH_CS_BOTH:
                instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
                        GQSPI_GENFIFO_CS_UPPER;
+               break;
        case GQSPI_SELECT_FLASH_CS_UPPER:
                instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
                break;
index dd616ff0ffc52542c3c8f8aaaf7c7d30bb9fd219..c7de64171c452325ab43884b501977a33f724b93 100644 (file)
@@ -693,6 +693,7 @@ static struct class *spidev_class;
 #ifdef CONFIG_OF
 static const struct of_device_id spidev_dt_ids[] = {
        { .compatible = "rohm,dh2228fv" },
+       { .compatible = "lineartechnology,ltc2488" },
        {},
 };
 MODULE_DEVICE_TABLE(of, spidev_dt_ids);
index 9e8e004bb1c38d809c2af43b9d42c053db3a41a2..a9fe859f43c874b39d6a2e7f8bb884abe1b16736 100644 (file)
 #include <linux/file.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
 #include <linux/module.h>
+#include <linux/sort.h>
 
 #include "vhost.h"
 
+static ushort max_mem_regions = 64;
+module_param(max_mem_regions, ushort, 0444);
+MODULE_PARM_DESC(max_mem_regions,
+       "Maximum number of memory regions in memory map. (default: 64)");
+
 enum {
-       VHOST_MEMORY_MAX_NREGIONS = 64,
        VHOST_MEMORY_F_LOG = 0x1,
 };
 
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
                fput(dev->log_file);
        dev->log_file = NULL;
        /* No one will access memory at this point */
-       kfree(dev->memory);
+       kvfree(dev->memory);
        dev->memory = NULL;
        WARN_ON(!list_empty(&dev->work_list));
        if (dev->worker) {
@@ -663,6 +669,28 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 }
 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 
+static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
+{
+       const struct vhost_memory_region *r1 = p1, *r2 = p2;
+       if (r1->guest_phys_addr < r2->guest_phys_addr)
+               return 1;
+       if (r1->guest_phys_addr > r2->guest_phys_addr)
+               return -1;
+       return 0;
+}
+
+static void *vhost_kvzalloc(unsigned long size)
+{
+       void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+       if (!n) {
+               n = vzalloc(size);
+               if (!n)
+                       return ERR_PTR(-ENOMEM);
+       }
+       return n;
+}
+
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
        struct vhost_memory mem, *newmem, *oldmem;
@@ -673,21 +701,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                return -EFAULT;
        if (mem.padding)
                return -EOPNOTSUPP;
-       if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+       if (mem.nregions > max_mem_regions)
                return -E2BIG;
-       newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+       newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
        if (!newmem)
                return -ENOMEM;
 
        memcpy(newmem, &mem, size);
        if (copy_from_user(newmem->regions, m->regions,
                           mem.nregions * sizeof *m->regions)) {
-               kfree(newmem);
+               kvfree(newmem);
                return -EFAULT;
        }
+       sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
+               vhost_memory_reg_sort_cmp, NULL);
 
        if (!memory_access_ok(d, newmem, 0)) {
-               kfree(newmem);
+               kvfree(newmem);
                return -EFAULT;
        }
        oldmem = d->memory;
@@ -699,7 +729,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                d->vqs[i]->memory = newmem;
                mutex_unlock(&d->vqs[i]->mutex);
        }
-       kfree(oldmem);
+       kvfree(oldmem);
        return 0;
 }
 
@@ -992,17 +1022,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
 static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
                                                     __u64 addr, __u32 len)
 {
-       struct vhost_memory_region *reg;
-       int i;
+       const struct vhost_memory_region *reg;
+       int start = 0, end = mem->nregions;
 
-       /* linear search is not brilliant, but we really have on the order of 6
-        * regions in practice */
-       for (i = 0; i < mem->nregions; ++i) {
-               reg = mem->regions + i;
-               if (reg->guest_phys_addr <= addr &&
-                   reg->guest_phys_addr + reg->memory_size - 1 >= addr)
-                       return reg;
+       while (start < end) {
+               int slot = start + (end - start) / 2;
+               reg = mem->regions + slot;
+               if (addr >= reg->guest_phys_addr)
+                       end = slot;
+               else
+                       start = slot + 1;
        }
+
+       reg = mem->regions + start;
+       if (addr >= reg->guest_phys_addr &&
+               reg->guest_phys_addr + reg->memory_size > addr)
+               return reg;
        return NULL;
 }
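
For reference, a self-contained userspace model of the lookup just introduced. Regions are sorted descending by guest_phys_addr (note the comparator returns 1 when r1 < r2), and the binary search converges on the region with the highest start address at or below addr. All names here are illustrative, and the model adds an explicit start < n bounds check that the kernel code leaves to its invariants:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct region_model {
    uint64_t gpa;  /* guest_phys_addr */
    uint64_t size; /* memory_size */
};

/* Same ordering as vhost_memory_reg_sort_cmp(): descending by gpa. */
static int cmp_desc(const void *p1, const void *p2)
{
    const struct region_model *r1 = p1, *r2 = p2;
    if (r1->gpa < r2->gpa) return 1;
    if (r1->gpa > r2->gpa) return -1;
    return 0;
}

static const struct region_model *find_region_model(
        const struct region_model *regs, int n, uint64_t addr)
{
    int start = 0, end = n;

    while (start < end) {
        int slot = start + (end - start) / 2;
        /* Descending order: narrow toward the front while regs[slot]
         * starts at or below addr, converging on the highest gpa <= addr. */
        if (addr >= regs[slot].gpa)
            end = slot;
        else
            start = slot + 1;
    }
    if (start < n &&                       /* explicit bounds check */
        addr >= regs[start].gpa &&
        regs[start].gpa + regs[start].size > addr)
        return &regs[start];
    return NULL;
}

int main(void)
{
    struct region_model regs[] = {
        { .gpa = 0x00000, .size = 0x10000 },
        { .gpa = 0x40000, .size = 0x10000 },
        { .gpa = 0x20000, .size = 0x08000 },
    };
    int n = sizeof(regs) / sizeof(regs[0]);

    qsort(regs, n, sizeof(regs[0]), cmp_desc);
    printf("%p\n", (void *)find_region_model(regs, n, 0x24000)); /* hit */
    printf("%p\n", (void *)find_region_model(regs, n, 0x30000)); /* NULL */
    return 0;
}
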
 
index c7cb8a526c05fbaa5ba18934cd04eda1eb2a6e60..2b8aa15fd6dfa755368ddf21136eaab5192f1e28 100644 (file)
@@ -1361,6 +1361,36 @@ enum umount_tree_flags {
        UMOUNT_PROPAGATE = 2,
        UMOUNT_CONNECTED = 4,
 };
+
+static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
+{
+       /* Leaving mounts connected is only valid for lazy umounts */
+       if (how & UMOUNT_SYNC)
+               return true;
+
+       /* A mount without a parent has nothing to be connected to */
+       if (!mnt_has_parent(mnt))
+               return true;
+
+       /* Because the reference counting rules change when mounts are
+        * unmounted and connected, umounted mounts may not be
+        * connected to mounted mounts.
+        */
+       if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
+               return true;
+
+       /* Has it been requested that the mount remain connected? */
+       if (how & UMOUNT_CONNECTED)
+               return false;
+
+       /* Is the mount locked such that it needs to remain connected? */
+       if (IS_MNT_LOCKED(mnt))
+               return false;
+
+       /* By default disconnect the mount */
+       return true;
+}
+
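
The helper above is a straight decision ladder. As a quick illustration of its truth table, a userspace model with boolean stand-ins for the mount flags (all names hypothetical, not the kernel's types):

#include <stdbool.h>
#include <stdio.h>

struct mnt_state {
    bool sync_umount;      /* UMOUNT_SYNC requested */
    bool has_parent;       /* mnt_has_parent() */
    bool parent_umounted;  /* parent carries MNT_UMOUNT */
    bool keep_connected;   /* UMOUNT_CONNECTED requested */
    bool locked;           /* IS_MNT_LOCKED() */
};

/* Same ordering of checks as disconnect_mount() above. */
static bool disconnect_mount_model(const struct mnt_state *m)
{
    if (m->sync_umount) return true;
    if (!m->has_parent) return true;
    if (!m->parent_umounted) return true;
    if (m->keep_connected) return false;
    if (m->locked) return false;
    return true;
}

int main(void)
{
    /* A lazy umount of a locked child must stay connected. */
    struct mnt_state lazy_locked = { .has_parent = true,
                                     .parent_umounted = true,
                                     .locked = true };
    printf("disconnects: %s\n",
           disconnect_mount_model(&lazy_locked) ? "yes" : "no"); /* no */
    return 0;
}
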
 /*
  * mount_lock must be held
  * namespace_sem must be held for write
@@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
                if (how & UMOUNT_SYNC)
                        p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
 
-               disconnect = !(((how & UMOUNT_CONNECTED) &&
-                               mnt_has_parent(p) &&
-                               (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
-                              IS_MNT_LOCKED_AND_LAZY(p));
+               disconnect = disconnect_mount(p, how);
 
                pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
                                 disconnect ? &unmounted : NULL);
@@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry)
        while (!hlist_empty(&mp->m_list)) {
                mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
                if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
-                       struct mount *p, *tmp;
-                       list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
-                               hlist_add_head(&p->mnt_umount.s_list, &unmounted);
-                               umount_mnt(p);
-                       }
+                       hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
+                       umount_mnt(mnt);
                }
                else umount_tree(mnt, UMOUNT_CONNECTED);
        }
index 3e594ce410101bbab3e39b97e311f3aa79cf181c..92e48c70f0f05542a75804fe7aac752672abd214 100644 (file)
@@ -152,15 +152,31 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
                BUG();
 
        list_del_init(&mark->g_list);
+
        spin_unlock(&mark->lock);
 
        if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
                iput(inode);
+       /* release lock temporarily */
+       mutex_unlock(&group->mark_mutex);
 
        spin_lock(&destroy_lock);
        list_add(&mark->g_list, &destroy_list);
        spin_unlock(&destroy_lock);
        wake_up(&destroy_waitq);
+       /*
+        * We don't necessarily have a reference on the mark from the caller,
+        * so the destroy above may already have freed it, unless this group
+        * provides a 'freeing_mark' callback, which must then be holding a
+        * reference.
+        */
+
+       /*
+        * Some groups like to know that marks are being freed.  This is a
+        * callback to the group function to let it know that this mark
+        * is being freed.
+        */
+       if (group->ops->freeing_mark)
+               group->ops->freeing_mark(mark, group);
 
        /*
         * __fsnotify_update_child_dentry_flags(inode);
@@ -175,6 +191,8 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
         */
 
        atomic_dec(&group->num_marks);
+
+       mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 }
 
 void fsnotify_destroy_mark(struct fsnotify_mark *mark,
@@ -187,10 +205,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
 
 /*
  * Destroy all marks in the given list. The marks must be already detached from
- * the original inode / vfsmount. Note that we can race with
- * fsnotify_clear_marks_by_group_flags(). However we hold a reference to each
- * mark so they won't get freed from under us and nobody else touches our
- * free_list list_head.
+ * the original inode / vfsmount.
  */
 void fsnotify_destroy_marks(struct list_head *to_free)
 {
@@ -391,7 +406,7 @@ struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
 }
 
 /*
- * Clear any marks in a group in which mark->flags & flags is true.
+ * clear any marks in a group in which mark->flags & flags is true
  */
 void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
                                         unsigned int flags)
@@ -445,7 +460,6 @@ static int fsnotify_mark_destroy(void *ignored)
 {
        struct fsnotify_mark *mark, *next;
        struct list_head private_destroy_list;
-       struct fsnotify_group *group;
 
        for (;;) {
                spin_lock(&destroy_lock);
@@ -457,14 +471,6 @@ static int fsnotify_mark_destroy(void *ignored)
 
                list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
                        list_del_init(&mark->g_list);
-                       group = mark->group;
-                       /*
-                        * Some groups like to know that marks are being freed.
-                        * This is a callback to the group function to let it
-                        * know that this mark is being freed.
-                        */
-                       if (group && group->ops->freeing_mark)
-                               group->ops->freeing_mark(mark, group);
                        fsnotify_put_mark(mark);
                }
 
index 7114ce6e6b9ef038f415550b925ac50a95be978c..0fcdbe7ca6480e7950092cfd0dc7d0ce431ef548 100644 (file)
@@ -20,8 +20,6 @@
 #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
 #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
 #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
-#define IS_MNT_LOCKED_AND_LAZY(m) \
-       (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
 
 #define CL_EXPIRE              0x01
 #define CL_SLAVE               0x02
index 6afac3d561ac81f6f5861f86a0dc4eb2a7d1fca2..8d0b3ade0ff0ef8a65f8714330a3bfd49201c0d8 100644 (file)
@@ -1652,17 +1652,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                       iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
                                        sizeof(struct unallocSpaceEntry));
                use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
-               use->descTag.tagLocation =
-                               cpu_to_le32(iinfo->i_location.logicalBlockNum);
-               crclen = sizeof(struct unallocSpaceEntry) +
-                               iinfo->i_lenAlloc - sizeof(struct tag);
-               use->descTag.descCRCLength = cpu_to_le16(crclen);
-               use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
-                                                          sizeof(struct tag),
-                                                          crclen));
-               use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
+               crclen = sizeof(struct unallocSpaceEntry);
 
-               goto out;
+               goto finish;
        }
 
        if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
@@ -1782,6 +1774,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
                crclen = sizeof(struct extendedFileEntry);
        }
+
+finish:
        if (iinfo->i_strat4096) {
                fe->icbTag.strategyType = cpu_to_le16(4096);
                fe->icbTag.strategyParameter = cpu_to_le16(1);
@@ -1791,7 +1785,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                fe->icbTag.numEntries = cpu_to_le16(1);
        }
 
-       if (S_ISDIR(inode->i_mode))
+       if (iinfo->i_use)
+               fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
+       else if (S_ISDIR(inode->i_mode))
                fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
        else if (S_ISREG(inode->i_mode))
                fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
@@ -1828,7 +1824,6 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                                                  crclen));
        fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
 
-out:
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
 
index a741678f24a26d712e08775e213b1b1c50c8ca8a..883fe1e7c5a17e982651a68cfb17cbc0bbe5b367 100644 (file)
@@ -4868,6 +4868,23 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
                             struct cfg80211_chan_def *chandef,
                             enum nl80211_iftype iftype);
 
+/**
+ * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
+ * @wiphy: the wiphy
+ * @chandef: the channel definition
+ * @iftype: interface type
+ *
+ * Return: %true if there is no secondary channel or the secondary channel(s)
+ * can be used for beaconing (i.e. is not a radar channel etc.). This version
+ * also checks if IR-relaxation conditions apply, to allow beaconing under
+ * more permissive conditions.
+ *
+ * Requires the RTNL to be held.
+ */
+bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
+                                  struct cfg80211_chan_def *chandef,
+                                  enum nl80211_iftype iftype);
+
 /*
  * cfg80211_ch_switch_notify - update wdev channel and notify userspace
  * @dev: the device which switched channels
index 0750a186ea635678efe15b2619f32e91f86fde99..d5fe9f2ab6996f0aa9482980ecc4cdc793d63a5a 100644 (file)
@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
 }
 
 /* datagram.c */
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 
 void ip4_datagram_release_cb(struct sock *sk);
index b6fce900a8334613f45f169a0c28802327222f56..d708a53b8fb170a2faa04c0b6585daf43c2e9c73 100644 (file)
@@ -614,6 +614,8 @@ struct drm_amdgpu_info_device {
        uint32_t vram_type;
        /** video memory bit width*/
        uint32_t vram_bit_width;
+       /* vce harvesting instance */
+       uint32_t vce_harvest_config;
 };
 
 struct drm_amdgpu_info_hw_ip {
index 6e1a2ed116cb1410958f61631a7dc30839652738..db809b722985d3a1a03480cc1e3d36a656af2d2d 100644 (file)
@@ -1070,6 +1070,14 @@ struct drm_i915_reg_read {
        __u64 offset;
        __u64 val; /* Return value */
 };
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if using the default
+ *   single instruction 8byte read, in order to workaround that use
+ *   offset (0x2538 | 1) instead.
+ *
+ */
 
 struct drm_i915_reset_stats {
        __u32 ctx_id;
index 7bbee79ca2933f518a16f0250e7bbfd0a4e85fe0..ec32293a00db057eb7559c2e7cadce01deb3ef22 100644 (file)
@@ -34,6 +34,7 @@
 /* The feature bitmap for virtio net */
 #define VIRTIO_NET_F_CSUM      0       /* Host handles pkts w/ partial csum */
 #define VIRTIO_NET_F_GUEST_CSUM        1       /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
 #define VIRTIO_NET_F_MAC       5       /* Host has given MAC address. */
 #define VIRTIO_NET_F_GUEST_TSO4        7       /* Guest can handle TSOv4 in. */
 #define VIRTIO_NET_F_GUEST_TSO6        8       /* Guest can handle TSOv6 in. */
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq {
  #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN        1
  #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX        0x8000
 
+/*
+ * Control network offloads
+ *
+ * Reconfigures the network offloads that Guest can handle.
+ *
+ * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
+ *
+ * Command data format matches the feature bit mask exactly.
+ *
+ * See VIRTIO_NET_F_GUEST_* for the list of offloads
+ * that can be enabled/disabled.
+ */
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS   5
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET        0
+
 #endif /* _LINUX_VIRTIO_NET_H */
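
Per the comment above, the SET command's payload is nothing more than the guest-offload feature mask itself, carried as a 64-bit little-endian value. A sketch that only builds and prints the mask in userspace; queuing it on the control virtqueue is left to the driver:

#include <stdint.h>
#include <stdio.h>

/* Feature bit numbers copied from the header above. */
#define VIRTIO_NET_F_GUEST_CSUM 1
#define VIRTIO_NET_F_GUEST_TSO4 7
#define VIRTIO_NET_F_GUEST_TSO6 8

int main(void)
{
    /* Enable guest checksum + TSO; leave everything else off. */
    uint64_t offloads = (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
                        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
                        (1ULL << VIRTIO_NET_F_GUEST_TSO6);

    /* Sent with class VIRTIO_NET_CTRL_GUEST_OFFLOADS (5) and command
     * VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET (0); the data is the mask
     * itself, little-endian on the wire. */
    printf("offload mask: 0x%llx\n", (unsigned long long)offloads);
    return 0;
}
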
index 75301468359f0c558ff8c3c12450301c80cff19a..90007a1abcab144ac3d6ac7d6e6f4001d58abb14 100644 (file)
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg {
        __le32 queue_used_hi;           /* read-write */
 };
 
+/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
+struct virtio_pci_cfg_cap {
+       struct virtio_pci_cap cap;
+       __u8 pci_cfg_data[4]; /* Data for BAR access. */
+};
+
 /* Macro versions of offsets for the Old Timers! */
 #define VIRTIO_PCI_CAP_VNDR            0
 #define VIRTIO_PCI_CAP_NEXT            1
index 915980ac68dfa8cc1dc973b8a7e659fc56383c22..c07295969b7e134ec85bf58b72b100949a6462fa 100644 (file)
@@ -31,6 +31,9 @@
  * SUCH DAMAGE.
  *
  * Copyright Rusty Russell IBM Corporation 2007. */
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
 #include <linux/types.h>
 #include <linux/virtio_types.h>
 
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
        vr->num = num;
        vr->desc = p;
        vr->avail = p + num*sizeof(struct vring_desc);
-       vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16)
+       vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
                + align-1) & ~(align - 1));
 }
 
index 9dd49ca67dbc22a905999de21b8355475ba40052..6e70ddb158b4bc121a0f32e7a53fecf8125e8354 100644 (file)
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
 
        mutex_unlock(&virtio_9p_lock);
 
+       vdev->config->reset(vdev);
        vdev->config->del_vqs(vdev);
 
        sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
index 1997538a5d23d93ddca9724fd3787dc0b0b2595a..3b78e8473a01b4a82e376266b04078e714ce1e26 100644 (file)
@@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
 {
        ax25_clear_queues(ax25);
 
+       ax25_stop_heartbeat(ax25);
        ax25_stop_t1timer(ax25);
        ax25_stop_t2timer(ax25);
        ax25_stop_t3timer(ax25);
index c11cf2611db0c870542969b6847d0a61d18b64d4..1198a3dbad95bc3c8819e7777b520074556a795a 100644 (file)
@@ -351,7 +351,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
        if (state == MDB_TEMPORARY)
                mod_timer(&p->timer, now + br->multicast_membership_interval);
 
-       br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
        return 0;
 }
 
index 742a6c27d7a222bc3c53c288b4a6915194310fe6..79db489cdade10c2f1412580b8ad1cba1f32ef1a 100644 (file)
@@ -39,6 +39,16 @@ static void br_multicast_start_querier(struct net_bridge *br,
                                       struct bridge_mcast_own_query *query);
 static void br_multicast_add_router(struct net_bridge *br,
                                    struct net_bridge_port *port);
+static void br_ip4_multicast_leave_group(struct net_bridge *br,
+                                        struct net_bridge_port *port,
+                                        __be32 group,
+                                        __u16 vid);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_leave_group(struct net_bridge *br,
+                                        struct net_bridge_port *port,
+                                        const struct in6_addr *group,
+                                        __u16 vid);
+#endif
 unsigned int br_mdb_rehash_seq;
 
 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -1010,9 +1020,15 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
                        continue;
                }
 
-               err = br_ip4_multicast_add_group(br, port, group, vid);
-               if (err)
-                       break;
+               if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
+                    type == IGMPV3_MODE_IS_INCLUDE) &&
+                   ntohs(grec->grec_nsrcs) == 0) {
+                       br_ip4_multicast_leave_group(br, port, group, vid);
+               } else {
+                       err = br_ip4_multicast_add_group(br, port, group, vid);
+                       if (err)
+                               break;
+               }
        }
 
        return err;
@@ -1071,10 +1087,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                        continue;
                }
 
-               err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
-                                                vid);
-               if (err)
-                       break;
+               if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
+                    grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
+                   ntohs(*nsrcs) == 0) {
+                       br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
+                                                    vid);
+               } else {
+                       err = br_ip6_multicast_add_group(br, port,
+                                                        &grec->grec_mca, vid);
+                       if (err)
+                               break;
+               }
        }
 
        return err;
index 3cc71b9f551756ca63b1299e95d9b6424e5afb72..cc858919108ee1f9645bce1046be8650a640d821 100644 (file)
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped, but CAIF is sending flow off instead.
  */
-static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        int err;
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+       bool queued = false;
 
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
                (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        err = sk_filter(sk, skb);
        if (err)
-               return err;
+               goto out;
+
        if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
                set_rx_flow_off(cf_sk);
                net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        }
        skb->dev = NULL;
        skb_set_owner_r(skb, sk);
-       /* Cache the SKB length before we tack it onto the receive
-        * queue. Once it is added it no longer belongs to us and
-        * may be freed by other threads of control pulling packets
-        * from the queue.
-        */
        spin_lock_irqsave(&list->lock, flags);
-       if (!sock_flag(sk, SOCK_DEAD))
+       queued = !sock_flag(sk, SOCK_DEAD);
+       if (queued)
                __skb_queue_tail(list, skb);
        spin_unlock_irqrestore(&list->lock, flags);
-
-       if (!sock_flag(sk, SOCK_DEAD))
+out:
+       if (queued)
                sk->sk_data_ready(sk);
        else
                kfree_skb(skb);
-       return 0;
 }
 
 /* Packet Receive Callback function called from CAIF Stack */
index b80fb91bb3f7e8dc630663cb5e012dc97ac6924f..4967262b27076af66347d20eca54b65a2e61d789 100644 (file)
@@ -131,6 +131,35 @@ static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
        goto out;
 }
 
+static int skb_set_peeked(struct sk_buff *skb)
+{
+       struct sk_buff *nskb;
+
+       if (skb->peeked)
+               return 0;
+
+       /* We have to unshare an skb before modifying it. */
+       if (!skb_shared(skb))
+               goto done;
+
+       nskb = skb_clone(skb, GFP_ATOMIC);
+       if (!nskb)
+               return -ENOMEM;
+
+       skb->prev->next = nskb;
+       skb->next->prev = nskb;
+       nskb->prev = skb->prev;
+       nskb->next = skb->next;
+
+       consume_skb(skb);
+       skb = nskb;
+
+done:
+       skb->peeked = 1;
+
+       return 0;
+}
+
 /**
  *     __skb_recv_datagram - Receive a datagram skbuff
  *     @sk: socket
@@ -165,7 +194,9 @@ static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                    int *peeked, int *off, int *err)
 {
+       struct sk_buff_head *queue = &sk->sk_receive_queue;
        struct sk_buff *skb, *last;
+       unsigned long cpu_flags;
        long timeo;
        /*
         * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                 * Look at current nfs client by the way...
                 * However, this function was correct in any case. 8)
                 */
-               unsigned long cpu_flags;
-               struct sk_buff_head *queue = &sk->sk_receive_queue;
                int _off = *off;
 
                last = (struct sk_buff *)queue;
@@ -199,7 +228,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                        _off -= skb->len;
                                        continue;
                                }
-                               skb->peeked = 1;
+
+                               error = skb_set_peeked(skb);
+                               if (error)
+                                       goto unlock_err;
+
                                atomic_inc(&skb->users);
                        } else
                                __skb_unlink(skb, queue);
@@ -223,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 
        return NULL;
 
+unlock_err:
+       spin_unlock_irqrestore(&queue->lock, cpu_flags);
 no_packet:
        *err = error;
        return NULL;
@@ -622,7 +657,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
                    !skb->csum_complete_sw)
                        netdev_rx_csum_fault(skb->dev);
        }
-       skb->csum_valid = !sum;
+       if (!skb_shared(skb))
+               skb->csum_valid = !sum;
        return sum;
 }
 EXPORT_SYMBOL(__skb_checksum_complete_head);
@@ -642,11 +678,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
                        netdev_rx_csum_fault(skb->dev);
        }
 
-       /* Save full packet checksum */
-       skb->csum = csum;
-       skb->ip_summed = CHECKSUM_COMPLETE;
-       skb->csum_complete_sw = 1;
-       skb->csum_valid = !sum;
+       if (!skb_shared(skb)) {
+               /* Save full packet checksum */
+               skb->csum = csum;
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               skb->csum_complete_sw = 1;
+               skb->csum_valid = !sum;
+       }
 
        return sum;
 }
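
A note on skb_set_peeked() earlier in this file section: skb_clone() is needed first because a shared skb (refcount > 1) must not be written, and the splice then swaps the clone into the receive queue in place. Stripped of the skb details, it is the standard node replacement for a doubly linked list (illustration only; the struct and function names are hypothetical):

        struct node { struct node *prev, *next; };

        static void replace_node(struct node *old, struct node *new)
        {
                old->prev->next = new;  /* predecessor now points at the clone */
                old->next->prev = new;  /* successor likewise */
                new->prev = old->prev;  /* clone inherits the old node's links */
                new->next = old->next;
        }
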
index e956ce6d13782f2da0a229cabafef663665159eb..002144bea93517d7e2e5b2c0ac00e70c028174a2 100644 (file)
@@ -284,7 +284,9 @@ void dst_release(struct dst_entry *dst)
                int newrefcnt;
 
                newrefcnt = atomic_dec_return(&dst->__refcnt);
-               WARN_ON(newrefcnt < 0);
+               if (unlikely(newrefcnt < 0))
+                       net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
+                                            __func__, dst, newrefcnt);
                if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
                        call_rcu(&dst->rcu_head, dst_destroy_rcu);
        }
index 9e433d58d2651cf867294a911d0f136e565730ae..dc004b1e1f8515250bb7c7f284b047d2f961f083 100644 (file)
@@ -1804,10 +1804,13 @@ static int do_setlink(const struct sk_buff *skb,
                        goto errout;
 
                nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
-                       if (nla_type(attr) != IFLA_VF_PORT)
-                               continue;
-                       err = nla_parse_nested(port, IFLA_PORT_MAX,
-                               attr, ifla_port_policy);
+                       if (nla_type(attr) != IFLA_VF_PORT ||
+                           nla_len(attr) < NLA_HDRLEN) {
+                               err = -EINVAL;
+                               goto errout;
+                       }
+                       err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
+                                              ifla_port_policy);
                        if (err < 0)
                                goto errout;
                        if (!port[IFLA_PORT_VF]) {
index 90c0e8386116177f4bbf412f2175aec93c64870c..574fad9cca052cb2970e283a4dc5568c4b3b8b23 100644 (file)
@@ -20,7 +20,7 @@
 #include <net/route.h>
 #include <net/tcp_states.h>
 
-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        sk_dst_reset(sk);
 
-       lock_sock(sk);
-
        oif = sk->sk_bound_dev_if;
        saddr = inet->inet_saddr;
        if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        sk_dst_set(sk, &rt->dst);
        err = 0;
 out:
-       release_sock(sk);
        return err;
 }
+EXPORT_SYMBOL(__ip4_datagram_connect);
+
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+       int res;
+
+       lock_sock(sk);
+       res = __ip4_datagram_connect(sk, uaddr, addr_len);
+       release_sock(sk);
+       return res;
+}
 EXPORT_SYMBOL(ip4_datagram_connect);
 
 /* Because UDP xmit path can manipulate sk_dst_cache without holding
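
The refactoring above is the usual lock-split pattern: the double-underscore variant assumes the caller already holds the socket lock (here so that ip6_datagram_connect(), reworked further down in this series, can take the lock once and call into the IPv4 path without self-deadlocking on lock_sock()), while the plain variant remains the locked public entry point. Reduced sketch (function names hypothetical):

        static int __do_connect(struct sock *sk)
        {
                /* caller must hold the socket lock */
                return 0;
        }

        int do_connect(struct sock *sk)
        {
                int res;

                lock_sock(sk);
                res = __do_connect(sk);
                release_sock(sk);
                return res;
        }
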
index 5f9b063bbe8ab4f3755a5711ae19b816a3bc2026..0cb9165421d450ae8f6aff81b88ef4bf2839ff51 100644 (file)
@@ -624,22 +624,21 @@ EXPORT_SYMBOL_GPL(inet_hashinfo_init);
 
 int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
 {
+       unsigned int locksz = sizeof(spinlock_t);
        unsigned int i, nblocks = 1;
 
-       if (sizeof(spinlock_t) != 0) {
+       if (locksz != 0) {
                /* allocate 2 cache lines or at least one spinlock per cpu */
-               nblocks = max_t(unsigned int,
-                               2 * L1_CACHE_BYTES / sizeof(spinlock_t),
-                               1);
+               nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
                nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
 
                /* no more locks than number of hash buckets */
                nblocks = min(nblocks, hashinfo->ehash_mask + 1);
 
-               hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t),
+               hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
                                                      GFP_KERNEL | __GFP_NOWARN);
                if (!hashinfo->ehash_locks)
-                       hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t));
+                       hashinfo->ehash_locks = vmalloc(nblocks * locksz);
 
                if (!hashinfo->ehash_locks)
                        return -ENOMEM;
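
A worked example of the sizing logic above, under a hypothetical configuration (locksz = 4 bytes, L1_CACHE_BYTES = 64, 16 possible CPUs, ehash_mask = 65535):

        nblocks = max(2 * 64 / 4, 1)           = 32    /* two cache lines of locks */
        nblocks = roundup_pow_of_two(32 * 16)  = 512
        nblocks = min(512, 65535 + 1)          = 512   /* capped by bucket count */

so the table ends up with 512 spinlocks (2 KiB), allocated via kmalloc_array() or, failing that, vmalloc().
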
index a50dc6d408d11c339b38f2436216c8568c4149cf..31f71b15cfbad7aa7cd959d4505644df3dd80b2d 100644 (file)
@@ -351,7 +351,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
        ihl = ip_hdrlen(skb);
 
        /* Determine the position of this fragment. */
-       end = offset + skb->len - ihl;
+       end = offset + skb->len - skb_network_offset(skb) - ihl;
        err = -EINVAL;
 
        /* Is this the final fragment? */
@@ -381,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                goto err;
 
        err = -ENOMEM;
-       if (!pskb_pull(skb, ihl))
+       if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
                goto err;
 
        err = pskb_trim_rcsum(skb, end - offset);
@@ -641,6 +641,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                iph->frag_off = 0;
        }
 
+       ip_send_check(iph);
+
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
        qp->q.fragments_tail = NULL;
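
The effect of the two skb_network_offset() fixes above, for a hypothetical fragment whose skb->data still includes a 14-byte Ethernet header (so skb_network_offset(skb) == 14), with skb->len == 1514, ihl == 20 and offset == 0:

        old: end = 0 + 1514 - 20        = 1494  /* 14 bytes too long */
        new: end = 0 + 1514 - 14 - 20   = 1480  /* actual L3 payload */

and pskb_pull() correspondingly pulls 14 + 20 bytes so the queued data starts at the payload. The added ip_send_check() recomputes the IP header checksum after frag_off is rewritten during reassembly.
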
index 684f095d196e20333adb235fc96a8fb8f0dd691c..728f5b3d3c64197bb526240a078744d5a950c8ea 100644 (file)
@@ -1917,14 +1917,13 @@ void tcp_enter_loss(struct sock *sk)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       bool new_recovery = false;
+       bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
        bool is_reneg;                  /* is receiver reneging on SACKs? */
 
        /* Reduce ssthresh if it has not yet been made inside this window. */
        if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
            !after(tp->high_seq, tp->snd_una) ||
            (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
-               new_recovery = true;
                tp->prior_ssthresh = tcp_current_ssthresh(sk);
                tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                tcp_ca_event(sk, CA_EVENT_LOSS);
index 62d908e64eeb53740d53ddfd57e26867c4e7e4d3..b10a88986a9896a4a33f8a4139e41d3f1013a41a 100644 (file)
@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
        return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct sockaddr_in6     *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock        *inet = inet_sk(sk);
@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (usin->sin6_family == AF_INET) {
                if (__ipv6_only_sock(sk))
                        return -EAFNOSUPPORT;
-               err = ip4_datagram_connect(sk, uaddr, addr_len);
+               err = __ip4_datagram_connect(sk, uaddr, addr_len);
                goto ipv4_connected;
        }
 
@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                sin.sin_addr.s_addr = daddr->s6_addr32[3];
                sin.sin_port = usin->sin6_port;
 
-               err = ip4_datagram_connect(sk,
-                                          (struct sockaddr *) &sin,
-                                          sizeof(sin));
+               err = __ip4_datagram_connect(sk,
+                                            (struct sockaddr *) &sin,
+                                            sizeof(sin));
 
 ipv4_connected:
                if (err)
@@ -204,6 +204,16 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        fl6_sock_release(flowlabel);
        return err;
 }
+
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+       int res;
+
+       lock_sock(sk);
+       res = __ip6_datagram_connect(sk, uaddr, addr_len);
+       release_sock(sk);
+       return res;
+}
 EXPORT_SYMBOL_GPL(ip6_datagram_connect);
 
 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
index e893cd18612fcdc9e8577f0e060ffd32b5659eea..08b62047c67f311ca808533cb7a83b5caab0cfc8 100644 (file)
@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
 static const struct net_offload sit_offload = {
        .callbacks = {
                .gso_segment    = ipv6_gso_segment,
-               .gro_receive    = ipv6_gro_receive,
-               .gro_complete   = ipv6_gro_complete,
        },
 };
 
index 29236e832e44470a4f13637847c2d1cbafb9bfb2..c09c0131bfa227e99346b180f501cc6fcf64644a 100644 (file)
@@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
 
        debugfs_remove_recursive(sdata->vif.debugfs_dir);
        sdata->vif.debugfs_dir = NULL;
+       sdata->debugfs.subdir_stations = NULL;
 }
 
 void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
index ed1edac143729cc117c1cb63c5519285acff13a7..553ac6dd4867480048aed3ca0d430948f928d3c7 100644 (file)
@@ -1863,10 +1863,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
        ieee80211_teardown_sdata(sdata);
 }
 
-/*
- * Remove all interfaces, may only be called at hardware unregistration
- * time because it doesn't do RCU-safe list removals.
- */
 void ieee80211_remove_interfaces(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata, *tmp;
@@ -1875,14 +1871,21 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 
        ASSERT_RTNL();
 
-       /*
-        * Close all AP_VLAN interfaces first, as otherwise they
-        * might be closed while the AP interface they belong to
-        * is closed, causing unregister_netdevice_many() to crash.
+       /* Before destroying the interfaces, make sure they're all stopped so
+        * that the hardware is stopped. Otherwise, the driver might still be
+        * iterating the interfaces during the shutdown, e.g. from a worker
+        * or from RX processing or similar, and if it does so (using atomic
+        * iteration) while we're manipulating the list, the iteration will
+        * crash.
+        *
+        * After this, the hardware should be stopped and the driver should
+        * have stopped all of its activities, so that we can do RCU-unaware
+        * manipulations of the interface list below.
         */
-       list_for_each_entry(sdata, &local->interfaces, list)
-               if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-                       dev_close(sdata->dev);
+       cfg80211_shutdown_all_interfaces(local->hw.wiphy);
+
+       WARN(local->open_count, "%s: open count remains %d\n",
+            wiphy_name(local->hw.wiphy), local->open_count);
 
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
index 5438d13e2f007d1bca01ac97bb0f54867bef15ce..3b59099413fb1770e2ee2228065899bc5f9eb302 100644 (file)
@@ -306,7 +306,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
                        /* AID */
                        pos = skb_put(skb, 2);
-                       put_unaligned_le16(plid, pos + 2);
+                       put_unaligned_le16(plid, pos);
                }
                if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
                    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -1122,6 +1122,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
                                                WLAN_SP_MESH_PEERING_CONFIRM) {
                baseaddr += 4;
                baselen += 4;
+
+               if (baselen > len)
+                       return;
        }
        ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems);
        mesh_process_plink_frame(sdata, mgmt, &elems);
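
The one-line AID fix above hinges on skb_put() semantics: it extends the skb tail by the requested length and returns a pointer to the start of the newly added area, so the 16-bit AID belongs at pos, not pos + 2, which lands past the reservation. Sketch of the corrected sequence:

        u8 *pos = skb_put(skb, 2);      /* reserve bytes [0,1] at the tail */
        put_unaligned_le16(plid, pos);  /* fill exactly those two bytes */
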
index 06b60980c62c62cf8c8f95e10052060be5d36ed8..b676b9fa707b3c8872a1065d8582b8ff1463e607 100644 (file)
@@ -76,6 +76,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                        if (sdata->vif.type != NL80211_IFTYPE_STATION)
                                continue;
                        ieee80211_mgd_quiesce(sdata);
+                       /* If we suspend while TX is in progress and
+                        * wowlan is enabled (so the connection stays
+                        * active), there is a race: TX takes the driver
+                        * out of power-save, then suspend cancels
+                        * dynamic_ps_timer and flushes the TX packet,
+                        * leaving the driver ACTIVE even after resume,
+                        * until dynamic_ps_timer fires again and puts
+                        * it back in DOZE.
+                        */
+                       if (sdata->u.mgd.associated &&
+                           sdata->u.mgd.powersave &&
+                            !(local->hw.conf.flags & IEEE80211_CONF_PS)) {
+                               local->hw.conf.flags |= IEEE80211_CONF_PS;
+                               ieee80211_hw_config(local,
+                                                   IEEE80211_CONF_CHANGE_PS);
+                       }
                }
 
                err = drv_suspend(local, wowlan);
index ad31b2dab4f5ab0731d53e54f00b30e65b085ec4..8db6e2994bbc59bb7cf38c36848a7d92b8e4a0e5 100644 (file)
@@ -60,6 +60,7 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_channel *ch;
        struct cfg80211_chan_def chandef;
        int i, subband_start;
+       struct wiphy *wiphy = sdata->local->hw.wiphy;
 
        for (i = start; i <= end; i += spacing) {
                if (!ch_cnt)
@@ -70,9 +71,8 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
                        /* we will be active on the channel */
                        cfg80211_chandef_create(&chandef, ch,
                                                NL80211_CHAN_NO_HT);
-                       if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
-                                                   &chandef,
-                                                   sdata->wdev.iftype)) {
+                       if (cfg80211_reg_can_beacon_relax(wiphy, &chandef,
+                                                         sdata->wdev.iftype)) {
                                ch_cnt++;
                                /*
                                 * check if the next channel is also part of
index 8410bb3bf5e8ddf5552fb0af84d05152425148dd..b8233505bf9fd3bb4945c126eebee069f2b94878 100644 (file)
@@ -1117,7 +1117,9 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
                        queued = true;
                        info->control.vif = &tx->sdata->vif;
                        info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-                       info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
+                       info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
+                                       IEEE80211_TX_CTL_NO_PS_BUFFER |
+                                       IEEE80211_TX_STATUS_EOSP;
                        __skb_queue_tail(&tid_tx->pending, skb);
                        if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
                                purge_skb = __skb_dequeue(&tid_tx->pending);
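
The compound mask above is intentional: unary ~ binds tighter than |, so the expression evaluates as (~IEEE80211_TX_TEMPORARY_FLAGS) | IEEE80211_TX_CTL_NO_PS_BUFFER | IEEE80211_TX_STATUS_EOSP, i.e. it clears the temporary flags *except* NO_PS_BUFFER and EOSP, which must survive the requeue. An 8-bit illustration with hypothetical flag values:

        /* TEMP = 0x07, KEEP = 0x04 */
        mask = ~0x07 | 0x04;    /* = 0xf8 | 0x04 = 0xfc */
        flags &= mask;          /* clears bits 0-1 only; bit 2 survives */
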
index 9a0ae7172f9271851f7a7c036e1c1980f45e5255..d8e2e3918ce2fd95637c4cba8bfc4886feb91ea6 100644 (file)
@@ -357,25 +357,52 @@ static void **alloc_pg_vec(struct netlink_sock *nlk,
        return NULL;
 }
 
+
+static void
+__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
+                  unsigned int order)
+{
+       struct netlink_sock *nlk = nlk_sk(sk);
+       struct sk_buff_head *queue;
+       struct netlink_ring *ring;
+
+       queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+       ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+
+       spin_lock_bh(&queue->lock);
+
+       ring->frame_max         = req->nm_frame_nr - 1;
+       ring->head              = 0;
+       ring->frame_size        = req->nm_frame_size;
+       ring->pg_vec_pages      = req->nm_block_size / PAGE_SIZE;
+
+       swap(ring->pg_vec_len, req->nm_block_nr);
+       swap(ring->pg_vec_order, order);
+       swap(ring->pg_vec, pg_vec);
+
+       __skb_queue_purge(queue);
+       spin_unlock_bh(&queue->lock);
+
+       WARN_ON(atomic_read(&nlk->mapped));
+
+       if (pg_vec)
+               free_pg_vec(pg_vec, order, req->nm_block_nr);
+}
+
 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
-                           bool closing, bool tx_ring)
+                           bool tx_ring)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
-       struct sk_buff_head *queue;
        void **pg_vec = NULL;
        unsigned int order = 0;
-       int err;
 
        ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
-       queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
 
-       if (!closing) {
-               if (atomic_read(&nlk->mapped))
-                       return -EBUSY;
-               if (atomic_read(&ring->pending))
-                       return -EBUSY;
-       }
+       if (atomic_read(&nlk->mapped))
+               return -EBUSY;
+       if (atomic_read(&ring->pending))
+               return -EBUSY;
 
        if (req->nm_block_nr) {
                if (ring->pg_vec != NULL)
@@ -407,31 +434,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
                        return -EINVAL;
        }
 
-       err = -EBUSY;
        mutex_lock(&nlk->pg_vec_lock);
-       if (closing || atomic_read(&nlk->mapped) == 0) {
-               err = 0;
-               spin_lock_bh(&queue->lock);
-
-               ring->frame_max         = req->nm_frame_nr - 1;
-               ring->head              = 0;
-               ring->frame_size        = req->nm_frame_size;
-               ring->pg_vec_pages      = req->nm_block_size / PAGE_SIZE;
-
-               swap(ring->pg_vec_len, req->nm_block_nr);
-               swap(ring->pg_vec_order, order);
-               swap(ring->pg_vec, pg_vec);
-
-               __skb_queue_purge(queue);
-               spin_unlock_bh(&queue->lock);
-
-               WARN_ON(atomic_read(&nlk->mapped));
+       if (atomic_read(&nlk->mapped) == 0) {
+               __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
+               mutex_unlock(&nlk->pg_vec_lock);
+               return 0;
        }
+
        mutex_unlock(&nlk->pg_vec_lock);
 
        if (pg_vec)
                free_pg_vec(pg_vec, order, req->nm_block_nr);
-       return err;
+
+       return -EBUSY;
 }
 
 static void netlink_mm_open(struct vm_area_struct *vma)
@@ -900,10 +915,10 @@ static void netlink_sock_destruct(struct sock *sk)
 
                memset(&req, 0, sizeof(req));
                if (nlk->rx_ring.pg_vec)
-                       netlink_set_ring(sk, &req, true, false);
+                       __netlink_set_ring(sk, &req, false, NULL, 0);
                memset(&req, 0, sizeof(req));
                if (nlk->tx_ring.pg_vec)
-                       netlink_set_ring(sk, &req, true, true);
+                       __netlink_set_ring(sk, &req, true, NULL, 0);
        }
 #endif /* CONFIG_NETLINK_MMAP */
 
@@ -2223,7 +2238,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                        return -EINVAL;
                if (copy_from_user(&req, optval, sizeof(req)))
                        return -EFAULT;
-               err = netlink_set_ring(sk, &req, false,
+               err = netlink_set_ring(sk, &req,
                                       optname == NETLINK_TX_RING);
                break;
        }
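
The swap() calls in __netlink_set_ring() above publish the new ring under the queue lock while leaving the *old* buffers behind in the local variables, which are then freed after the lock is dropped. Reduced to its essentials (a sketch, not the full function):

        spin_lock_bh(&queue->lock);
        swap(ring->pg_vec, pg_vec);     /* local pg_vec now holds the old vector */
        spin_unlock_bh(&queue->lock);

        if (pg_vec)                     /* old vector is no longer reachable */
                free_pg_vec(pg_vec, order, req->nm_block_nr);

Readers under the queue lock therefore always see a consistent ring, and the old pages are released without holding the lock.
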
index 4613df8c82900e32a4e0188688c9e0354022484d..65523948fb95e7cf7843efd447c7fc62b0c51b71 100644 (file)
@@ -752,7 +752,7 @@ int ovs_flow_init(void)
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
 
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
-                                      + (num_possible_nodes()
+                                      + (nr_node_ids
                                          * sizeof(struct flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
index 1d56903fd4c79aa008c4c540aabd8b4c099e81a1..1df78289e248179a502bd175810716273f6bfb91 100644 (file)
@@ -339,6 +339,9 @@ static void tcf_bpf_cleanup(struct tc_action *act, int bind)
                bpf_prog_put(prog->filter);
        else
                bpf_prog_destroy(prog->filter);
+
+       kfree(prog->bpf_ops);
+       kfree(prog->bpf_name);
 }
 
 static struct tc_action_ops act_bpf_ops __read_mostly = {
index c79ecfd36e0f028388ea5f96a64dbb23451b01b1..e5168f8b9640964ce2dd95a896a24f6c986a959a 100644 (file)
@@ -378,7 +378,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                goto errout;
 
        if (oldprog) {
-               list_replace_rcu(&prog->link, &oldprog->link);
+               list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
        } else {
index 76bc3a20ffdb31bb4c9b51942de74c64928c2a3a..bb2a0f529c1f519f79f22ffa046013d7ac863eb5 100644 (file)
@@ -425,6 +425,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
        if (!fnew)
                goto err2;
 
+       tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+
        fold = (struct flow_filter *)*arg;
        if (fold) {
                err = -EINVAL;
@@ -486,7 +488,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
                fnew->mask  = ~0U;
                fnew->tp = tp;
                get_random_bytes(&fnew->hashrnd, 4);
-               tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
        }
 
        fnew->perturb_timer.function = flow_perturbation;
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
        if (*arg == 0)
                list_add_tail_rcu(&fnew->list, &head->filters);
        else
-               list_replace_rcu(&fnew->list, &fold->list);
+               list_replace_rcu(&fold->list, &fnew->list);
 
        *arg = (unsigned long)fnew;
 
index 9d37ccd95062a6840d1bb1e140b173dd1fe0b9d0..2f3d03f99487ed35c4af1e5c7ede590e4b0e3721 100644 (file)
@@ -499,7 +499,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        *arg = (unsigned long) fnew;
 
        if (fold) {
-               list_replace_rcu(&fnew->list, &fold->list);
+               list_replace_rcu(&fold->list, &fnew->list);
                tcf_unbind_filter(tp, &fold->res);
                call_rcu(&fold->rcu, fl_destroy_filter);
        } else {
index d75993f89facc0ce8d5df0d26aedcd016714a43e..21ca33c9f0368b21cdb00fbdbbca4851c2ad87a2 100644 (file)
@@ -155,14 +155,23 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
        skb = dequeue_head(flow);
        len = qdisc_pkt_len(skb);
        q->backlogs[idx] -= len;
-       kfree_skb(skb);
        sch->q.qlen--;
        qdisc_qstats_drop(sch);
        qdisc_qstats_backlog_dec(sch, skb);
+       kfree_skb(skb);
        flow->dropped++;
        return idx;
 }
 
+static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
+{
+       unsigned int prev_backlog;
+
+       prev_backlog = sch->qstats.backlog;
+       fq_codel_drop(sch);
+       return prev_backlog - sch->qstats.backlog;
+}
+
 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -604,7 +613,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .enqueue        =       fq_codel_enqueue,
        .dequeue        =       fq_codel_dequeue,
        .peek           =       qdisc_peek_dequeued,
-       .drop           =       fq_codel_drop,
+       .drop           =       fq_codel_qdisc_drop,
        .init           =       fq_codel_init,
        .reset          =       fq_codel_reset,
        .destroy        =       fq_codel_destroy,
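
The kfree_skb() move in fq_codel_drop() above, and the identical one in sfq_drop() below, exist because qdisc_qstats_backlog_dec() dereferences the skb, so freeing it first was a use-after-free. Assuming the include/net/sch_generic.h definition of this era:

        static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
                                                    const struct sk_buff *skb)
        {
                sch->qstats.backlog -= qdisc_pkt_len(skb);      /* reads *skb */
        }

The new fq_codel_qdisc_drop() wrapper, in turn, adapts fq_codel_drop() (which returns a flow index) to the ->drop() contract of returning the number of bytes freed, by differencing the backlog before and after the drop.
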
index 7d14926633601b85c2d281d914fa978c8a038e10..52f75a5473e120f8d0a02a2ff1c0215ff02437b5 100644 (file)
@@ -306,10 +306,10 @@ static unsigned int sfq_drop(struct Qdisc *sch)
                len = qdisc_pkt_len(skb);
                slot->backlog -= len;
                sfq_dec(q, x);
-               kfree_skb(skb);
                sch->q.qlen--;
                qdisc_qstats_drop(sch);
                qdisc_qstats_backlog_dec(sch, skb);
+               kfree_skb(skb);
                return len;
        }
 
index 915b328b9ac5e71afe3a979640288ad2bfab4a22..59cabc9bce693f5f39f1d2db8f436a18ac2efcaa 100644 (file)
@@ -797,23 +797,18 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
        return false;
 }
 
-bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
-                            struct cfg80211_chan_def *chandef,
-                            enum nl80211_iftype iftype)
+static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy,
+                                    struct cfg80211_chan_def *chandef,
+                                    enum nl80211_iftype iftype,
+                                    bool check_no_ir)
 {
        bool res;
        u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
                               IEEE80211_CHAN_RADAR;
 
-       trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
+       trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
 
-       /*
-        * Under certain conditions suggested by some regulatory bodies a
-        * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
-        * only if such relaxations are not enabled and the conditions are not
-        * met.
-        */
-       if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan))
+       if (check_no_ir)
                prohibited_flags |= IEEE80211_CHAN_NO_IR;
 
        if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
@@ -827,8 +822,36 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
        trace_cfg80211_return_bool(res);
        return res;
 }
+
+bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
+                            struct cfg80211_chan_def *chandef,
+                            enum nl80211_iftype iftype)
+{
+       return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true);
+}
 EXPORT_SYMBOL(cfg80211_reg_can_beacon);
 
+bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
+                                  struct cfg80211_chan_def *chandef,
+                                  enum nl80211_iftype iftype)
+{
+       bool check_no_ir;
+
+       ASSERT_RTNL();
+
+       /*
+        * Under certain conditions suggested by some regulatory bodies a
+        * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
+        * only if such relaxations are not enabled and the conditions are not
+        * met.
+        */
+       check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype,
+                                                  chandef->chan);
+
+       return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
+}
+EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax);
+
 int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
                                 struct cfg80211_chan_def *chandef)
 {
index c264effd00a69f97654e9a53959414321ea01668..76b41578a838e3bed8596c5950ffbd6881d54a64 100644 (file)
@@ -2003,7 +2003,8 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
        switch (iftype) {
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
-               if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) {
+               if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
+                                                  iftype)) {
                        result = -EINVAL;
                        break;
                }
@@ -3403,8 +3404,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
        } else if (!nl80211_get_ap_channel(rdev, &params))
                return -EINVAL;
 
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
-                                    wdev->iftype))
+       if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
+                                          wdev->iftype))
                return -EINVAL;
 
        if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
@@ -6492,8 +6493,8 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        if (err)
                return err;
 
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
-                                    wdev->iftype))
+       if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
+                                          wdev->iftype))
                return -EINVAL;
 
        err = cfg80211_chandef_dfs_required(wdev->wiphy,
@@ -10170,7 +10171,8 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb,
                return -EINVAL;
 
        /* we will be active on the TDLS link */
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype))
+       if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
+                                          wdev->iftype))
                return -EINVAL;
 
        /* don't allow switching to DFS channels */
index d359e0610198c5c3d2094120e4ba3cf714d8dc21..aa2d75482017e1a258eb0cdb7271b2d615d8782e 100644 (file)
@@ -544,15 +544,15 @@ static int call_crda(const char *alpha2)
        reg_regdb_query(alpha2);
 
        if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
-               pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n");
+               pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
                return -EINVAL;
        }
 
        if (!is_world_regdom((char *) alpha2))
-               pr_info("Calling CRDA for country: %c%c\n",
+               pr_debug("Calling CRDA for country: %c%c\n",
                        alpha2[0], alpha2[1]);
        else
-               pr_info("Calling CRDA to update world regulatory domain\n");
+               pr_debug("Calling CRDA to update world regulatory domain\n");
 
        return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
 }
@@ -1589,7 +1589,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
        case NL80211_IFTYPE_ADHOC:
-               return cfg80211_reg_can_beacon(wiphy, &chandef, iftype);
+               return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
                return cfg80211_chandef_usable(wiphy, &chandef,
index af3617c9879e33f8be5c325ffdc3b5558e60b217..a808279a432a9ef1a23976ba169ac33ba377fe6d 100644 (file)
@@ -2358,20 +2358,23 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
 
 TRACE_EVENT(cfg80211_reg_can_beacon,
        TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
-                enum nl80211_iftype iftype),
-       TP_ARGS(wiphy, chandef, iftype),
+                enum nl80211_iftype iftype, bool check_no_ir),
+       TP_ARGS(wiphy, chandef, iftype, check_no_ir),
        TP_STRUCT__entry(
                WIPHY_ENTRY
                CHAN_DEF_ENTRY
                __field(enum nl80211_iftype, iftype)
+               __field(bool, check_no_ir)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
                CHAN_DEF_ASSIGN(chandef);
                __entry->iftype = iftype;
+               __entry->check_no_ir = check_no_ir;
        ),
-       TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d",
-                 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype)
+       TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s",
+                 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype,
+                 BOOL_TO_STR(__entry->check_no_ir))
 );
 
 TRACE_EVENT(cfg80211_chandef_dfs_required,
index 8965d1bb881194685d1af344cbdfd2868c2aa9fd..125d6402f64f8555ec85b42f4c39e3af0f46ef3b 100644 (file)
  *
  *      For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
  *            Use __get_dynamic_array_len(foo) to get the length of the array
- *            saved.
+ *            saved. Note, __get_dynamic_array_len() returns the total allocated
+ *            length of the dynamic array; __print_array() expects the second
+ *            parameter to be the number of elements. To get that, the array length
+ *            needs to be divided by the element size.
  *
  *      For __string(foo, bar) use __get_str(foo)
  *
@@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar,
  *    This prints out the array that is defined by __array in a nice format.
  */
                  __print_array(__get_dynamic_array(list),
-                               __get_dynamic_array_len(list),
+                               __get_dynamic_array_len(list) / sizeof(int),
                                sizeof(int)),
                  __get_str(str), __get_bitmask(cpus))
 );
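
Worked example of the division above: for a dynamic array built from five ints, __get_dynamic_array_len() returns 5 * sizeof(int) = 20 bytes (assuming a typical 4-byte int), so __print_array() must be passed 20 / sizeof(int) = 5 as its element count.
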
index d126c03361aef87fb1239df1e42f547c5ce36aa9..75888dd38a7fc87acf5fcd7789570ad353dce482 100644 (file)
@@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
 {
        if (substream->pcm->nonatomic) {
-               down_read(&snd_pcm_link_rwsem);
+               down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
                mutex_lock(&substream->self_group.mutex);
        } else {
                read_lock(&snd_pcm_link_rwlock);
index 442500e06b7c7b66be05ec254a68e58d009089fe..5676b849379d43468267e8d12fc7367096a2ae4c 100644 (file)
@@ -56,8 +56,11 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
                enable ? "enable" : "disable");
 
        if (enable) {
-               if (!bus->i915_power_refcount++)
+               if (!bus->i915_power_refcount++) {
                        acomp->ops->get_power(acomp->dev);
+                       snd_hdac_set_codec_wakeup(bus, true);
+                       snd_hdac_set_codec_wakeup(bus, false);
+               }
        } else {
                WARN_ON(!bus->i915_power_refcount);
                if (!--bus->i915_power_refcount)
index 745535d1840a6713e802aaa8c1d475733b88720e..735bdcb04ce827f162cc26dfc2909949e27782c4 100644 (file)
@@ -979,14 +979,16 @@ static int azx_runtime_resume(struct device *dev)
        if (!azx_has_pm_runtime(chip))
                return 0;
 
-       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
-               && hda->need_i915_power) {
-               bus =  azx_bus(chip);
-               snd_hdac_display_power(bus, true);
-               haswell_set_bclk(hda);
-               /* toggle codec wakeup bit for STATESTS read */
-               snd_hdac_set_codec_wakeup(bus, true);
-               snd_hdac_set_codec_wakeup(bus, false);
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+               bus = azx_bus(chip);
+               if (hda->need_i915_power) {
+                       snd_hdac_display_power(bus, true);
+                       haswell_set_bclk(hda);
+               } else {
+                       /* toggle codec wakeup bit for STATESTS read */
+                       snd_hdac_set_codec_wakeup(bus, true);
+                       snd_hdac_set_codec_wakeup(bus, false);
+               }
        }
 
        /* Read STATESTS before controller reset */
@@ -2182,6 +2184,8 @@ static const struct pci_device_id azx_ids[] = {
        /* ATI HDMI */
        { PCI_DEVICE(0x1002, 0x1308),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0x157a),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x793b),
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(0x1002, 0x7919),
@@ -2236,8 +2240,14 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaab0),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaac0),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaac8),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaad8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaae8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        /* VIA VT8251/VT8237A */
        { PCI_DEVICE(0x1106, 0x3288),
          .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
index 95158914cc6ce0c8761c04fde20f81a4e21e1931..a97db5fc8a151aa43c0c99edc6ccf7efc0eaa52a 100644 (file)
@@ -3512,6 +3512,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0070, .name = "GPU 70 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0072, .name = "GPU 72 HDMI/DP",  .patch = patch_nvhdmi },
+{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
 { .id = 0x11069f81, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3576,6 +3577,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067");
 MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de0072");
+MODULE_ALIAS("snd-hda-codec-id:10de007d");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
 MODULE_ALIAS("snd-hda-codec-id:11069f81");
index d35cf506a7dbc386a5aa003251d274ab9a305797..742fc626f9e17d155a74f174368bf7b4b6509b58 100644 (file)
@@ -5061,7 +5061,7 @@ static const struct hda_fixup alc269_fixups[] = {
                        { 0x14, 0x90170110 },
                        { 0x17, 0x40000008 },
                        { 0x18, 0x411111f0 },
-                       { 0x19, 0x411111f0 },
+                       { 0x19, 0x01a1913c },
                        { 0x1a, 0x411111f0 },
                        { 0x1b, 0x411111f0 },
                        { 0x1d, 0x40f89b2d },
@@ -5430,8 +5430,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {0x15, 0x0221401f}, \
        {0x1a, 0x411111f0}, \
        {0x1b, 0x411111f0}, \
-       {0x1d, 0x40700001}, \
-       {0x1e, 0x411111f0}
+       {0x1d, 0x40700001}
 
 #define ALC298_STANDARD_PINS \
        {0x18, 0x411111f0}, \
@@ -5462,6 +5461,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x17, 0x40000000},
                {0x1d, 0x40700001},
                {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x14, 0x90170130},
+               {0x17, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x01014020},
+               {0x1d, 0x4054c029},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0221103f}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60160},
                {0x14, 0x90170120},
@@ -5690,35 +5700,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x13, 0x411111f0},
                {0x16, 0x01014020},
                {0x18, 0x411111f0},
-               {0x19, 0x01a19030}),
+               {0x19, 0x01a19030},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x90a60140},
                {0x13, 0x411111f0},
                {0x16, 0x01014020},
                {0x18, 0x02a19031},
-               {0x19, 0x01a1903e}),
+               {0x19, 0x01a1903e},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x90a60140},
                {0x13, 0x411111f0},
                {0x16, 0x411111f0},
                {0x18, 0x411111f0},
-               {0x19, 0x411111f0}),
+               {0x19, 0x411111f0},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x40000000},
                {0x13, 0x90a60140},
                {0x16, 0x21014020},
                {0x18, 0x411111f0},
-               {0x19, 0x21a19030}),
+               {0x19, 0x21a19030},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x40000000},
                {0x13, 0x90a60140},
                {0x16, 0x411111f0},
                {0x18, 0x411111f0},
-               {0x19, 0x411111f0}),
+               {0x19, 0x411111f0},
+               {0x1e, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC292_STANDARD_PINS,
+               {0x12, 0x40000000},
+               {0x13, 0x90a60140},
+               {0x16, 0x21014020},
+               {0x18, 0x411111f0},
+               {0x19, 0x21a19030},
+               {0x1e, 0x411111ff}),
        SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC298_STANDARD_PINS,
                {0x12, 0x90a60130},
index 1b1a89e80d1394bb253aecc3cadc683dbcff3a36..784ceb85b2d9fe7cbe989d0c8443863f1d03df34 100644 (file)
@@ -956,6 +956,7 @@ static int snd_amd7930_create(struct snd_card *card,
        if (!amd->regs) {
                snd_printk(KERN_ERR
                           "amd7930-%d: Unable to map chip registers.\n", dev);
+               kfree(amd);
                return -EIO;
        }