Merge branch 'tip/perf/urgent-2' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Ingo Molnar <mingo@elte.hu>
Tue, 17 Jan 2012 08:51:46 +0000 (09:51 +0100)
committer Ingo Molnar <mingo@elte.hu>
Tue, 17 Jan 2012 08:51:46 +0000 (09:51 +0100)
153 files changed:
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/common/pl330.c
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/mach-exynos/cpu.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/Makefile
arch/arm/mach-imx/clock-imx35.c
arch/arm/mach-imx/mach-cpuimx35.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mm/proc-v7.S
arch/arm/plat-mxc/cpufreq.c
arch/arm/plat-mxc/include/mach/uncompress.h
arch/arm/plat-mxc/pwm.c
arch/arm/plat-orion/gpio.c
arch/arm/plat-samsung/include/plat/cpu-freq-core.h
arch/ia64/include/asm/cputime.h
arch/powerpc/include/asm/cputime.h
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/e500.c
arch/s390/include/asm/cputime.h
arch/sparc/kernel/pci_sun4v.c
arch/x86/kvm/i8254.c
arch/x86/kvm/x86.c
arch/x86/net/bpf_jit_comp.c
block/blk-map.c
block/blk-tag.c
block/cfq-iosched.c
drivers/ata/Kconfig
drivers/dma/Kconfig
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/input/mouse/sentelic.c
drivers/input/mouse/sentelic.h
drivers/iommu/iommu.c
drivers/md/bitmap.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/raid5.c
drivers/media/video/gspca/gspca.c
drivers/media/video/omap3isp/ispccdc.c
drivers/media/video/omap3isp/ispstat.c
drivers/mfd/ab5500-debugfs.c
drivers/mfd/ab8500-core.c
drivers/mfd/adp5520.c
drivers/mfd/da903x.c
drivers/mfd/jz4740-adc.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65910.c
drivers/mfd/twl-core.c
drivers/mfd/twl4030-irq.c
drivers/mfd/wm8994-core.c
drivers/mmc/host/mmci.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/usb/asix.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/b43/pio.c
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/of/platform.c
drivers/rtc/interface.c
drivers/usb/dwc3/core.c
drivers/usb/gadget/epautoconf.c
drivers/usb/host/isp1760-if.c
drivers/usb/musb/musb_host.c
drivers/watchdog/coh901327_wdt.c
drivers/watchdog/hpwdt.c
drivers/watchdog/iTCO_wdt.c
drivers/watchdog/sp805_wdt.c
firmware/README.AddingFirmware
fs/btrfs/async-thread.c
fs/btrfs/inode.c
fs/ceph/dir.c
fs/cifs/connect.c
fs/fs-writeback.c
fs/locks.c
fs/minix/inode.c
fs/proc/stat.c
fs/xfs/xfs_super.c
fs/xfs/xfs_sync.c
fs/xfs/xfs_sync.h
include/asm-generic/cputime.h
include/linux/kvm.h
include/linux/lglock.h
include/linux/security.h
include/net/dst.h
include/net/flow.h
include/net/ip_vs.h
include/net/sctp/structs.h
include/net/sock.h
include/trace/events/writeback.h
kernel/exit.c
kernel/futex.c
kernel/hung_task.c
kernel/ptrace.c
kernel/signal.c
kernel/time/clockevents.c
kernel/tracepoint.c
mm/filemap.c
mm/hugetlb.c
mm/mempolicy.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/l2cap_core.c
net/bluetooth/rfcomm/core.c
net/bridge/br_netfilter.c
net/core/flow.c
net/core/net-sysfs.c
net/core/sock.c
net/ipv4/ipconfig.c
net/ipv4/route.c
net/ipv6/ip6_output.c
net/llc/af_llc.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/xt_connbytes.c
net/nfc/nci/core.c
net/packet/af_packet.c
net/sched/sch_mqprio.c
net/sched/sch_netem.c
net/sched/sch_qfq.c
net/sctp/associola.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/sysctl.c
net/xfrm/xfrm_policy.c
scripts/kconfig/Makefile
scripts/recordmcount.h
security/security.c
sound/atmel/ac97c.c
sound/soc/codecs/wm8776.c
virt/kvm/assigned-dev.c

index 7945b0bd35e2ad50d7561ffa88a428c21175b50b..e2a4b5287361d25c0800954cbc79eccea88291d3 100644 (file)
@@ -1100,6 +1100,15 @@ emulate them efficiently. The fields in each entry are defined as follows:
    eax, ebx, ecx, edx: the values returned by the cpuid instruction for
          this function/index combination
 
+The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
+as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
+support.  Instead it is reported via
+
+  ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER)
+
+if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
+feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+
 4.47 KVM_PPC_GET_PVINFO
 
 Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1151,6 +1160,13 @@ following flags are specified:
 /* Depends on KVM_CAP_IOMMU */
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU    (1 << 0)
 
+The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
+isolation of the device.  Usages not specifying this flag are deprecated.
+
+Only PCI header type 0 devices with PCI BAR resources are supported by
+device assignment.  The user requesting this ioctl must have read/write
+access to the PCI sysfs resource files associated with the device.
+
 4.49 KVM_DEASSIGN_PCI_DEVICE
 
 Capability: KVM_CAP_DEVICE_DEASSIGNMENT
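
For reference, the capability checks described in the api.txt hunks above are issued against the /dev/kvm system fd with KVM_CHECK_EXTENSION. Below is a minimal userspace sketch (not part of the commit), assuming a <linux/kvm.h> recent enough to define KVM_CAP_TSC_DEADLINE_TIMER, with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* KVM_CHECK_EXTENSION returns > 0 when the capability is available. */
	int tscdl = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER);
	int iommu = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IOMMU);

	printf("TSC deadline timer: %s\n", tscdl > 0 ? "available" : "not available");
	printf("IOMMU device assignment: %s\n", iommu > 0 ? "available" : "not available");

	/*
	 * Per the documentation added above: only if KVM_CAP_TSC_DEADLINE_TIMER
	 * is present (and an in-kernel irqchip is created via KVM_CREATE_IRQCHIP,
	 * or the feature is emulated in userspace) should CPUID leaf 1 ecx bit 24
	 * be set in the entries passed to KVM_SET_CPUID2. Likewise, device
	 * assignment should always pass KVM_DEV_ASSIGN_ENABLE_IOMMU.
	 */
	return 0;
}
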
index 6afba60c390488acdfe818b98c823f0921a19749..62f1cd357ddf76ef0cc1c63e4a38133a3935dbe0 100644 (file)
@@ -1698,11 +1698,9 @@ F:       arch/x86/include/asm/tce.h
 
 CAN NETWORK LAYER
 M:     Oliver Hartkopp <socketcan@hartkopp.net>
-M:     Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
-M:     Urs Thuermann <urs.thuermann@volkswagen.de>
 L:     linux-can@vger.kernel.org
-L:     netdev@vger.kernel.org
-W:     http://developer.berlios.de/projects/socketcan/
+W:     http://gitorious.org/linux-can
+T:     git git://gitorious.org/linux-can/linux-can-next.git
 S:     Maintained
 F:     net/can/
 F:     include/linux/can.h
@@ -1713,9 +1711,10 @@ F:       include/linux/can/gw.h
 
 CAN NETWORK DRIVERS
 M:     Wolfgang Grandegger <wg@grandegger.com>
+M:     Marc Kleine-Budde <mkl@pengutronix.de>
 L:     linux-can@vger.kernel.org
-L:     netdev@vger.kernel.org
-W:     http://developer.berlios.de/projects/socketcan/
+W:     http://gitorious.org/linux-can
+T:     git git://gitorious.org/linux-can/linux-can-next.git
 S:     Maintained
 F:     drivers/net/can/
 F:     include/linux/can/dev.h
@@ -2700,7 +2699,7 @@ FIREWIRE SUBSYSTEM
 M:     Stefan Richter <stefanr@s5r6.in-berlin.de>
 L:     linux1394-devel@lists.sourceforge.net
 W:     http://ieee1394.wiki.kernel.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
 S:     Maintained
 F:     drivers/firewire/
 F:     include/linux/firewire*.h
index a43733df39788a122f645804e4a343882800f352..adddd11c3b3b8a2827dc4f591b573b1fde5768a9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
index 776d76b8cb695ff052f310d9f50a737209e96f1b..b259c7c644e357a9999c3767fd34431d8c39ae8a 100644 (file)
@@ -1246,7 +1246,7 @@ config PL310_ERRATA_588369
 
 config ARM_ERRATA_720789
        bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
-       depends on CPU_V7 && SMP
+       depends on CPU_V7
        help
          This option enables the workaround for the 720789 Cortex-A9 (prior to
          r2p0) erratum. A faulty ASID can be sent to the other CPUs for the
@@ -1282,7 +1282,7 @@ config ARM_ERRATA_743622
 
 config ARM_ERRATA_751472
        bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
-       depends on CPU_V7 && SMP
+       depends on CPU_V7
        help
          This option enables the workaround for the 751472 Cortex-A9 (prior
          to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
index f407a6b35d3dd1e6ad1e72afec38837dcfdd796a..8d8df744f7a5eb72e02bc65bf85029c2b9f60a63 100644 (file)
  */
 #define MCODE_BUFF_PER_REQ     256
 
-/*
- * Mark a _pl330_req as free.
- * We do it by writing DMAEND as the first instruction
- * because no valid request is going to have DMAEND as
- * its first instruction to execute.
- */
-#define MARK_FREE(req) do { \
-                               _emit_END(0, (req)->mc_cpu); \
-                               (req)->mc_len = 0; \
-                       } while (0)
-
 /* If the _pl330_req is available to the client */
 #define IS_FREE(req)   (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
 
@@ -301,8 +290,10 @@ struct pl330_thread {
        struct pl330_dmac *dmac;
        /* Only two at a time */
        struct _pl330_req req[2];
-       /* Index of the last submitted request */
+       /* Index of the last enqueued request */
        unsigned lstenq;
+       /* Index of the last submitted request or -1 if the DMA is stopped */
+       int req_running;
 };
 
 enum pl330_dmac_state {
@@ -778,6 +769,22 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
        writel(0, regs + DBGCMD);
 }
 
+/*
+ * Mark a _pl330_req as free.
+ * We do it by writing DMAEND as the first instruction
+ * because no valid request is going to have DMAEND as
+ * its first instruction to execute.
+ */
+static void mark_free(struct pl330_thread *thrd, int idx)
+{
+       struct _pl330_req *req = &thrd->req[idx];
+
+       _emit_END(0, req->mc_cpu);
+       req->mc_len = 0;
+
+       thrd->req_running = -1;
+}
+
 static inline u32 _state(struct pl330_thread *thrd)
 {
        void __iomem *regs = thrd->dmac->pinfo->base;
@@ -836,31 +843,6 @@ static inline u32 _state(struct pl330_thread *thrd)
        }
 }
 
-/* If the request 'req' of thread 'thrd' is currently active */
-static inline bool _req_active(struct pl330_thread *thrd,
-               struct _pl330_req *req)
-{
-       void __iomem *regs = thrd->dmac->pinfo->base;
-       u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));
-
-       if (IS_FREE(req))
-               return false;
-
-       return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
-}
-
-/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
-static inline unsigned _thrd_active(struct pl330_thread *thrd)
-{
-       if (_req_active(thrd, &thrd->req[0]))
-               return 1; /* First req active */
-
-       if (_req_active(thrd, &thrd->req[1]))
-               return 2; /* Second req active */
-
-       return 0;
-}
-
 static void _stop(struct pl330_thread *thrd)
 {
        void __iomem *regs = thrd->dmac->pinfo->base;
@@ -892,17 +874,22 @@ static bool _trigger(struct pl330_thread *thrd)
        struct _arg_GO go;
        unsigned ns;
        u8 insn[6] = {0, 0, 0, 0, 0, 0};
+       int idx;
 
        /* Return if already ACTIVE */
        if (_state(thrd) != PL330_STATE_STOPPED)
                return true;
 
-       if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
-               req = &thrd->req[1 - thrd->lstenq];
-       else if (!IS_FREE(&thrd->req[thrd->lstenq]))
-               req = &thrd->req[thrd->lstenq];
-       else
-               req = NULL;
+       idx = 1 - thrd->lstenq;
+       if (!IS_FREE(&thrd->req[idx]))
+               req = &thrd->req[idx];
+       else {
+               idx = thrd->lstenq;
+               if (!IS_FREE(&thrd->req[idx]))
+                       req = &thrd->req[idx];
+               else
+                       req = NULL;
+       }
 
        /* Return if no request */
        if (!req || !req->r)
@@ -933,6 +920,8 @@ static bool _trigger(struct pl330_thread *thrd)
        /* Only manager can execute GO */
        _execute_DBGINSN(thrd, insn, true);
 
+       thrd->req_running = idx;
+
        return true;
 }
 
@@ -1382,8 +1371,8 @@ static void pl330_dotask(unsigned long data)
 
                        thrd->req[0].r = NULL;
                        thrd->req[1].r = NULL;
-                       MARK_FREE(&thrd->req[0]);
-                       MARK_FREE(&thrd->req[1]);
+                       mark_free(thrd, 0);
+                       mark_free(thrd, 1);
 
                        /* Clear the reset flag */
                        pl330->dmac_tbd.reset_chan &= ~(1 << i);
@@ -1461,14 +1450,12 @@ int pl330_update(const struct pl330_info *pi)
 
                        thrd = &pl330->channels[id];
 
-                       active = _thrd_active(thrd);
-                       if (!active) /* Aborted */
+                       active = thrd->req_running;
+                       if (active == -1) /* Aborted */
                                continue;
 
-                       active -= 1;
-
                        rqdone = &thrd->req[active];
-                       MARK_FREE(rqdone);
+                       mark_free(thrd, active);
 
                        /* Get going again ASAP */
                        _start(thrd);
@@ -1509,7 +1496,7 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
        struct pl330_thread *thrd = ch_id;
        struct pl330_dmac *pl330;
        unsigned long flags;
-       int ret = 0, active;
+       int ret = 0, active = thrd->req_running;
 
        if (!thrd || thrd->free || thrd->dmac->state == DYING)
                return -EINVAL;
@@ -1525,28 +1512,24 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
 
                thrd->req[0].r = NULL;
                thrd->req[1].r = NULL;
-               MARK_FREE(&thrd->req[0]);
-               MARK_FREE(&thrd->req[1]);
+               mark_free(thrd, 0);
+               mark_free(thrd, 1);
                break;
 
        case PL330_OP_ABORT:
-               active = _thrd_active(thrd);
-
                /* Make sure the channel is stopped */
                _stop(thrd);
 
                /* ABORT is only for the active req */
-               if (!active)
+               if (active == -1)
                        break;
 
-               active--;
-
                thrd->req[active].r = NULL;
-               MARK_FREE(&thrd->req[active]);
+               mark_free(thrd, active);
 
                /* Start the next */
        case PL330_OP_START:
-               if (!_thrd_active(thrd) && !_start(thrd))
+               if ((active == -1) && !_start(thrd))
                        ret = -EIO;
                break;
 
@@ -1587,14 +1570,13 @@ int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
        else
                pstatus->faulting = false;
 
-       active = _thrd_active(thrd);
+       active = thrd->req_running;
 
-       if (!active) {
+       if (active == -1) {
                /* Indicate that the thread is not running */
                pstatus->top_req = NULL;
                pstatus->wait_req = NULL;
        } else {
-               active--;
                pstatus->top_req = thrd->req[active].r;
                pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
                                        ? thrd->req[1 - active].r : NULL;
@@ -1659,9 +1641,9 @@ void *pl330_request_channel(const struct pl330_info *pi)
                                thrd->free = false;
                                thrd->lstenq = 1;
                                thrd->req[0].r = NULL;
-                               MARK_FREE(&thrd->req[0]);
+                               mark_free(thrd, 0);
                                thrd->req[1].r = NULL;
-                               MARK_FREE(&thrd->req[1]);
+                               mark_free(thrd, 1);
                                break;
                        }
                }
@@ -1767,14 +1749,14 @@ static inline void _reset_thread(struct pl330_thread *thrd)
        thrd->req[0].mc_bus = pl330->mcode_bus
                                + (thrd->id * pi->mcbufsz);
        thrd->req[0].r = NULL;
-       MARK_FREE(&thrd->req[0]);
+       mark_free(thrd, 0);
 
        thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
                                + pi->mcbufsz / 2;
        thrd->req[1].mc_bus = thrd->req[0].mc_bus
                                + pi->mcbufsz / 2;
        thrd->req[1].r = NULL;
-       MARK_FREE(&thrd->req[1]);
+       mark_free(thrd, 1);
 }
 
 static int dmac_alloc_threads(struct pl330_dmac *pl330)
index 11a4192197c8fbaef84a10a53ce24fb497ae3947..cf497ce41dfe725bf5f4549faab3bc4c82a5728c 100644 (file)
@@ -18,9 +18,10 @@ CONFIG_ARCH_MXC=y
 CONFIG_ARCH_IMX_V4_V5=y
 CONFIG_ARCH_MX1ADS=y
 CONFIG_MACH_SCB9328=y
+CONFIG_MACH_APF9328=y
 CONFIG_MACH_MX21ADS=y
 CONFIG_MACH_MX25_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX25=y
+CONFIG_MACH_EUKREA_CPUIMX25SD=y
 CONFIG_MACH_MX27ADS=y
 CONFIG_MACH_PCM038=y
 CONFIG_MACH_CPUIMX27=y
@@ -72,17 +73,16 @@ CONFIG_MTD_CFI_GEOMETRY=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_MXC=y
 CONFIG_MTD_UBI=y
 CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_AT24=y
 CONFIG_EEPROM_AT25=y
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
 CONFIG_DM9000=y
+CONFIG_SMC91X=y
 CONFIG_SMC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
@@ -100,6 +100,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_IMX=y
 CONFIG_SPI=y
 CONFIG_SPI_IMX=y
+CONFIG_SPI_SPIDEV=y
 CONFIG_W1=y
 CONFIG_W1_MASTER_MXC=y
 CONFIG_W1_SLAVE_THERM=y
@@ -139,6 +140,7 @@ CONFIG_MMC=y
 CONFIG_MMC_MXC=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_MC13783=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
index 90ec247f3b375f498e20f46c20e25fe2c943b54a..cc8d4bd6d0f71666f4fe369dab28908bbd7af741 100644 (file)
@@ -110,11 +110,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
                .pfn            = __phys_to_pfn(EXYNOS4_PA_DMC0),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
-       }, {
-               .virtual        = (unsigned long)S5P_VA_SROMC,
-               .pfn            = __phys_to_pfn(EXYNOS4_PA_SROMC),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_USB_HSPHY,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_HSPHY),
index c44aa974e79c473d123631236269bdb8c2daa8a3..0e6f1af260b651b1894a9203f449ae8ecb5cac26 100644 (file)
@@ -132,7 +132,7 @@ config MACH_MX25_3DS
        select IMX_HAVE_PLATFORM_MXC_NAND
        select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
 
-config MACH_EUKREA_CPUIMX25
+config MACH_EUKREA_CPUIMX25SD
        bool "Support Eukrea CPUIMX25 Platform"
        select SOC_IMX25
        select IMX_HAVE_PLATFORM_FLEXCAN
@@ -148,7 +148,7 @@ config MACH_EUKREA_CPUIMX25
 
 choice
        prompt "Baseboard"
-       depends on MACH_EUKREA_CPUIMX25
+       depends on MACH_EUKREA_CPUIMX25SD
        default MACH_EUKREA_MBIMXSD25_BASEBOARD
 
 config MACH_EUKREA_MBIMXSD25_BASEBOARD
@@ -542,7 +542,7 @@ config MACH_MX35_3DS
          Include support for MX35PDK platform. This includes specific
          configurations for the board and its peripherals.
 
-config MACH_EUKREA_CPUIMX35
+config MACH_EUKREA_CPUIMX35SD
        bool "Support Eukrea CPUIMX35 Platform"
        select SOC_IMX35
        select IMX_HAVE_PLATFORM_FLEXCAN
@@ -560,7 +560,7 @@ config MACH_EUKREA_CPUIMX35
 
 choice
        prompt "Baseboard"
-       depends on MACH_EUKREA_CPUIMX35
+       depends on MACH_EUKREA_CPUIMX35SD
        default MACH_EUKREA_MBIMXSD35_BASEBOARD
 
 config MACH_EUKREA_MBIMXSD35_BASEBOARD
index aba73214c2a8cd640e0f4371caf4ce99ac53dc0f..d97f409ce98be4a9d796dc21d3dc30fb9c7de9c7 100644 (file)
@@ -24,7 +24,7 @@ obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
 
 # i.MX25 based machines
 obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX25) += mach-eukrea_cpuimx25.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX25SD) += mach-eukrea_cpuimx25.o
 obj-$(CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD) += eukrea_mbimxsd25-baseboard.o
 
 # i.MX27 based machines
@@ -57,7 +57,7 @@ obj-$(CONFIG_MACH_BUG) += mach-bug.o
 # i.MX35 based machines
 obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o
 obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX35SD) += mach-cpuimx35.o
 obj-$(CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD) += eukrea_mbimxsd35-baseboard.o
 obj-$(CONFIG_MACH_VPR200) += mach-vpr200.o
 
index 8116f119517d8065ca0653a603863275e29be71b..ac8238caecb98a98bf6c326e267fae24d88bb861 100644 (file)
@@ -507,7 +507,7 @@ static struct clk_lookup lookups[] = {
 
 int __init mx35_clocks_init()
 {
-       unsigned int cgr2 = 3 << 26, cgr3 = 0;
+       unsigned int cgr2 = 3 << 26;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
        cgr2 |= 3 << 16;
@@ -521,6 +521,12 @@ int __init mx35_clocks_init()
        __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
        __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
                        CCM_BASE + CCM_CGR1);
+       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+       __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+       clk_enable(&iim_clk);
+       imx_print_silicon_rev("i.MX35", mx35_revision());
+       clk_disable(&iim_clk);
 
        /*
         * Check if we came up in internal boot mode. If yes, we need some
@@ -529,17 +535,11 @@ int __init mx35_clocks_init()
         */
        if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
                /* Additionally turn on UART1, SCC, and IIM clocks */
-               cgr2 |= 3 << 16 | 3 << 4;
-               cgr3 |= 3 << 2;
+               clk_enable(&iim_clk);
+               clk_enable(&uart1_clk);
+               clk_enable(&scc_clk);
        }
 
-       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
-       __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
-
-       clk_enable(&iim_clk);
-       imx_print_silicon_rev("i.MX35", mx35_revision());
-       clk_disable(&iim_clk);
-
 #ifdef CONFIG_MXC_USE_EPIT
        epit_timer_init(&epit1_clk,
                        MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
index 66af2e8f7e576dffd5372862653d397d22811069..362aae780601efc41e346a8a7beade651f610d83 100644 (file)
@@ -53,12 +53,18 @@ static const struct imxi2c_platform_data
        .bitrate =              100000,
 };
 
+#define TSC2007_IRQGPIO                IMX_GPIO_NR(3, 2)
+static int tsc2007_get_pendown_state(void)
+{
+       return !gpio_get_value(TSC2007_IRQGPIO);
+}
+
 static struct tsc2007_platform_data tsc2007_info = {
        .model                  = 2007,
        .x_plate_ohms           = 180,
+       .get_pendown_state = tsc2007_get_pendown_state,
 };
 
-#define TSC2007_IRQGPIO                IMX_GPIO_NR(3, 2)
 static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
        {
                I2C_BOARD_INFO("pcf8563", 0x51),
index 7f8915ad50990b1af2e877ed169c32cd0d15cd02..eef43e2e163e92224e23ea3c5b6250c14d2747fb 100644 (file)
@@ -3247,18 +3247,14 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
 
 /* 3430ES1-only hwmods */
 static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
-       &omap3xxx_iva_hwmod,
        &omap3430es1_dss_core_hwmod,
-       &omap3xxx_mailbox_hwmod,
        NULL
 };
 
 /* 3430ES2+-only hwmods */
 static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = {
-       &omap3xxx_iva_hwmod,
        &omap3xxx_dss_core_hwmod,
        &omap3xxx_usbhsotg_hwmod,
-       &omap3xxx_mailbox_hwmod,
        NULL
 };
 
index 2c559ac381425d325757c68d83e73c37c77e6b5d..e70a73731eaacb823b15f625ddb614c99b44c5f2 100644 (file)
@@ -363,11 +363,13 @@ __v7_setup:
        orreq   r10, r10, #1 << 6               @ set bit #6
        mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
 #endif
-#ifdef CONFIG_ARM_ERRATA_751472
-       cmp     r6, #0x30                       @ present prior to r3p0
+#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
+       ALT_SMP(cmp r6, #0x30)                  @ present prior to r3p0
+       ALT_UP_B(1f)
        mrclt   p15, 0, r10, c15, c0, 1         @ read diagnostic register
        orrlt   r10, r10, #1 << 11              @ set bit #11
        mcrlt   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+1:
 #endif
 
 3:     mov     r10, #0
index adbff706ef6f88f0757a6f13d5d2f748d56109a3..73db34bf588ae7985fc50c71e9e2a4aa3af7a5f8 100644 (file)
@@ -98,7 +98,7 @@ static int mxc_set_target(struct cpufreq_policy *policy,
        return ret;
 }
 
-static int __init mxc_cpufreq_init(struct cpufreq_policy *policy)
+static int mxc_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret;
        int i;
index 88fd40452567a30aeb6f2b55ba1e3f4684959a78..477971b009308a929c53c19ce40c4b35c425d623 100644 (file)
@@ -98,6 +98,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
        case MACH_TYPE_PCM043:
        case MACH_TYPE_LILLY1131:
        case MACH_TYPE_VPR200:
+       case MACH_TYPE_EUKREA_CPUIMX35SD:
                uart_base = MX3X_UART1_BASE_ADDR;
                break;
        case MACH_TYPE_MAGX_ZN5:
index 845de59f07edef9058a40c0afa5197d6313f8ce0..e032717f7d02c211ee8cad0d0200cec5f83974cf 100644 (file)
@@ -77,6 +77,15 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
                do_div(c, period_ns);
                duty_cycles = c;
 
+               /*
+                * according to imx pwm RM, the real period value should be
+                * PERIOD value in PWMPR plus 2.
+                */
+               if (period_cycles > 2)
+                       period_cycles -= 2;
+               else
+                       period_cycles = 0;
+
                writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
                writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
 
index 41ab97ebe4cfc8877fc58cd09f446c02636a030d..10d160888133c72101101f8e1dfafb53ddaf1af9 100644 (file)
@@ -384,12 +384,16 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
        struct orion_gpio_chip *ochip;
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
+       char gc_label[16];
 
        if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips))
                return;
 
+       snprintf(gc_label, sizeof(gc_label), "orion_gpio%d",
+               orion_gpio_chip_count);
+
        ochip = orion_gpio_chips + orion_gpio_chip_count;
-       ochip->chip.label = "orion_gpio";
+       ochip->chip.label = kstrdup(gc_label, GFP_KERNEL);
        ochip->chip.request = orion_gpio_request;
        ochip->chip.direction_input = orion_gpio_direction_input;
        ochip->chip.get = orion_gpio_get;
index dac4760c0f0aeb58b4c68b5b01913f41dc1430b4..95509d8eb140fda1367658d048660790253b35d5 100644 (file)
@@ -202,14 +202,6 @@ extern int s3c_plltab_register(struct cpufreq_frequency_table *plls,
 extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void);
 extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void);
 
-extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
-                                    struct s3c_cpufreq_config *cfg,
-                                    union s3c_iobank *iob);
-
-extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
-                                    struct s3c_cpufreq_config *cfg,
-                                    union s3c_iobank *iob);
-
 #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS
 #define s3c_cpufreq_debugfs_call(x) x
 #else
@@ -226,6 +218,10 @@ extern void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg);
 extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg);
 
 #ifdef CONFIG_S3C2410_IOTIMING
+extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
+                                    struct s3c_cpufreq_config *cfg,
+                                    union s3c_iobank *iob);
+
 extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg,
                                 struct s3c_iotimings *iot);
 
@@ -235,6 +231,7 @@ extern int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg,
 extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
                                 struct s3c_iotimings *iot);
 #else
+#define s3c2410_iotiming_debugfs NULL
 #define s3c2410_iotiming_calc NULL
 #define s3c2410_iotiming_get NULL
 #define s3c2410_iotiming_set NULL
@@ -242,8 +239,10 @@ extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
 
 /* S3C2412 compatible routines */
 
-extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
-                               struct s3c_iotimings *timings);
+#ifdef CONFIG_S3C2412_IOTIMING
+extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
+                                    struct s3c_cpufreq_config *cfg,
+                                    union s3c_iobank *iob);
 
 extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
                                struct s3c_iotimings *timings);
@@ -253,6 +252,12 @@ extern int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg,
 
 extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg,
                                 struct s3c_iotimings *iot);
+#else
+#define s3c2412_iotiming_debugfs NULL
+#define s3c2412_iotiming_calc NULL
+#define s3c2412_iotiming_get NULL
+#define s3c2412_iotiming_set NULL
+#endif /* CONFIG_S3C2412_IOTIMING */
 
 #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUG
 #define s3c_freq_dbg(x...) printk(KERN_INFO x)
index 6073b187528a26a8ac16826aa98cd7986c5724ed..5a274af31b2b82b8f1e561fe66d7c2f0ce58d96b 100644 (file)
@@ -60,6 +60,7 @@ typedef u64 cputime64_t;
  */
 #define cputime_to_usecs(__ct)         ((__ct) / NSEC_PER_USEC)
 #define usecs_to_cputime(__usecs)      ((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs)    usecs_to_cputime(__usecs)
 
 /*
  * Convert cputime <-> seconds
index 1cf20bdfbecaada5bb2b88d439d776c32af2558c..98b7c4b49c9d0586cce4d7132593ee359c3fd803 100644 (file)
@@ -150,6 +150,8 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
        return ct;
 }
 
+#define usecs_to_cputime64(us)         usecs_to_cputime(us)
+
 /*
  * Convert cputime <-> seconds
  */
index d4df013ad77964353fdf5a59c8ed6afde4ed1ce2..69c7377d2071aa821fd1ff9529cafd4e49d10081 100644 (file)
@@ -381,39 +381,6 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 }
 #endif
 
-static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
-                                            unsigned long pte_index)
-{
-       unsigned long rb, va_low;
-
-       rb = (v & ~0x7fUL) << 16;               /* AVA field */
-       va_low = pte_index >> 3;
-       if (v & HPTE_V_SECONDARY)
-               va_low = ~va_low;
-       /* xor vsid from AVA */
-       if (!(v & HPTE_V_1TB_SEG))
-               va_low ^= v >> 12;
-       else
-               va_low ^= v >> 24;
-       va_low &= 0x7ff;
-       if (v & HPTE_V_LARGE) {
-               rb |= 1;                        /* L field */
-               if (cpu_has_feature(CPU_FTR_ARCH_206) &&
-                   (r & 0xff000)) {
-                       /* non-16MB large page, must be 64k */
-                       /* (masks depend on page size) */
-                       rb |= 0x1000;           /* page encoding in LP field */
-                       rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
-                       rb |= (va_low & 0xfe);  /* AVAL field (P7 doesn't seem to care) */
-               }
-       } else {
-               /* 4kB page */
-               rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of VA */
-       }
-       rb |= (v >> 54) & 0x300;                /* B field */
-       return rb;
-}
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3                        0x113724FA
index e43fe42b9875308b49e13faa8a67ef91fe125200..d0ac94f98f9e3a2dcca21ff90631b841e4ab5060 100644 (file)
@@ -29,4 +29,37 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
 
 #define SPAPR_TCE_SHIFT                12
 
+static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+                                            unsigned long pte_index)
+{
+       unsigned long rb, va_low;
+
+       rb = (v & ~0x7fUL) << 16;               /* AVA field */
+       va_low = pte_index >> 3;
+       if (v & HPTE_V_SECONDARY)
+               va_low = ~va_low;
+       /* xor vsid from AVA */
+       if (!(v & HPTE_V_1TB_SEG))
+               va_low ^= v >> 12;
+       else
+               va_low ^= v >> 24;
+       va_low &= 0x7ff;
+       if (v & HPTE_V_LARGE) {
+               rb |= 1;                        /* L field */
+               if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+                   (r & 0xff000)) {
+                       /* non-16MB large page, must be 64k */
+                       /* (masks depend on page size) */
+                       rb |= 0x1000;           /* page encoding in LP field */
+                       rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+                       rb |= (va_low & 0xfe);  /* AVAL field (P7 doesn't seem to care) */
+               }
+       } else {
+               /* 4kB page */
+               rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of VA */
+       }
+       rb |= (v >> 54) & 0x300;                /* B field */
+       return rb;
+}
+
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
index 0cb137a9b0381f1175d5962c67aac18f566af662..336983da9e726c1e144b9a29be03934308800958 100644 (file)
@@ -538,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
        tpaca->kvm_hstate.napping = 0;
        vcpu->cpu = vc->pcpu;
        smp_wmb();
-#ifdef CONFIG_PPC_ICP_NATIVE
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
        if (vcpu->arch.ptid) {
                tpaca->cpu_start = 0x80;
                wmb();
index 3c791e1eb675299c96e7b428b499e7bec4e5f434..e2cfb9e1e20ebdea21c726e946e1f37aef1c8c6f 100644 (file)
@@ -658,10 +658,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;
 
+#ifdef CONFIG_KVM_BOOK3S_64_PR
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
+#endif
 
                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
index 26d20903f2bc5c8cce1a94637f58ea9dea73a696..8c0d45a6faf7f49db9aef9c9a495c1ef20d155d8 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/export.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
index 0814348782966ac99c571990c89975d06aedffe1..b9acaaa175d8af35a25ba42d8157f5f5bec299df 100644 (file)
@@ -87,6 +87,8 @@ usecs_to_cputime(const unsigned int m)
        return (cputime_t) m * 4096;
 }
 
+#define usecs_to_cputime64(m)          usecs_to_cputime(m)
+
 /*
  * Convert cputime to milliseconds and back.
  */
index b272cda35a0125904bc6f17eb1c5d14054dec24e..af5755d20fbe91eb43b682e99aa8f836bfafa6ad 100644 (file)
@@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
        if (!irq)
                return -ENOMEM;
 
-       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
-               return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;
+       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+               return -EINVAL;
 
        return irq;
 }
index 76e3f1cd03696997964db7814de98a578a9eb4a7..405f2620392f5e32d393166520e707b2d039b3bb 100644 (file)
@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
                return HRTIMER_NORESTART;
 }
 
-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
+static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
 {
+       struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
        struct kvm_timer *pt = &ps->pit_timer;
        s64 interval;
 
+       if (!irqchip_in_kernel(kvm))
+               return;
+
        interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
 
        pr_debug("create pit timer, interval is %llu nsec\n", interval);
@@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
         /* FIXME: enhance mode 4 precision */
        case 4:
                if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
-                       create_pit_timer(ps, val, 0);
+                       create_pit_timer(kvm, val, 0);
                }
                break;
        case 2:
        case 3:
                if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
-                       create_pit_timer(ps, val, 1);
+                       create_pit_timer(kvm, val, 1);
                }
                break;
        default:
index c38efd7b792eec9aab414892c23818446ad3fad0..4c938da2ba00b40dc430d5a7854b0ee62b6ce631 100644 (file)
@@ -602,7 +602,6 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
        struct kvm_lapic *apic = vcpu->arch.apic;
-       u32 timer_mode_mask;
 
        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
@@ -615,15 +614,12 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
                        best->ecx |= bit(X86_FEATURE_OSXSAVE);
        }
 
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-               best->function == 0x1) {
-               best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER);
-               timer_mode_mask = 3 << 17;
-       } else
-               timer_mode_mask = 1 << 17;
-
-       if (apic)
-               apic->lapic_timer.timer_mode_mask = timer_mode_mask;
+       if (apic) {
+               if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+                       apic->lapic_timer.timer_mode_mask = 3 << 17;
+               else
+                       apic->lapic_timer.timer_mode_mask = 1 << 17;
+       }
 }
 
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -2135,6 +2131,9 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_TSC_CONTROL:
                r = kvm_has_tsc_control;
                break;
+       case KVM_CAP_TSC_DEADLINE_TIMER:
+               r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
+               break;
        default:
                r = 0;
                break;
index bfab3fa10edc63e50b4184d00e75566678242a0d..7b65f752c5f8fd79af2c6b4afb342988bdd8d56c 100644 (file)
@@ -568,8 +568,8 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf] - addrs[i];
                                        break;
                                }
                                if (filter[i].jt != 0) {
-                                       if (filter[i].jf)
-                                               t_offset += is_near(f_offset) ? 2 : 6;
+                                       if (filter[i].jf && f_offset)
+                                               t_offset += is_near(f_offset) ? 2 : 5;
                                        EMIT_COND_JMP(t_op, t_offset);
                                        if (filter[i].jf)
                                                EMIT_JMP(f_offset);
index 164cd0059706214e53c01b780b353fca98d9b829..623e1cd4cffe997e71fbb54577bd42f220af64b3 100644 (file)
@@ -311,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       if (rq_data_dir(rq) == WRITE)
+       if (!reading)
                bio->bi_rw |= REQ_WRITE;
 
        if (do_copy)
index e74d6d13838f3ae9f717911a38d21885623029c7..4af6f5cc1167a65494dc52b39cd3f1dfd1271087 100644 (file)
@@ -282,18 +282,9 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
 void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
-       int tag = rq->tag;
+       unsigned tag = rq->tag; /* negative tags invalid */
 
-       BUG_ON(tag == -1);
-
-       if (unlikely(tag >= bqt->max_depth)) {
-               /*
-                * This can happen after tag depth has been reduced.
-                * But tag shouldn't be larger than real_max_depth.
-                */
-               WARN_ON(tag >= bqt->real_max_depth);
-               return;
-       }
+       BUG_ON(tag >= bqt->real_max_depth);
 
        list_del_init(&rq->queuelist);
        rq->cmd_flags &= ~REQ_QUEUED;
index 4c12869fcf77e044f75dfd3f4728c3343d9ef1f3..3548705b04e482a4405097217011256105c4bdca 100644 (file)
@@ -1655,6 +1655,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
                    struct request *next)
 {
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+
        /*
         * reposition in fifo if next is older than rq
         */
@@ -1669,6 +1671,16 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
        cfq_remove_request(next);
        cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
                                        rq_data_dir(next), rq_is_sync(next));
+
+       cfqq = RQ_CFQQ(next);
+       /*
+        * all requests of this queue are merged to other queues, delete it
+        * from the service tree. If it's the active_queue,
+        * cfq_dispatch_requests() will choose to expire it or do idle
+        */
+       if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
+           cfqq != cfqd->active_queue)
+               cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
index 6bdedd7cca2cd3cd630370732b71b8b530992ca9..cf047c406d92797777d6255122f23b5a8297c58a 100644 (file)
@@ -820,7 +820,7 @@ config PATA_PLATFORM
 
 config PATA_OF_PLATFORM
        tristate "OpenFirmware platform device PATA support"
-       depends on PATA_PLATFORM && OF
+       depends on PATA_PLATFORM && OF && OF_IRQ
        help
          This option enables support for generic directly connected ATA
          devices commonly found on embedded systems with OpenFirmware
index ab8f469f5cf8154a244a59759cbadac01279b1d9..5a99bb3f255ae7c34fedc540949ab0163a15d5a5 100644 (file)
@@ -124,7 +124,7 @@ config MV_XOR
 
 config MX3_IPU
        bool "MX3x Image Processing Unit support"
-       depends on ARCH_MX3
+       depends on SOC_IMX31 || SOC_IMX35
        select DMA_ENGINE
        default y
        help
@@ -216,7 +216,7 @@ config PCH_DMA
 
 config IMX_SDMA
        tristate "i.MX SDMA support"
-       depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+       depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
        select DMA_ENGINE
        help
          Support the i.MX SDMA engine. This engine is integrated into
index c681dc149d2a121fe55354143f50465d6b0b75d6..b9da8900ae4eaef10c12f2d68da79a226f9f3260 100644 (file)
@@ -756,9 +756,9 @@ intel_enable_semaphores(struct drm_device *dev)
        if (i915_semaphores >= 0)
                return i915_semaphores;
 
-       /* Enable semaphores on SNB when IO remapping is off */
+       /* Disable semaphores on SNB */
        if (INTEL_INFO(dev)->gen == 6)
-               return !intel_iommu_enabled;
+               return 0;
 
        return 1;
 }
index d809b038ca88a8e29276773c06d3da8663395e3d..daa5743ccbd63ad87a45697bf49b13fb5538d9e1 100644 (file)
@@ -7922,13 +7922,11 @@ static bool intel_enable_rc6(struct drm_device *dev)
                return 0;
 
        /*
-        * Enable rc6 on Sandybridge if DMA remapping is disabled
+        * Disable rc6 on Sandybridge
         */
        if (INTEL_INFO(dev)->gen == 6) {
-               DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
-                                intel_iommu_enabled ? "true" : "false",
-                                !intel_iommu_enabled ? "en" : "dis");
-               return !intel_iommu_enabled;
+               DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+               return 0;
        }
        DRM_DEBUG_DRIVER("RC6 enabled\n");
        return 1;
index 5e00d1670aa9964b8d49d0e289e9da5211e501d2..92c9628c572daa2495a685b0a7898273957ffc73 100644 (file)
@@ -3276,6 +3276,18 @@ int evergreen_init(struct radeon_device *rdev)
                        rdev->accel_working = false;
                }
        }
+
+       /* Don't start up if the MC ucode is missing on BTC parts.
+        * The default clocks and voltages before the MC ucode
+        * is loaded are not suffient for advanced operations.
+        */
+       if (ASIC_IS_DCE5(rdev)) {
+               if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+                       DRM_ERROR("radeon: MC ucode required for NI+.\n");
+                       return -EINVAL;
+               }
+       }
+
        return 0;
 }
 
index d24baf30efcb8e583ab38b0540fe508369e47423..5082d17d14dcda9733ca6d79b8d70513a7bd9944 100644 (file)
@@ -2560,7 +2560,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 
        rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
        rdev->pm.current_clock_mode_index = 0;
-       rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+       if (rdev->pm.default_power_state_index >= 0)
+               rdev->pm.current_vddc =
+                       rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+       else
+               rdev->pm.current_vddc = 0;
 }
 
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
index 8aa1dbb45c67efad383e6f938383efeb11589210..f94b33ae221546a9d9170f3e0eb6b8292776721b 100644 (file)
@@ -1093,7 +1093,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *bo = NULL;
        struct ttm_base_object *user_obj;
-       u64 required_size;
        int ret;
 
        /**
@@ -1102,8 +1101,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         * requested framebuffer.
         */
 
-       required_size = mode_cmd->pitch * mode_cmd->height;
-       if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+       if (!vmw_kms_validate_mode_vram(dev_priv,
+                                       mode_cmd->pitch,
+                                       mode_cmd->height)) {
                DRM_ERROR("VRAM size is too small for requested mode.\n");
                return ERR_PTR(-ENOMEM);
        }
index c5b12d2e955a5cdb8c968c3e31d86bcb99e416b3..86d6f39178b0d556364df951ca33575598bfa416 100644 (file)
@@ -2,7 +2,7 @@
  * Finger Sensing Pad PS/2 mouse driver.
  *
  * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
@@ -162,7 +162,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
        ps2_sendbyte(ps2dev, v, FSP_CMD_TIMEOUT2);
 
        if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
-               return -1;
+               goto out;
 
        if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
                /* inversion is required */
@@ -261,7 +261,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
        ps2_sendbyte(ps2dev, 0x88, FSP_CMD_TIMEOUT2);
 
        if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
-               return -1;
+               goto out;
 
        if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
                ps2_sendbyte(ps2dev, 0x47, FSP_CMD_TIMEOUT2);
@@ -309,7 +309,7 @@ static int fsp_get_buttons(struct psmouse *psmouse, int *btn)
        };
        int val;
 
-       if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS1, &val) == -1)
+       if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS, &val) == -1)
                return -EIO;
 
        *btn = buttons[(val & 0x30) >> 4];
index ed1395ac7b8b3e11960ae51c7c92c7f51b615fc9..2e4af24f8c1586b6ecfb3c7f8325cbeb34eb4ae5 100644 (file)
@@ -2,7 +2,7 @@
  * Finger Sensing Pad PS/2 mouse driver.
  *
  * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
@@ -33,6 +33,7 @@
 /* Finger-sensing Pad control registers */
 #define        FSP_REG_SYSCTL1         0x10
 #define        FSP_BIT_EN_REG_CLK      BIT(5)
+#define        FSP_REG_TMOD_STATUS     0x20
 #define        FSP_REG_OPC_QDOWN       0x31
 #define        FSP_BIT_EN_OPC_TAG      BIT(7)
 #define        FSP_REG_OPTZ_XLO        0x34
index 2fb2963df55376a3a8efbf09490457e08b28b836..5b5fa5cdaa3108da74b7358ae187dd4ee8a00181 100644 (file)
@@ -90,7 +90,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;
 
-       domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;
 
index b6907118283a627a656fbe408e368da9853ff634..6d03774b176ec8236d9cfb72654bedf600f052d2 100644 (file)
@@ -1393,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                         atomic_read(&bitmap->behind_writes),
                         bitmap->mddev->bitmap_info.max_write_behind);
        }
-       if (bitmap->mddev->degraded)
-               /* Never clear bits or update events_cleared when degraded */
-               success = 0;
 
        while (sectors) {
                sector_t blocks;
@@ -1409,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                        return;
                }
 
-               if (success &&
+               if (success && !bitmap->mddev->degraded &&
                    bitmap->events_cleared < bitmap->mddev->events) {
                        bitmap->events_cleared = bitmap->mddev->events;
                        bitmap->need_sync = 1;
index c3273efd08cb6dce7a19cd8d193e98ff23600bb7..627456542fb3d0d1f1d18780db24a5531b29ce32 100644 (file)
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
                return -EINVAL;
 
        rdev->raid_disk = rdev->saved_raid_disk;
+       rdev->saved_raid_disk = -1;
 
        newconf = linear_conf(mddev,mddev->raid_disks+1);
 
index ee981737edfcf3ff674749116d44d89bd45be751..f47f1f8ac44bc16677b212f35b398dae093dab87 100644 (file)
@@ -7360,8 +7360,7 @@ static int remove_and_add_spares(struct mddev *mddev)
                                        spares++;
                                        md_new_event(mddev);
                                        set_bit(MD_CHANGE_DEVS, &mddev->flags);
-                               } else
-                                       break;
+                               }
                        }
                }
        }
index 31670f8d6b65789c071c1fd8d212c168138e14d8..858fdbb7eb07a24ceb18a4d6214218c9205089e7 100644 (file)
@@ -3065,11 +3065,17 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        }
                } else if (test_bit(In_sync, &rdev->flags))
                        set_bit(R5_Insync, &dev->flags);
-               else {
+               else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
                        /* in sync if before recovery_offset */
-                       if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
-                               set_bit(R5_Insync, &dev->flags);
-               }
+                       set_bit(R5_Insync, &dev->flags);
+               else if (test_bit(R5_UPTODATE, &dev->flags) &&
+                        test_bit(R5_Expanded, &dev->flags))
+                       /* If we've reshaped into here, we assume it is Insync.
+                        * We will shortly update recovery_offset to make
+                        * it official.
+                        */
+                       set_bit(R5_Insync, &dev->flags);
+
                if (rdev && test_bit(R5_WriteError, &dev->flags)) {
                        clear_bit(R5_Insync, &dev->flags);
                        if (!test_bit(Faulty, &rdev->flags)) {
index 881e04c7ffe6dd91f5a96f12aaebe236a7bfba5c..2ca10dfec91fd2347532d35e22ea44fd4ee01298 100644 (file)
@@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
        gspca_dev->usb_err = 0;
 
        /* do the specific subdriver stuff before endpoint selection */
-       gspca_dev->alt = 0;
+       intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+       gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0;
        if (gspca_dev->sd_desc->isoc_init) {
                ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
                if (ret < 0)
                        goto unlock;
        }
-       intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
        xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
                                   : USB_ENDPOINT_XFER_ISOC;
 
@@ -957,7 +957,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
                                ret = -EIO;
                                goto out;
                        }
-                       alt = ep_tb[--alt_idx].alt;
+                       gspca_dev->alt = ep_tb[--alt_idx].alt;
                }
        }
 out:
index b0b0fa5a3572fb834a87c45dba668b798524dfad..54a4a3f22e2e4187c5651aaa42cf73dcf9141059 100644 (file)
@@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
 {
        struct isp_pipeline *pipe =
                to_isp_pipeline(&ccdc->video_out.video.entity);
-       struct video_device *vdev = &ccdc->subdev.devnode;
+       struct video_device *vdev = ccdc->subdev.devnode;
        struct v4l2_event event;
 
        memset(&event, 0, sizeof(event));
index 68d539456c552aa0c5ee16bd999ed09f41dee712..bc0b2c7349b97894d62c6fcdb876eff8613634f7 100644 (file)
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
 
 static void isp_stat_queue_event(struct ispstat *stat, int err)
 {
-       struct video_device *vdev = &stat->subdev.devnode;
+       struct video_device *vdev = stat->subdev.devnode;
        struct v4l2_event event;
        struct omap3isp_stat_event_status *status = (void *)event.u.data;
 
index 43c0ebb81956188b22f7ce2953eddc936f39ae3a..b7b2d3483fd4e1e1f8bb5b6a2feb6e7c349c57f3 100644 (file)
@@ -4,7 +4,7 @@
  * Debugfs support for the AB5500 MFD driver
  */
 
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/mfd/ab5500/ab5500.h>
index 1e9173804ede2bacb93a7179d5d7c817c79dfcf5..d3d572b2317b888be174bf52d3d17c65b391ef07 100644 (file)
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
 
 static struct resource __devinitdata ab8500_chargalg_resources[] = {};
 
+#ifdef CONFIG_DEBUG_FS
 static struct resource __devinitdata ab8500_debug_resources[] = {
        {
                .name   = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
                .flags  = IORESOURCE_IRQ,
        },
 };
+#endif
 
 static struct resource __devinitdata ab8500_usb_resources[] = {
        {
index f1d88483112cac13565cce2a8f88d3092c9d516c..8d816cce8322ebecdf8d40e29f0c0b0a3aedcbdb 100644 (file)
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
 
        ret = __adp5520_read(chip->client, reg, &reg_val);
 
-       if (!ret && ((reg_val & bit_mask) == 0)) {
+       if (!ret && ((reg_val & bit_mask) != bit_mask)) {
                reg_val |= bit_mask;
                ret = __adp5520_write(chip->client, reg, reg_val);
        }
index 1b79c37fd59901b882fc0b995e6182fe43a4eedb..1924b857a0fbf6355d9acfcd96fa387b841c6976 100644 (file)
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __da903x_write(chip->client, reg, reg_val);
        }
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
        struct da903x_chip *chip = i2c_get_clientdata(client);
 
        da903x_remove_subdevs(chip);
+       free_irq(client->irq, chip);
        kfree(chip);
        return 0;
 }
index 1e9ee533eacb8d204e860f9881d58e2675adf126..ef39528088f2298a47e7f62f69317b2a6af428ee 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
index bba26d96c24075a3cbca9a320fabe3177b4de1a8..a5ddf31b60ca89d3f228997f7da526b126449d94 100644 (file)
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
        }
index 6f5b8cf2f652b8edf6accf9db107d8f63ea72c23..c1da84bc1573f563c4b698f0bc786068e7f911bc 100644 (file)
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
                goto out;
        }
 
-       data &= mask;
+       data &= ~mask;
        err = tps65910_i2c_write(tps65910, reg, 1, &data);
        if (err)
                dev_err(tps65910->dev, "write to reg %x failed\n", reg);
index bfbd66021afd383703fad96e890975853c2bf063..61e70cfaa774fb977adcba8cc697a06edd673210 100644 (file)
@@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /*
         * [MSG1]: fill the register address data
@@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /* [MSG1] fill the register address data */
        msg = &twl->xfer_msg[0];
index f062c8cc6c38f3e40337444b91111646afefde88..29f11e0765feef54093b839b6e288ae56bc0bf3f 100644 (file)
@@ -432,6 +432,7 @@ struct sih_agent {
        u32                     edge_change;
 
        struct mutex            irq_lock;
+       char                    *irq_name;
 };
 
 /*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
  * Generic handler for SIH interrupts ... we "know" this is called
  * in task context, with IRQs enabled.
  */
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
 {
        struct sih_agent *agent = irq_get_handler_data(irq);
        const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                pr_err("twl4030: %s SIH, read ISR error %d\n",
                        sih->name, isr);
                /* REVISIT:  recover; eventually mask it all, etc */
-               return;
+               return IRQ_HANDLED;
        }
 
        while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                        pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
                                sih->name, irq);
        }
+       return IRQ_HANDLED;
 }
 
 static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@ int twl4030_sih_setup(int module)
                activate_irq(irq);
        }
 
-       status = irq_base;
        twl4030_irq_next += i;
 
        /* replace generic PIH handler (handle_simple_irq) */
        irq = sih_mod + twl4030_irq_base;
        irq_set_handler_data(irq, agent);
-       irq_set_chained_handler(irq, handle_twl4030_sih);
+       agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+       status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+                                     agent->irq_name ?: sih->name, NULL);
 
        pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
                        irq, irq_base, twl4030_irq_next - 1);
 
-       return status;
+       return status < 0 ? status : irq_base;
 }
 
 /* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
        }
 
        /* install an irq handler to demultiplex the TWL4030 interrupt */
-       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
-                                       "TWL4030-PIH", NULL);
+       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+                                     IRQF_ONESHOT,
+                                     "TWL4030-PIH", NULL);
        if (status < 0) {
                pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
                goto fail_rqirq;
index 5d6ba132837e8efb5f470d48784d5bb5f9809509..61894fced8ea281570bf80320fd593efa0d25740 100644 (file)
@@ -239,6 +239,7 @@ static int wm8994_suspend(struct device *dev)
 
        switch (wm8994->type) {
        case WM8958:
+       case WM1811:
                ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
                if (ret < 0) {
                        dev_err(dev, "Failed to read power status: %d\n", ret);
index 50b5f9926f6462d7836434c89a71213f418faa9c..0726e59fd418fb4b8520de940d76214db15c2ccc 100644 (file)
@@ -675,7 +675,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
 {
        /* First check for errors */
-       if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+       if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+                     MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                u32 remain, success;
 
                /* Terminate the DMA transfer */
@@ -754,8 +755,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
        }
 
        if (!cmd->data || cmd->error) {
-               if (host->data)
+               if (host->data) {
+                       /* Terminate the DMA transfer */
+                       if (dma_inprogress(host))
+                               mmci_dma_data_error(host);
                        mmci_stop_data(host);
+               }
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
@@ -955,8 +960,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
 
                data = host->data;
-               if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
-                             MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+               if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+                             MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
+                             MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);
 
                cmd = host->cmd;
index 5272f9d4dda9448faece5a061d413c71620f6da4..9de37642f09f24371f857df61eaecb89b3bbb416 100644 (file)
@@ -23,8 +23,8 @@ if NET_VENDOR_FREESCALE
 config FEC
        bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
        depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
-                  ARCH_MXC || ARCH_MXS)
-       default ARCH_MXC || ARCH_MXS if ARM
+                  ARCH_MXC || SOC_IMX28)
+       default ARCH_MXC || SOC_IMX28 if ARM
        select PHYLIB
        ---help---
          Say Y here if you want to use the built-in 10/100 Fast ethernet
index c7b60839ac9951caa4b3de0f0577bfd7d39bb1ca..dea0cb4400e25e20edbdd3fcf730d9280e23bb1a 100644 (file)
@@ -2606,6 +2606,9 @@ static int skge_up(struct net_device *dev)
        spin_unlock_irq(&hw->hw_lock);
 
        napi_enable(&skge->napi);
+
+       skge_set_multicast(dev);
+
        return 0;
 
  free_tx_ring:
index 227997d775e858b08a7cde4e92036738c803acc9..5829e0b47e7e5963a8d5913505f2c3f5392dfbab 100644 (file)
@@ -147,6 +147,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (priv->mdev->dev->caps.comp_pool && cq->vector)
                mlx4_release_eq(priv->mdev->dev, cq->vector);
+       cq->vector = 0;
        cq->buf_size = 0;
        cq->buf = NULL;
 }
index 67bf0781999200d656e09e990be95709700cbce1..c8f47f17186f1449a10dd8eb14fb2e1f81a3c978 100644 (file)
@@ -477,7 +477,6 @@ enum rtl_register_content {
        /* Config1 register p.24 */
        LEDS1           = (1 << 7),
        LEDS0           = (1 << 6),
-       MSIEnable       = (1 << 5),     /* Enable Message Signaled Interrupt */
        Speed_down      = (1 << 4),
        MEMMAP          = (1 << 3),
        IOMAP           = (1 << 2),
@@ -485,6 +484,7 @@ enum rtl_register_content {
        PMEnable        = (1 << 0),     /* Power Management Enable */
 
        /* Config2 register p. 25 */
+       MSIEnable       = (1 << 5),     /* 8169 only. Reserved in the 8168. */
        PCI_Clock_66MHz = 0x01,
        PCI_Clock_33MHz = 0x00,
 
@@ -3426,22 +3426,24 @@ static const struct rtl_cfg_info {
 };
 
 /* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
                            const struct rtl_cfg_info *cfg)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
        unsigned msi = 0;
        u8 cfg2;
 
        cfg2 = RTL_R8(Config2) & ~MSIEnable;
        if (cfg->features & RTL_FEATURE_MSI) {
-               if (pci_enable_msi(pdev)) {
-                       dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+               if (pci_enable_msi(tp->pci_dev)) {
+                       netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
                } else {
                        cfg2 |= MSIEnable;
                        msi = RTL_FEATURE_MSI;
                }
        }
-       RTL_W8(Config2, cfg2);
+       if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+               RTL_W8(Config2, cfg2);
        return msi;
 }
 
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                tp->features |= RTL_FEATURE_WOL;
        if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
                tp->features |= RTL_FEATURE_WOL;
-       tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+       tp->features |= rtl_try_msi(tp, cfg);
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
        if (rtl_tbi_enabled(tp)) {
index dca9d3369cdd9de8eb5f1156778369d76006c246..c97d2f59085504274eddcc2d5a25ecf352c824f0 100644 (file)
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
 
        /* handle completed packets */
+       spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+       spin_lock_irqsave(&chan->lock, flags);
 
        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
index e6fed4d4cb77f70c8c6d62af16d53a57ed31c99c..e95f0e60a9bc7bc9dd18587986c53e5af8f79d42 100644 (file)
@@ -1655,6 +1655,10 @@ static const struct usb_device_id        products [] = {
        // ASIX 88772a
        USB_DEVICE(0x0db0, 0xa877),
        .driver_info = (unsigned long) &ax88772_info,
+}, {
+       // Asus USB Ethernet Adapter
+       USB_DEVICE (0x0b95, 0x7e2b),
+       .driver_info = (unsigned long) &ax88772_info,
 },
        { },            // END
 };
index d2348a5a7809bd1e2233f39a2b6f192443234f2f..a9c5ae75277e18b0993af04bdfff063955fc8ce2 100644 (file)
@@ -1843,6 +1843,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
        struct ath_softc *sc = hw->priv;
        struct ath_node *an = (struct ath_node *) sta->drv_priv;
 
+       if (!(sc->sc_flags & SC_OP_TXAGGR))
+               return;
+
        switch (cmd) {
        case STA_NOTIFY_SLEEP:
                an->sleeping = true;
index 888abc2be3a547d6f85ad32723fd2d2910c5582e..528d5f3e868c712a7b2372dc8616e1581205bddc 100644 (file)
@@ -1271,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
 
        ath_rc_priv->max_valid_rate = k;
        ath_rc_sort_validrates(rate_table, ath_rc_priv);
-       ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+       ath_rc_priv->rate_max_phy = (k > 4) ?
+                                       ath_rc_priv->valid_rate_index[k-4] :
+                                       ath_rc_priv->valid_rate_index[k-1];
        ath_rc_priv->rate_table = rate_table;
 
        ath_dbg(common, ATH_DBG_CONFIG,
index fcff923b3c18b25d44ae4e266747825b8b571ebf..279a53eae4c57108b26b8e5e25fdf27a053ca9af 100644 (file)
@@ -617,9 +617,19 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
        const char *err_msg = NULL;
        struct b43_rxhdr_fw4 *rxhdr =
                (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
+       size_t rxhdr_size = sizeof(*rxhdr);
 
        BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
-       memset(rxhdr, 0, sizeof(*rxhdr));
+       switch (dev->fw.hdr_format) {
+       case B43_FW_HDR_410:
+       case B43_FW_HDR_351:
+               rxhdr_size -= sizeof(rxhdr->format_598) -
+                       sizeof(rxhdr->format_351);
+               break;
+       case B43_FW_HDR_598:
+               break;
+       }
+       memset(rxhdr, 0, rxhdr_size);
 
        /* Check if we have data and wait for it to get ready. */
        if (q->rev >= 8) {
@@ -657,11 +667,11 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
 
        /* Get the preamble (RX header) */
        if (q->rev >= 8) {
-               b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+               b43_block_read(dev, rxhdr, rxhdr_size,
                               q->mmio_base + B43_PIO8_RXDATA,
                               sizeof(u32));
        } else {
-               b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+               b43_block_read(dev, rxhdr, rxhdr_size,
                               q->mmio_base + B43_PIO_RXDATA,
                               sizeof(u16));
        }
index a7a6def40d05aa29fe6d739daf1f0d247bf45a80..5c7c17c7166ac7a307407ddff67eee59ca148771 100644 (file)
@@ -606,8 +606,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
                        if (ctx->ht.enabled) {
                                /* if HT40 is used, it should not change
                                 * after associated except channel switch */
-                               if (iwl_is_associated_ctx(ctx) &&
-                                    !ctx->ht.is_40mhz)
+                               if (!ctx->ht.is_40mhz ||
+                                               !iwl_is_associated_ctx(ctx))
                                        iwlagn_config_ht40(conf, ctx);
                        } else
                                ctx->ht.is_40mhz = false;
index 35a6b71f358ce7a563f6507b58f0743eb6ded16a..df1540ca6102f641ed491cd50899e5eec17b8963 100644 (file)
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               else
+                       tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        }
 
        iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
index bacc06c95e7ac798449e11ca883e5b5dc1def717..e0e9a3dfbc00a2febd8ce146bc8c3d02bc8e312a 100644 (file)
@@ -2850,6 +2850,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
        int ret;
        u8 sta_id;
 
+       if (ctx->ctxid != IWL_RXON_CTX_PAN)
+               return 0;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->shrd->mutex);
 
@@ -2898,6 +2901,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
        struct iwl_rxon_context *ctx = vif_priv->ctx;
 
+       if (ctx->ctxid != IWL_RXON_CTX_PAN)
+               return;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->shrd->mutex);
 
index ce918980e97799a51fc3b0b9a732d28fe706c84a..5f17ab8e76bacbc7e4521951c15a716a532afc45 100644 (file)
@@ -1197,9 +1197,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
        /* Set up entry for this TFD in Tx byte-count array */
-       if (is_agg)
-               iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
-                                              le16_to_cpu(tx_cmd->len));
+       iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
        dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
                        DMA_BIDIRECTIONAL);
index ac278156d390ea901d37eda9819abdf2de26edcf..6e0a3eaecf7070bcdcce90eb91e63d7fe6cc6aab 100644 (file)
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 {
        struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
        unsigned long cmd_flags;
-       unsigned long cmd_pending_q_flags;
        unsigned long scan_pending_q_flags;
        uint16_t cancel_scan_cmd = false;
 
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                cmd_node = adapter->curr_cmd;
                cmd_node->wait_q_enabled = false;
                cmd_node->cmd_flag |= CMD_F_CANCELED;
-               spin_lock_irqsave(&adapter->cmd_pending_q_lock,
-                                 cmd_pending_q_flags);
-               list_del(&cmd_node->list);
-               spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
-                                      cmd_pending_q_flags);
                mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+               adapter->curr_cmd = NULL;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
        }
 
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
        }
        adapter->cmd_wait_q.status = -1;
-       mwifiex_complete_cmd(adapter, adapter->curr_cmd);
 }
 
 /*
index ea4a29b7e331c4bccf646deb92f414c4825a5796..1679c2593b7ba7d2975f584e250241a1892b341d 100644 (file)
@@ -55,9 +55,14 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
 {
        bool cancel_flag = false;
        int status = adapter->cmd_wait_q.status;
-       struct cmd_ctrl_node *cmd_queued = adapter->cmd_queued;
+       struct cmd_ctrl_node *cmd_queued;
 
+       if (!adapter->cmd_queued)
+               return 0;
+
+       cmd_queued = adapter->cmd_queued;
        adapter->cmd_queued = NULL;
+
        dev_dbg(adapter->dev, "cmd pending\n");
        atomic_inc(&adapter->cmd_pending);
 
index cbd5d701c7e086f632f74bf8bec29fe1a23e6a55..63b3ec48c203a43f3d8a9d395e459d765fd7f7b0 100644 (file)
@@ -314,7 +314,7 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
        if (!lookup)
                return NULL;
 
-       for(; lookup->name != NULL; lookup++) {
+       for(; lookup->compatible != NULL; lookup++) {
                if (!of_device_is_compatible(np, lookup->compatible))
                        continue;
                if (of_address_to_resource(np, 0, &res))
index 3bcc7cfcaba7ff1e370fee78cf85fb5bb22a6358..8e286259a007fbc5921b4c569cdb959f62762f7a 100644 (file)
@@ -73,8 +73,6 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
                err = -EINVAL;
 
        mutex_unlock(&rtc->ops_lock);
-       /* A timer might have just expired */
-       schedule_work(&rtc->irqwork);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_time);
@@ -114,8 +112,6 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
                err = -EINVAL;
 
        mutex_unlock(&rtc->ops_lock);
-       /* A timer might have just expired */
-       schedule_work(&rtc->irqwork);
 
        return err;
 }
@@ -323,20 +319,6 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
-static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
-{
-       int err;
-
-       if (!rtc->ops)
-               err = -ENODEV;
-       else if (!rtc->ops->set_alarm)
-               err = -EINVAL;
-       else
-               err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
-       return err;
-}
-
 static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
        struct rtc_time tm;
@@ -360,7 +342,14 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
         * over right here, before we set the alarm.
         */
 
-       return ___rtc_set_alarm(rtc, alarm);
+       if (!rtc->ops)
+               err = -ENODEV;
+       else if (!rtc->ops->set_alarm)
+               err = -EINVAL;
+       else
+               err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+       return err;
 }
 
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -407,8 +396,6 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
        }
        mutex_unlock(&rtc->ops_lock);
-       /* maybe that was in the past.*/
-       schedule_work(&rtc->irqwork);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
@@ -776,20 +763,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        return 0;
 }
 
-static void rtc_alarm_disable(struct rtc_device *rtc)
-{
-       struct rtc_wkalrm alarm;
-       struct rtc_time tm;
-
-       __rtc_read_time(rtc, &tm);
-
-       alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
-                                    ktime_set(300, 0)));
-       alarm.enabled = 0;
-
-       ___rtc_set_alarm(rtc, &alarm);
-}
-
 /**
  * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
  * @rtc rtc device
@@ -811,10 +784,8 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
                struct rtc_wkalrm alarm;
                int err;
                next = timerqueue_getnext(&rtc->timerqueue);
-               if (!next) {
-                       rtc_alarm_disable(rtc);
+               if (!next)
                        return;
-               }
                alarm.time = rtc_ktime_to_tm(next->expires);
                alarm.enabled = 1;
                err = __rtc_set_alarm(rtc, &alarm);
@@ -876,8 +847,7 @@ void rtc_timer_do_work(struct work_struct *work)
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME)
                        goto again;
-       } else
-               rtc_alarm_disable(rtc);
+       }
 
        mutex_unlock(&rtc->ops_lock);
 }
index 717ebc9ff941808e2ddd52c93db09cc998456565..600d82348511068b141dc65f7a6e7e7c79037a14 100644 (file)
@@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
                ret = -ENODEV;
                goto err0;
        }
-       dwc->revision = reg & DWC3_GSNPSREV_MASK;
+       dwc->revision = reg;
 
        dwc3_core_soft_reset(dwc);
 
index 596a0b464e61f069f93c8278a3c8193ab44a7873..4dff83d2f265235338f3b92f0e5b59690a96de25 100644 (file)
@@ -130,9 +130,6 @@ ep_matches (
                        num_req_streams = ep_comp->bmAttributes & 0x1f;
                        if (num_req_streams > ep->max_streams)
                                return 0;
-                       /* Update the ep_comp descriptor if needed */
-                       if (num_req_streams != ep->max_streams)
-                               ep_comp->bmAttributes = ep->max_streams;
                }
 
        }
index a7dc1e1d45f2a77e1cfce4de339b449b8863f778..2ac4ac2e4ef95208be6d1ae2b42af1840cb42bb9 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "isp1760-hcd.h"
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -31,7 +31,7 @@
 #include <linux/pci.h>
 #endif
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 struct isp1760 {
        struct usb_hcd *hcd;
        int rst_gpio;
@@ -437,7 +437,7 @@ static int __init isp1760_init(void)
        ret = platform_driver_register(&isp1760_plat_driver);
        if (!ret)
                any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        ret = platform_driver_register(&isp1760_of_driver);
        if (!ret)
                any_ret = 0;
@@ -457,7 +457,7 @@ module_init(isp1760_init);
 static void __exit isp1760_exit(void)
 {
        platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        platform_driver_unregister(&isp1760_of_driver);
 #endif
 #ifdef CONFIG_PCI
index 60ddba8066ea201181b0b6ce3c69a847eafc618a..79cb0af779fa07dac0702ee9b2b2d43e0a22b738 100644 (file)
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
                        if (musb->double_buffer_not_ok)
                                musb_writew(epio, MUSB_TXMAXP,
                                                hw_ep->max_packet_sz_tx);
+                       else if (can_bulk_split(musb, qh->type))
+                               musb_writew(epio, MUSB_TXMAXP, packet_sz
+                                       | ((hw_ep->max_packet_sz_tx /
+                                               packet_sz) - 1) << 11);
                        else
                                musb_writew(epio, MUSB_TXMAXP,
                                                qh->maxpacket |
index 03f449a430d253ca997f0e84e721748707fb674f..5b89f7d6cd0ff4bb6d2f305e025b2ad3ebd18c5b 100644 (file)
@@ -76,8 +76,6 @@ static int irq;
 static void __iomem *virtbase;
 static unsigned long coh901327_users;
 static unsigned long boot_status;
-static u16 wdogenablestore;
-static u16 irqmaskstore;
 static struct device *parent;
 
 /*
@@ -461,6 +459,10 @@ static int __init coh901327_probe(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
+
+static u16 wdogenablestore;
+static u16 irqmaskstore;
+
 static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
 {
        irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
index 3774c9b8dac9c6868a28c5eda079e7915b7d24cb..8464ea1c36a1080f4c4b4045333a70b963d08b5d 100644 (file)
@@ -231,6 +231,7 @@ static int __devinit cru_detect(unsigned long map_entry,
 
        cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
 
+       set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
        asminline_call(&cmn_regs, bios32_entrypoint);
 
        if (cmn_regs.u1.ral != 0) {
@@ -248,8 +249,10 @@ static int __devinit cru_detect(unsigned long map_entry,
                if ((physical_bios_base + physical_bios_offset)) {
                        cru_rom_addr =
                                ioremap(cru_physical_address, cru_length);
-                       if (cru_rom_addr)
+                       if (cru_rom_addr) {
+                               set_memory_x((unsigned long)cru_rom_addr, cru_length);
                                retval = 0;
+                       }
                }
 
                printk(KERN_DEBUG "hpwdt: CRU Base Address:   0x%lx\n",
index ba6ad662635ae97776cf51c16b5fba482cc8a8c8..99796c5d913db2c9f354dbd06b503d8d65580cdf 100644 (file)
@@ -384,10 +384,10 @@ MODULE_PARM_DESC(nowayout,
        "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static int turn_SMI_watchdog_clear_off = 0;
+static int turn_SMI_watchdog_clear_off = 1;
 module_param(turn_SMI_watchdog_clear_off, int, 0);
 MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
-       "Turn off SMI clearing watchdog (default=0)");
+       "Turn off SMI clearing watchdog (depends on TCO-version)(default=1)");
 
 /*
  * Some TCO specific functions
@@ -813,7 +813,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
                ret = -EIO;
                goto out_unmap;
        }
-       if (turn_SMI_watchdog_clear_off) {
+       if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
                /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
                val32 = inl(SMI_EN);
                val32 &= 0xffffdfff;    /* Turn off SMI clearing watchdog */
index cc2cfbe33b30d441a2ef6449d2babe0b1eb43a3d..bfaf9bb1ee0d1ff384a014742f5c8148e96fc040 100644 (file)
@@ -351,7 +351,7 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
        return 0;
 }
 
-static struct amba_id sp805_wdt_ids[] __initdata = {
+static struct amba_id sp805_wdt_ids[] = {
        {
                .id     = 0x00141805,
                .mask   = 0x00ffffff,
index e24cd8986d8badacc41ce764a15cc99f2524fb8c..ea78c3a17eecd8d4bf38e182fcb4ca1ce0389492 100644 (file)
@@ -12,7 +12,7 @@ here.
 This directory is _NOT_ for adding arbitrary new firmware images. The
 place to add those is the separate linux-firmware repository:
 
-    git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
+    git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
 
 That repository contains all these firmware images which have been
 extracted from older drivers, as well as various new firmware images which
@@ -22,6 +22,7 @@ been permitted to redistribute under separate cover.
 To submit firmware to that repository, please send either a git binary
 diff or preferably a git pull request to:
       David Woodhouse <dwmw2@infradead.org>
+      Ben Hutchings <ben@decadent.org.uk>
 
 Your commit should include an update to the WHENCE file clearly
 identifying the licence under which the firmware is available, and
index cb97174e236603edde0a64a8d6f0ffbb07b572f1..0b394580d8603becf8c5c031745687f6ba9e9aeb 100644 (file)
@@ -563,8 +563,8 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
        struct list_head *fallback;
        int ret;
 
-again:
        spin_lock_irqsave(&workers->lock, flags);
+again:
        worker = next_worker(workers);
 
        if (!worker) {
@@ -579,6 +579,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
                        ret = __btrfs_start_workers(workers);
+                       spin_lock_irqsave(&workers->lock, flags);
                        if (ret)
                                goto fallback;
                        goto again;
index 0a6b928813a41f4ca3176cf2e0a228d166492bab..fd1a06df5bc637c5dad0b0b7e5ce1ce88d5c1616 100644 (file)
@@ -4590,10 +4590,6 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
        int err = btrfs_add_link(trans, dir, inode,
                                 dentry->d_name.name, dentry->d_name.len,
                                 backref, index);
-       if (!err) {
-               d_instantiate(dentry, inode);
-               return 0;
-       }
        if (err > 0)
                err = -EEXIST;
        return err;
@@ -4655,6 +4651,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        else {
                init_special_inode(inode, inode->i_mode, rdev);
                btrfs_update_inode(trans, root, inode);
+               d_instantiate(dentry, inode);
        }
 out_unlock:
        nr = trans->blocks_used;
@@ -4722,6 +4719,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+               d_instantiate(dentry, inode);
        }
 out_unlock:
        nr = trans->blocks_used;
@@ -4779,6 +4777,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                struct dentry *parent = dentry->d_parent;
                err = btrfs_update_inode(trans, root, inode);
                BUG_ON(err);
+               d_instantiate(dentry, inode);
                btrfs_log_new_name(trans, inode, NULL, parent);
        }
 
@@ -7245,6 +7244,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                drop_inode = 1;
 
 out_unlock:
+       if (!err)
+               d_instantiate(dentry, inode);
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
        if (drop_inode) {
index 3eeb976612625500b1a30b1fbc4896bdffa867c9..98954003a8d313007386d4cfc214c5dba9296647 100644 (file)
@@ -1094,42 +1094,19 @@ static int ceph_snapdir_d_revalidate(struct dentry *dentry,
 /*
  * Set/clear/test dir complete flag on the dir's dentry.
  */
-static struct dentry * __d_find_any_alias(struct inode *inode)
-{
-       struct dentry *alias;
-
-       if (list_empty(&inode->i_dentry))
-               return NULL;
-       alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
-       return alias;
-}
-
 void ceph_dir_set_complete(struct inode *inode)
 {
-       struct dentry *dentry = __d_find_any_alias(inode);
-       
-       if (dentry && ceph_dentry(dentry)) {
-               dout(" marking %p (%p) complete\n", inode, dentry);
-               set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
-       }
+       /* not yet implemented */
 }
 
 void ceph_dir_clear_complete(struct inode *inode)
 {
-       struct dentry *dentry = __d_find_any_alias(inode);
-
-       if (dentry && ceph_dentry(dentry)) {
-               dout(" marking %p (%p) NOT complete\n", inode, dentry);
-               clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
-       }
+       /* not yet implemented */
 }
 
 bool ceph_dir_test_complete(struct inode *inode)
 {
-       struct dentry *dentry = __d_find_any_alias(inode);
-
-       if (dentry && ceph_dentry(dentry))
-               return test_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
+       /* not yet implemented */
        return false;
 }
 
index 8cd4b52d42174ee0a4d524d3176b59102751c61e..f3670cf72587beee30ca9f9bf8152e169d6bd085 100644 (file)
@@ -282,7 +282,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
        byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
        byte_count += total_in_buf2;
        /* don't allow buffer to overflow */
-       if (byte_count > CIFSMaxBufSize)
+       if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
                return -ENOBUFS;
        pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
 
@@ -2122,7 +2122,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
                warned_on_ntlm = true;
                cERROR(1, "default security mechanism requested.  The default "
                        "security mechanism will be upgraded from ntlm to "
-                       "ntlmv2 in kernel release 3.2");
+                       "ntlmv2 in kernel release 3.3");
        }
        ses->overrideSecFlg = volume_info->secFlg;
 
index ac86f8b3e3cb1cf050753148c3f3d065b18e6cf9..517f211a3bd45c60f607e5da7d957bc739d55d47 100644 (file)
@@ -47,17 +47,6 @@ struct wb_writeback_work {
        struct completion *done;        /* set if the caller waits */
 };
 
-const char *wb_reason_name[] = {
-       [WB_REASON_BACKGROUND]          = "background",
-       [WB_REASON_TRY_TO_FREE_PAGES]   = "try_to_free_pages",
-       [WB_REASON_SYNC]                = "sync",
-       [WB_REASON_PERIODIC]            = "periodic",
-       [WB_REASON_LAPTOP_TIMER]        = "laptop_timer",
-       [WB_REASON_FREE_MORE_MEM]       = "free_more_memory",
-       [WB_REASON_FS_FREE_SPACE]       = "fs_free_space",
-       [WB_REASON_FORKER_THREAD]       = "forker_thread"
-};
-
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
index 3b0d05dcd7c1cc8f098205e781536cc2d1605acf..637694bf3a03c5652d780700686a9e719fca2709 100644 (file)
@@ -1205,6 +1205,8 @@ int __break_lease(struct inode *inode, unsigned int mode)
        int want_write = (mode & O_ACCMODE) != O_RDONLY;
 
        new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
+       if (IS_ERR(new_fl))
+               return PTR_ERR(new_fl);
 
        lock_flocks();
 
@@ -1221,12 +1223,6 @@ int __break_lease(struct inode *inode, unsigned int mode)
                if (fl->fl_owner == current->files)
                        i_have_this_lease = 1;
 
-       if (IS_ERR(new_fl) && !i_have_this_lease
-                       && ((mode & O_NONBLOCK) == 0)) {
-               error = PTR_ERR(new_fl);
-               goto out;
-       }
-
        break_time = 0;
        if (lease_break_time > 0) {
                break_time = jiffies + lease_break_time * HZ;
@@ -1284,8 +1280,7 @@ int __break_lease(struct inode *inode, unsigned int mode)
 
 out:
        unlock_flocks();
-       if (!IS_ERR(new_fl))
-               locks_free_lock(new_fl);
+       locks_free_lock(new_fl);
        return error;
 }
 
index 1d9e33966db089eb2ba5ed3f7aab9967bd2c0b4e..4d46a6a5907052de73638a347a9aa4238fa2bb37 100644 (file)
@@ -263,23 +263,6 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
                goto out_no_root;
        }
 
-       ret = -ENOMEM;
-       s->s_root = d_alloc_root(root_inode);
-       if (!s->s_root)
-               goto out_iput;
-
-       if (!(s->s_flags & MS_RDONLY)) {
-               if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
-                       ms->s_state &= ~MINIX_VALID_FS;
-               mark_buffer_dirty(bh);
-       }
-       if (!(sbi->s_mount_state & MINIX_VALID_FS))
-               printk("MINIX-fs: mounting unchecked file system, "
-                       "running fsck is recommended\n");
-       else if (sbi->s_mount_state & MINIX_ERROR_FS)
-               printk("MINIX-fs: mounting file system with errors, "
-                       "running fsck is recommended\n");
-
        /* Apparently minix can create filesystems that allocate more blocks for
         * the bitmaps than needed.  We simply ignore that, but verify it didn't
         * create one with not enough blocks and bail out if so.
@@ -300,6 +283,23 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
                goto out_iput;
        }
 
+       ret = -ENOMEM;
+       s->s_root = d_alloc_root(root_inode);
+       if (!s->s_root)
+               goto out_iput;
+
+       if (!(s->s_flags & MS_RDONLY)) {
+               if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
+                       ms->s_state &= ~MINIX_VALID_FS;
+               mark_buffer_dirty(bh);
+       }
+       if (!(sbi->s_mount_state & MINIX_VALID_FS))
+               printk("MINIX-fs: mounting unchecked file system, "
+                       "running fsck is recommended\n");
+       else if (sbi->s_mount_state & MINIX_ERROR_FS)
+               printk("MINIX-fs: mounting file system with errors, "
+                       "running fsck is recommended\n");
+
        return 0;
 
 out_iput:
index 2a30d67dd6b81ea03c71b3bdea4fa50913a09ae4..0855e6f20391715c945c32487bb2346f11f0e8a8 100644 (file)
@@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu)
                idle = kstat_cpu(cpu).cpustat.idle;
                idle = cputime64_add(idle, arch_idle_time(cpu));
        } else
-               idle = nsecs_to_jiffies64(1000 * idle_time);
+               idle = usecs_to_cputime64(idle_time);
 
        return idle;
 }
@@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu)
                /* !NO_HZ so we can rely on cpustat.iowait */
                iowait = kstat_cpu(cpu).cpustat.iowait;
        else
-               iowait = nsecs_to_jiffies64(1000 * iowait_time);
+               iowait = usecs_to_cputime64(iowait_time);
 
        return iowait;
 }
index 3eca58f51ae9040b0fcc5fe6b809df38dbf0b4ac..8a899496fd5fe55ef50f4cfdcfcce20e7875a60c 100644 (file)
@@ -868,27 +868,6 @@ xfs_fs_dirty_inode(
        XFS_I(inode)->i_update_core = 1;
 }
 
-STATIC int
-xfs_log_inode(
-       struct xfs_inode        *ip)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       struct xfs_trans        *tp;
-       int                     error;
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-       error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       return xfs_trans_commit(tp, 0);
-}
-
 STATIC int
 xfs_fs_write_inode(
        struct inode            *inode,
@@ -902,10 +881,8 @@ xfs_fs_write_inode(
 
        if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);
-       if (!ip->i_update_core)
-               return 0;
 
-       if (wbc->sync_mode == WB_SYNC_ALL) {
+       if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
                /*
                 * Make sure the inode has made it into the log.  Instead
                 * of forcing it all the way to stable storage using a
@@ -913,11 +890,14 @@ xfs_fs_write_inode(
                 * ->sync_fs call do that for us, which reduces the number
                 * of synchronous log forces dramatically.
                 */
-               error = xfs_log_inode(ip);
+               error = xfs_log_dirty_inode(ip, NULL, 0);
                if (error)
                        goto out;
                return 0;
        } else {
+               if (!ip->i_update_core)
+                       return 0;
+
                /*
                 * We make this non-blocking if the inode is contended, return
                 * EAGAIN to indicate to the caller that they did not succeed.
index be5c51d8f7572d715fc51f0c86563b46aba380c8..f0994aedcd158c2db3d6f9b2bf4d21a4f819bec6 100644 (file)
@@ -336,6 +336,32 @@ xfs_sync_fsdata(
        return error;
 }
 
+int
+xfs_log_dirty_inode(
+       struct xfs_inode        *ip,
+       struct xfs_perag        *pag,
+       int                     flags)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       int                     error;
+
+       if (!ip->i_update_core)
+               return 0;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+       error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       return xfs_trans_commit(tp, 0);
+}
+
 /*
  * When remounting a filesystem read-only or freezing the filesystem, we have
  * two phases to execute. This first phase is syncing the data before we
@@ -359,6 +385,16 @@ xfs_quiesce_data(
 {
        int                     error, error2 = 0;
 
+       /*
+        * Log all pending size and timestamp updates.  The vfs writeback
+        * code is supposed to do this, but due to its overaggressive
+        * livelock detection it will skip inodes where appending writes
+        * were written out in the first non-blocking sync phase if their
+        * completion took long enough that it happened after taking the
+        * timestamp for the cut-off in the blocking phase.
+        */
+       xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);
+
        xfs_qm_sync(mp, SYNC_TRYLOCK);
        xfs_qm_sync(mp, SYNC_WAIT);
 
index 941202e7ac6e594e2c423c19bc89248397e39516..fa965479d788d29da66b0e85bd59123c1fe08c65 100644 (file)
@@ -34,6 +34,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
 
 void xfs_flush_inodes(struct xfs_inode *ip);
 
+int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags);
+
 int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
 int xfs_reclaim_inodes_count(struct xfs_mount *mp);
 void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
index 62ce6823c0f2ac82ccdf285cf99dc8d36a9bb80b..12a1764f612b2709360eb90f9c084b2dff3a8bb9 100644 (file)
@@ -40,6 +40,7 @@ typedef u64 cputime64_t;
  */
 #define cputime_to_usecs(__ct)         jiffies_to_usecs(__ct)
 #define usecs_to_cputime(__msecs)      usecs_to_jiffies(__msecs)
+#define usecs_to_cputime64(__msecs)    nsecs_to_jiffies64((__msecs) * 1000)
 
 /*
  * Convert cputime to seconds and back.
index c3892fc1d5389c86c664a43ca57bffa0f008d4b8..68e67e50d028e95681f37e47ba98d2e047b01b8b 100644 (file)
@@ -557,6 +557,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_MAX_VCPUS 66       /* returns max vcpus per vm */
 #define KVM_CAP_PPC_PAPR 68
 #define KVM_CAP_S390_GMAP 71
+#define KVM_CAP_TSC_DEADLINE_TIMER 72
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index f549056fb20bd5533555918cc1b1f9805c2cdcc3..87f402ccec55567330943ab774ffb12ae21c7da8 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
 #define br_lock_init(name)     name##_lock_init()
 
 #define DEFINE_LGLOCK(name)                                            \
                                                                        \
+ DEFINE_SPINLOCK(name##_cpu_lock);                                     \
+ cpumask_t name##_cpus __read_mostly;                                  \
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
  DEFINE_LGLOCK_LOCKDEP(name);                                          \
                                                                        \
+ static int                                                            \
+ name##_lg_cpu_callback(struct notifier_block *nb,                     \
+                               unsigned long action, void *hcpu)       \
+ {                                                                     \
+       switch (action & ~CPU_TASKS_FROZEN) {                           \
+       case CPU_UP_PREPARE:                                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_set((unsigned long)hcpu, name##_cpus);              \
+               spin_unlock(&name##_cpu_lock);                          \
+               break;                                                  \
+       case CPU_UP_CANCELED: case CPU_DEAD:                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_clear((unsigned long)hcpu, name##_cpus);            \
+               spin_unlock(&name##_cpu_lock);                          \
+       }                                                               \
+       return NOTIFY_OK;                                               \
+ }                                                                     \
+ static struct notifier_block name##_lg_cpu_notifier = {               \
+       .notifier_call = name##_lg_cpu_callback,                        \
+ };                                                                    \
  void name##_lock_init(void) {                                         \
        int i;                                                          \
        LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
                lock = &per_cpu(name##_lock, i);                        \
                *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
        }                                                               \
+       register_hotcpu_notifier(&name##_lg_cpu_notifier);              \
+       get_online_cpus();                                              \
+       for_each_online_cpu(i)                                          \
+               cpu_set(i, name##_cpus);                                \
+       put_online_cpus();                                              \
  }                                                                     \
  EXPORT_SYMBOL(name##_lock_init);                                      \
                                                                        \
                                                                        \
  void name##_global_lock_online(void) {                                        \
        int i;                                                          \
-       preempt_disable();                                              \
+       spin_lock(&name##_cpu_lock);                                    \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock_online(void) {                              \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
        }                                                               \
-       preempt_enable();                                               \
+       spin_unlock(&name##_cpu_lock);                                  \
  }                                                                     \
  EXPORT_SYMBOL(name##_global_unlock_online);                           \
                                                                        \
index 19d8e04e16884c2bfb860fe2f72715306753ea15..e8c619d39291b4c18b028224299d882dbf56aa16 100644 (file)
@@ -2056,7 +2056,7 @@ static inline int security_old_inode_init_security(struct inode *inode,
                                                   char **name, void **value,
                                                   size_t *len)
 {
-       return 0;
+       return -EOPNOTSUPP;
 }
 
 static inline int security_inode_create(struct inode *dir,
index 6faec1a6021629d73896fb425b49f7820720c151..75766b42660e2dd385b26a0144c96dc90b49cbaf 100644 (file)
@@ -53,6 +53,7 @@ struct dst_entry {
 #define DST_NOHASH             0x0008
 #define DST_NOCACHE            0x0010
 #define DST_NOCOUNT            0x0020
+#define DST_NOPEER             0x0040
 
        short                   error;
        short                   obsolete;
index a09447749e2d59a467c51cde514b35cef79c9b1b..57f15a7f1cddcb4a254cd52f46c914cce176caea 100644 (file)
@@ -207,6 +207,7 @@ extern struct flow_cache_object *flow_cache_lookup(
                u8 dir, flow_resolve_t resolver, void *ctx);
 
 extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
index 873d5be7926c1bef17c83ea7c79eecafede9149e..e5a7b9aaf5526b160d747f1ef321477a9f2679cb 100644 (file)
@@ -1207,7 +1207,7 @@ extern void ip_vs_control_cleanup(void);
 extern struct ip_vs_dest *
 ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
                __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
-               __u16 protocol, __u32 fwmark);
+               __u16 protocol, __u32 fwmark, __u32 flags);
 extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 
index e90e7a9935ddc5c70c8e920487bc1196c2cc49e2..a15432da27c3a911ef2669c736e43cdd18b2daa0 100644 (file)
@@ -241,6 +241,9 @@ extern struct sctp_globals {
         * bits is an indicator of when to send and window update SACK.
         */
        int rwnd_update_shift;
+
+       /* Threshold for autoclose timeout, in seconds. */
+       unsigned long max_autoclose;
 } sctp_globals;
 
 #define sctp_rto_initial               (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
 #define sctp_auth_enable               (sctp_globals.auth_enable)
 #define sctp_checksum_disable          (sctp_globals.checksum_disable)
 #define sctp_rwnd_upd_shift            (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose             (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
index abb6e0f0c3c3e59b79f23fc72d2e9d8d389e3774..32e39371fba627ebfaab66713e645889ccb7ba47 100644 (file)
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-       return qsize + skb->truesize > sk->sk_rcvbuf;
+       return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
index b99caa8b780c624af834caeab4130549479e3844..99d1d0decf88e41a7c0c038d463330e8351046b1 100644 (file)
                {I_REFERENCED,          "I_REFERENCED"}         \
        )
 
+#define WB_WORK_REASON                                                 \
+               {WB_REASON_BACKGROUND,          "background"},          \
+               {WB_REASON_TRY_TO_FREE_PAGES,   "try_to_free_pages"},   \
+               {WB_REASON_SYNC,                "sync"},                \
+               {WB_REASON_PERIODIC,            "periodic"},            \
+               {WB_REASON_LAPTOP_TIMER,        "laptop_timer"},        \
+               {WB_REASON_FREE_MORE_MEM,       "free_more_memory"},    \
+               {WB_REASON_FS_FREE_SPACE,       "fs_free_space"},       \
+               {WB_REASON_FORKER_THREAD,       "forker_thread"}
+
 struct wb_writeback_work;
 
 DECLARE_EVENT_CLASS(writeback_work_class,
@@ -55,7 +65,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
                  __entry->for_kupdate,
                  __entry->range_cyclic,
                  __entry->for_background,
-                 wb_reason_name[__entry->reason]
+                 __print_symbolic(__entry->reason, WB_WORK_REASON)
        )
 );
 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -184,7 +194,8 @@ TRACE_EVENT(writeback_queue_io,
                __entry->older, /* older_than_this in jiffies */
                __entry->age,   /* older_than_this in relative milliseconds */
                __entry->moved,
-               wb_reason_name[__entry->reason])
+               __print_symbolic(__entry->reason, WB_WORK_REASON)
+       )
 );
 
 TRACE_EVENT(global_dirty_state,
index d0b7d988f8735beb6e1e7b5f7b42ecbd04b91bf5..e6e01b959a0ef3b3efd0fc0624a3286a57770fc8 100644 (file)
@@ -1540,8 +1540,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
        }
 
        /* dead body doesn't have much to contribute */
-       if (p->exit_state == EXIT_DEAD)
+       if (unlikely(p->exit_state == EXIT_DEAD)) {
+               /*
+                * But do not ignore this task until the tracer does
+                * wait_task_zombie()->do_notify_parent().
+                */
+               if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+                       wo->notask_error = 0;
                return 0;
+       }
 
        /* slay zombie? */
        if (p->exit_state == EXIT_ZOMBIE) {
index ea87f4d2f455c8c99164cb555287ad1c9fb165e3..1614be20173dcb19ef45e65fa25d07f8d3c65194 100644 (file)
@@ -314,17 +314,29 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 #endif
 
        lock_page(page_head);
+
+       /*
+        * If page_head->mapping is NULL, then it cannot be a PageAnon
+        * page; but it might be the ZERO_PAGE or in the gate area or
+        * in a special mapping (all cases which we are happy to fail);
+        * or it may have been a good file page when get_user_pages_fast
+        * found it, but truncated or holepunched or subjected to
+        * invalidate_complete_page2 before we got the page lock (also
+        * cases which we are happy to fail).  And we hold a reference,
+        * so refcount care in invalidate_complete_page's remove_mapping
+        * prevents drop_caches from setting mapping to NULL beneath us.
+        *
+        * The case we do have to guard against is when memory pressure made
+        * shmem_writepage move it from filecache to swapcache beneath us:
+        * an unlikely race, but we do need to retry for page_head->mapping.
+        */
        if (!page_head->mapping) {
+               int shmem_swizzled = PageSwapCache(page_head);
                unlock_page(page_head);
                put_page(page_head);
-               /*
-               * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
-               * trying to find one. RW mapping would have COW'd (and thus
-               * have a mapping) so this page is RO and won't ever change.
-               */
-               if ((page_head == ZERO_PAGE(address)))
-                       return -EFAULT;
-               goto again;
+               if (shmem_swizzled)
+                       goto again;
+               return -EFAULT;
        }
 
        /*
index 8b1748d0172c7e35672c98c9ab6e93b4d815f850..2e48ec0c2e91cf099642f8bf43e90476fb6457a3 100644 (file)
@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
        /*
         * Ensure the task is not frozen.
-        * Also, when a freshly created task is scheduled once, changes
-        * its state to TASK_UNINTERRUPTIBLE without having ever been
-        * switched out once, it musn't be checked.
+        * Also, skip vfork and any other user process that freezer should skip.
         */
-       if (unlikely(t->flags & PF_FROZEN || !switch_count))
+       if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
+               return;
+
+       /*
+        * When a freshly created task is scheduled once, it changes its state
+        * to TASK_UNINTERRUPTIBLE without ever having been switched out, so it
+        * mustn't be checked.
+        */
+       if (unlikely(!switch_count))
                return;
 
        if (switch_count != t->last_switch_count) {
index 24d04477b2575f0239782cf6287298a47d50f48c..78ab24a7b0e440e2b44d75cc08bc353ab6f939c2 100644 (file)
@@ -96,9 +96,20 @@ void __ptrace_unlink(struct task_struct *child)
         */
        if (!(child->flags & PF_EXITING) &&
            (child->signal->flags & SIGNAL_STOP_STOPPED ||
-            child->signal->group_stop_count))
+            child->signal->group_stop_count)) {
                child->jobctl |= JOBCTL_STOP_PENDING;
 
+               /*
+                * This is only possible if this thread was cloned by the
+                * traced task running in the stopped group; set the signal
+                * for future reports.
+                * FIXME: we should change ptrace_init_task() to handle this
+                * case.
+                */
+               if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
+                       child->jobctl |= SIGSTOP;
+       }
+
        /*
         * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
         * @child in the butt.  Note that @resume should be used iff @child
index b3f78d09a1053b67f62719a8879f6619ae12253b..206551563cce5e9c09796c32674d634f8802d69b 100644 (file)
@@ -1994,8 +1994,6 @@ static bool do_signal_stop(int signr)
                 */
                if (!(sig->flags & SIGNAL_STOP_STOPPED))
                        sig->group_exit_code = signr;
-               else
-                       WARN_ON_ONCE(!current->ptrace);
 
                sig->group_stop_count = 0;
 
index c4eb71c8b2ea3290c6b83f2f6d72b3c8c62a7787..1ecd6ba36d6c6d3d0ce404450878ea5da047e01d 100644 (file)
@@ -387,7 +387,6 @@ void clockevents_exchange_device(struct clock_event_device *old,
         * released list and do a notify add later.
         */
        if (old) {
-               old->event_handler = clockevents_handle_noop;
                clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
index db110b8ae0309a39cfdef20e75351b989431cf2a..f1539decd99d853d1a5c44fb3296072444aabf1a 100644 (file)
@@ -634,10 +634,11 @@ static int tracepoint_module_coming(struct module *mod)
        int ret = 0;
 
        /*
-        * We skip modules that tain the kernel, especially those with different
-        * module header (for forced load), to make sure we don't cause a crash.
+        * We skip modules that taint the kernel, especially those with different
+        * module headers (for forced load), to make sure we don't cause a crash.
+        * Staging and out-of-tree GPL modules are fine.
         */
-       if (mod->taints)
+       if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
                return 0;
        mutex_lock(&tracepoints_mutex);
        tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
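The new taint test keeps tracepoints working for staging (TAINT_CRAP) and out-of-tree (TAINT_OOT_MODULE) modules while still refusing anything else that taints the kernel. A small sketch of the masking logic; the bit positions below are chosen only for illustration:

#include <stdio.h>

#define TAINT_FORCED_MODULE      2      /* illustrative bit positions */
#define TAINT_CRAP              10
#define TAINT_OOT_MODULE        12

static int skip_module(unsigned long taints)
{
        /* Clear the two tolerated taints; any remaining bit blocks tracing. */
        return (taints & ~((1UL << TAINT_OOT_MODULE) | (1UL << TAINT_CRAP))) != 0;
}

int main(void)
{
        printf("%d\n", skip_module(1UL << TAINT_OOT_MODULE));    /* 0: tolerated */
        printf("%d\n", skip_module(1UL << TAINT_FORCED_MODULE)); /* 1: skipped */
        return 0;
}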
index c106d3b3cc640d56266c609f5c840e29c6fcae01..5f0a3c91fdac043437392bbe141b45c30cdcf66e 100644 (file)
@@ -1828,7 +1828,7 @@ static struct page *__read_cache_page(struct address_space *mapping,
                page = __page_cache_alloc(gfp | __GFP_COLD);
                if (!page)
                        return ERR_PTR(-ENOMEM);
-               err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+               err = add_to_page_cache_lru(page, mapping, index, gfp);
                if (unlikely(err)) {
                        page_cache_release(page);
                        if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
  * @gfp:       the page allocator flags to use if allocating
  *
  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
  *
  * If the page does not get brought uptodate, return -EIO.
  */
index 73f17c0293c0a0e57a62f65f11c969b9319532f5..2316840b337a37447d2d8cbe294f53df71e53575 100644 (file)
@@ -901,7 +901,6 @@ static int gather_surplus_pages(struct hstate *h, int delta)
        h->resv_huge_pages += delta;
        ret = 0;
 
-       spin_unlock(&hugetlb_lock);
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
@@ -915,6 +914,7 @@ static int gather_surplus_pages(struct hstate *h, int delta)
                VM_BUG_ON(page_count(page));
                enqueue_huge_page(h, page);
        }
+       spin_unlock(&hugetlb_lock);
 
        /* Free unnecessary surplus pages to the buddy allocator */
 free:
index adc395481813532efe82dced222e79115b075b0e..c3fdbcb17658ce405131e5b0310e1857fd6558bc 100644 (file)
@@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
+       pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;
 
@@ -643,13 +644,21 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        if (!vma || vma->vm_start > start)
                return -EFAULT;
 
+       if (start > vma->vm_start)
+               prev = vma;
+
        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
                vmstart = max(start, vma->vm_start);
                vmend   = min(end, vma->vm_end);
 
+               if (mpol_equal(vma_policy(vma), new_pol))
+                       continue;
+
+               pgoff = vma->vm_pgoff +
+                       ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+                                 vma->anon_vma, vma->vm_file, pgoff,
                                  new_pol);
                if (prev) {
                        vma = prev;
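The mbind_range() fix above passes vma_merge() a pgoff shifted to match the sub-range being merged rather than the whole VMA's vm_pgoff. The arithmetic, with made-up addresses and 4 KiB pages assumed:

#include <stdio.h>

#define PAGE_SHIFT 12                            /* 4 KiB pages, assumed */

int main(void)
{
        unsigned long vm_start = 0x400000;       /* VMA start, illustrative */
        unsigned long vm_pgoff = 10;             /* file offset of vm_start, in pages */
        unsigned long vmstart  = 0x403000;       /* policy range begins 3 pages in */

        /* The page offset vma_merge() must see for the partial range. */
        unsigned long pgoff = vm_pgoff + ((vmstart - vm_start) >> PAGE_SHIFT);

        printf("pgoff for the merged sub-range: %lu (expected 13)\n", pgoff);
        return 0;
}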
index e0af7237cd9245fedfc8886ec446f670ddc48500..c1c597e3e198e9ffba26dd4dcf2e59e2ffc923c3 100644 (file)
@@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                goto encrypt;
 
 auth:
-       if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+       if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;
 
        if (!hci_conn_auth(conn, sec_level, auth_type))
index be84ae33ae36bec88f4f67502780b4fe5333e270..b84458dcc2261259d83e1bad2aecef95ed622113 100644 (file)
@@ -613,7 +613,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
-                                       msecs_to_jiffies(HCI_INIT_TIMEOUT));
+                                       msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }
 
index 5ea94a1eecf2f9a4338aa116d645b03b77fa7bd6..17b5b1cd96579e352b2f3bff0bb5d9b7d9406ff2 100644 (file)
@@ -2152,7 +2152,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
        void *ptr = req->data;
        int type, olen;
        unsigned long val;
-       struct l2cap_conf_rfc rfc;
+       struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
 
        BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
 
@@ -2271,6 +2271,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
                }
        }
 
+       /* Use sane default values in case a misbehaving remote device
+        * did not send an RFC option.
+        */
+       rfc.mode = chan->mode;
+       rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+       rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+       rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+       BT_ERR("Expected RFC option was not found, using defaults");
+
 done:
        switch (rfc.mode) {
        case L2CAP_MODE_ERTM:
index 4e32e18211f9187d8a98a27772a9ba97f5cf7112..2d28dfe983890fc74a2a8ebf5bc5f7e5716058c5 100644 (file)
@@ -1146,6 +1146,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
                        if (list_empty(&s->dlcs)) {
                                s->state = BT_DISCONN;
                                rfcomm_send_disc(s, 0);
+                               rfcomm_session_clear_timer(s);
                        }
 
                        break;
index d6ec3720c77e448c4fd014f78397dfe5fb52e592..fa8b8f763580a0fcf82cf9afe5c6fb91d1f64f8a 100644 (file)
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
        return NULL;
 }
 
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+       return dst->dev->mtu;
+}
+
 static struct dst_ops fake_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
        .update_pmtu =          fake_update_pmtu,
        .cow_metrics =          fake_cow_metrics,
        .neigh_lookup =         fake_neigh_lookup,
+       .mtu =                  fake_mtu,
 };
 
 /*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
        rt->dst.dev = br->dev;
        rt->dst.path = &rt->dst;
        dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-       rt->dst.flags   = DST_NOXFRM;
+       rt->dst.flags   = DST_NOXFRM | DST_NOPEER;
        rt->dst.ops = &fake_dst_ops;
 }
 
index 8ae42de9c79e78379691f20207987c5b2ea338b6..e318c7e98042ffcfd1fc43a1d04b5cf816d651a8 100644 (file)
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
        put_online_cpus();
 }
 
+static void flow_cache_flush_task(struct work_struct *work)
+{
+       flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+       schedule_work(&flow_cache_flush_work);
+}
+
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
index c71c434a4c053e440dc816682d944c521e05c50f..385aefe536489548ed3f53e61094faaac0d08afd 100644 (file)
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
        if (count) {
                int i;
 
-               if (count > 1<<30) {
+               if (count > INT_MAX)
+                       return -EINVAL;
+               count = roundup_pow_of_two(count);
+               if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+                               / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
-               count = roundup_pow_of_two(count);
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
                if (!table)
                        return -ENOMEM;
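The new bounds check rejects a count whose rounded-up power of two would make the flow-table size computation overflow an unsigned long. A sketch of the limit computation, using stand-in structures whose layout and sizes are only illustrative:

#include <limits.h>
#include <stdio.h>

/* Stand-ins for the kernel structures; the field layout is illustrative. */
struct rps_dev_flow { unsigned int cpu; unsigned int filter; };
struct rps_dev_flow_table { unsigned int mask; struct rps_dev_flow flows[]; };

int main(void)
{
        unsigned long max_count =
                (ULONG_MAX - sizeof(struct rps_dev_flow_table)) /
                sizeof(struct rps_dev_flow);

        /* Any (power-of-two) count above this would wrap the table size. */
        printf("largest safe flow count: %lu\n", max_count);
        return 0;
}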
index 4ed7b1d12f5ecde5b8c2119c0d4cfaaa765ff470..b23f174ab84c3b6c72302834d8e13695d22a1c6c 100644 (file)
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
-       /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-          number of warnings when compiling with -W --ANK
-        */
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf) {
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
                atomic_inc(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
index 0da2afc97f32ffae2773098391aba0dcbd004903..99ec116bef145e1ac0432918894b447b2abf8da3 100644 (file)
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
                }
        }
 
+       /* no point in waiting if we could not bring up at least one device */
+       if (!ic_first_dev)
+               goto have_carrier;
+
        /* wait for a carrier on at least one device */
        start = jiffies;
        while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
index 46af62363b8c1e9ef452b6d750886f21e56f08b1..94cdbc55ca7ead13879d563608f8a191ca3b424c 100644 (file)
@@ -91,6 +91,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 
 static int ip_rt_max_size;
 static int ip_rt_gc_timeout __read_mostly      = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_redirect_number __read_mostly = 9;
 static int ip_rt_redirect_load __read_mostly   = HZ / 50;
@@ -133,6 +135,9 @@ static int ip_rt_min_advmss __read_mostly   = 256;
 static int rt_chain_length_max __read_mostly   = 20;
 static int redirect_genid;
 
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
 /*
  *     Interface to generic destination cache.
  */
@@ -830,6 +835,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        return ONE;
 }
 
+static void rt_check_expire(void)
+{
+       static unsigned int rover;
+       unsigned int i = rover, goal;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
+       unsigned long samples = 0;
+       unsigned long sum = 0, sum2 = 0;
+       unsigned long delta;
+       u64 mult;
+
+       delta = jiffies - expires_ljiffies;
+       expires_ljiffies = jiffies;
+       mult = ((u64)delta) << rt_hash_log;
+       if (ip_rt_gc_timeout > 1)
+               do_div(mult, ip_rt_gc_timeout);
+       goal = (unsigned int)mult;
+       if (goal > rt_hash_mask)
+               goal = rt_hash_mask + 1;
+       for (; goal > 0; goal--) {
+               unsigned long tmo = ip_rt_gc_timeout;
+               unsigned long length;
+
+               i = (i + 1) & rt_hash_mask;
+               rthp = &rt_hash_table[i].chain;
+
+               if (need_resched())
+                       cond_resched();
+
+               samples++;
+
+               if (rcu_dereference_raw(*rthp) == NULL)
+                       continue;
+               length = 0;
+               spin_lock_bh(rt_hash_lock_addr(i));
+               while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+                       prefetch(rth->dst.rt_next);
+                       if (rt_is_expired(rth)) {
+                               *rthp = rth->dst.rt_next;
+                               rt_free(rth);
+                               continue;
+                       }
+                       if (rth->dst.expires) {
+                               /* Entry is expired even if it is in use */
+                               if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+                                       tmo >>= 1;
+                                       rthp = &rth->dst.rt_next;
+                                       /*
+                                        * We only count entries on
+                                        * a chain with equal hash inputs once
+                                        * so that entries for different QOS
+                                        * levels, and other non-hash input
+                                        * attributes don't unfairly skew
+                                        * the length computation
+                                        */
+                                       length += has_noalias(rt_hash_table[i].chain, rth);
+                                       continue;
+                               }
+                       } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+                               goto nofree;
+
+                       /* Cleanup aged off entries. */
+                       *rthp = rth->dst.rt_next;
+                       rt_free(rth);
+               }
+               spin_unlock_bh(rt_hash_lock_addr(i));
+               sum += length;
+               sum2 += length*length;
+       }
+       if (samples) {
+               unsigned long avg = sum / samples;
+               unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+               rt_chain_length_max = max_t(unsigned long,
+                                       ip_rt_gc_elasticity,
+                                       (avg + 4*sd) >> FRACT_BITS);
+       }
+       rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * We call rt_check_expire() to scan part of the hash table.
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+       rt_check_expire();
+       schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -1271,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
        struct rtable *rt = (struct rtable *) dst;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                if (rt->peer == NULL)
                        rt_bind_peer(rt, rt->rt_dst, 1);
 
@@ -1282,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
                        iph->id = htons(inet_getid(rt->peer, more));
                        return;
                }
-       } else
+       } else if (!rt)
                printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
                       __builtin_return_address(0));
 
@@ -3178,6 +3274,13 @@ static ctl_table ipv4_route_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "gc_interval",
+               .data           = &ip_rt_gc_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
        {
                .procname       = "redirect_load",
                .data           = &ip_rt_redirect_load,
@@ -3388,6 +3491,11 @@ int __init ip_rt_init(void)
        devinet_init();
        ip_fib_init();
 
+       INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+       expires_ljiffies = jiffies;
+       schedule_delayed_work(&expires_work,
+               net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
        if (ip_rt_proc_init())
                printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
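The reinstated rt_check_expire() sizes each scan so that, over one ip_rt_gc_timeout interval, the whole hash table is walked roughly once: goal = (delta << rt_hash_log) / ip_rt_gc_timeout, capped at the table size. A small worked example with assumed values for the hash size, HZ and timeouts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int rt_hash_log = 17;                 /* 131072 buckets, assumed */
        unsigned long rt_hash_mask = (1UL << rt_hash_log) - 1;
        unsigned long gc_timeout = 300 * 100;          /* 300 s at HZ=100, assumed */
        unsigned long delta = 60 * 100;                /* 60 s since the last scan */

        uint64_t mult = (uint64_t)delta << rt_hash_log;
        unsigned int goal = (unsigned int)(mult / gc_timeout);

        if (goal > rt_hash_mask)
                goal = rt_hash_mask + 1;

        /* 60/300 of the table: about 26214 buckets this run. */
        printf("buckets to scan: %u of %lu\n", goal, rt_hash_mask + 1);
        return 0;
}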
index 84d0bd5cac939814edaed4379f09464a958d61bf..ec562713db9b16e3ee6a3b03ca17a13af2024e77 100644 (file)
@@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
        static atomic_t ipv6_fragmentation_id;
        int old, new;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                struct inet_peer *peer;
 
                if (!rt->rt6i_peer)
index dfd3a648a55107bda2ff14adb6f9e91c06449240..a18e6c3d36e37e699089ed5e0910c857da073d1c 100644 (file)
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
                copied += used;
                len -= used;
 
+               /* For non-stream protocols we get one packet per recvmsg call */
+               if (sk->sk_type != SOCK_STREAM)
+                       goto copy_uaddr;
+
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, 0);
                        *seq = 0;
                }
 
-               /* For non stream protcols we get one packet per recvmsg call */
-               if (sk->sk_type != SOCK_STREAM)
-                       goto copy_uaddr;
-
                /* Partial read */
                if (used + offset < skb->len)
                        continue;
@@ -857,6 +857,12 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
        }
        if (llc_sk(sk)->cmsg_flags)
                llc_cmsg_rcv(msg, skb);
+
+       if (!(flags & MSG_PEEK)) {
+               sk_eat_skb(sk, skb, 0);
+               *seq = 0;
+       }
+
        goto out;
 }
 
index 12571fb2881c2c3670aab3a765f7a6e0cf3afcf7..29fa5badde757d6de0ac2fc0359cc27ba37ff83b 100644 (file)
@@ -616,7 +616,7 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
        if ((cp) && (!cp->dest)) {
                dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
                                       cp->dport, &cp->vaddr, cp->vport,
-                                      cp->protocol, cp->fwmark);
+                                      cp->protocol, cp->fwmark, cp->flags);
                ip_vs_bind_dest(cp, dest);
                return dest;
        } else
index 008bf97cc91a58b14a0ef9fd6924eb6f71063f11..e1a66cf37f9a6fb1af5714fffa2199cd9f9bf27b 100644 (file)
@@ -619,15 +619,21 @@ struct ip_vs_dest *ip_vs_find_dest(struct net  *net, int af,
                                   const union nf_inet_addr *daddr,
                                   __be16 dport,
                                   const union nf_inet_addr *vaddr,
-                                  __be16 vport, __u16 protocol, __u32 fwmark)
+                                  __be16 vport, __u16 protocol, __u32 fwmark,
+                                  __u32 flags)
 {
        struct ip_vs_dest *dest;
        struct ip_vs_service *svc;
+       __be16 port = dport;
 
        svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
        if (!svc)
                return NULL;
-       dest = ip_vs_lookup_dest(svc, daddr, dport);
+       if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
+               port = 0;
+       dest = ip_vs_lookup_dest(svc, daddr, port);
+       if (!dest)
+               dest = ip_vs_lookup_dest(svc, daddr, port ^ dport);
        if (dest)
                atomic_inc(&dest->refcnt);
        ip_vs_service_put(svc);
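For fwmark-based services with a non-masquerading forwarding method, the destination may have been registered with port 0, so ip_vs_find_dest() now tries that first and uses port ^ dport to flip to the other candidate on a miss. The XOR works because 0 ^ dport == dport and dport ^ dport == 0; a tiny sketch with an illustrative port:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t dport = 8080;          /* real destination port, illustrative */
        uint16_t port = 0;              /* first try: port 0 (fwmark service) */

        printf("first lookup port:    %u\n", (unsigned)port);
        printf("fallback lookup port: %u\n", (unsigned)(port ^ dport));   /* 8080 */

        port = dport;                   /* first try was the real port instead */
        printf("fallback lookup port: %u\n", (unsigned)(port ^ dport));   /* 0 */
        return 0;
}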
index 3cdd479f9b5d179292182bb2711ec243b57736ee..2b6678c0ce147117de1a5b384eea4540ef4e79ee 100644 (file)
@@ -740,7 +740,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
                 * but still handled.
                 */
                dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
-                                      param->vport, protocol, fwmark);
+                                      param->vport, protocol, fwmark, flags);
 
                /*  Set the appropriate activity flag */
                if (protocol == IPPROTO_TCP) {
index ef21b221f0363a8700fbbc3aadd316423c4f9d66..257e77256c5cda4dc9d4038e6996f50a4ea7dbe9 100644 (file)
@@ -135,7 +135,7 @@ ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 static inline int
 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       long timeout = (ct->timeout.expires - jiffies) / HZ;
+       long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
 
        if (timeout < 0)
                timeout = 0;
@@ -1358,12 +1358,15 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
                                                    nf_ct_protonum(ct));
                if (helper == NULL) {
                        rcu_read_unlock();
+                       spin_unlock_bh(&nf_conntrack_lock);
 #ifdef CONFIG_MODULES
                        if (request_module("nfct-helper-%s", helpname) < 0) {
+                               spin_lock_bh(&nf_conntrack_lock);
                                err = -EOPNOTSUPP;
                                goto err1;
                        }
 
+                       spin_lock_bh(&nf_conntrack_lock);
                        rcu_read_lock();
                        helper = __nf_conntrack_helper_find(helpname,
                                                            nf_ct_l3num(ct),
@@ -1638,7 +1641,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
                          const struct nf_conntrack_expect *exp)
 {
        struct nf_conn *master = exp->master;
-       long timeout = (exp->timeout.expires - jiffies) / HZ;
+       long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
        struct nf_conn_help *help;
 
        if (timeout < 0)
@@ -1869,25 +1872,30 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
 
        err = -ENOMEM;
        skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (skb2 == NULL)
+       if (skb2 == NULL) {
+               nf_ct_expect_put(exp);
                goto out;
+       }
 
        rcu_read_lock();
        err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
                                      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
        rcu_read_unlock();
+       nf_ct_expect_put(exp);
        if (err <= 0)
                goto free;
 
-       nf_ct_expect_put(exp);
+       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       if (err < 0)
+               goto out;
 
-       return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       return 0;
 
 free:
        kfree_skb(skb2);
 out:
-       nf_ct_expect_put(exp);
-       return err;
+       /* this avoids a loop in nfnetlink. */
+       return err == -EAGAIN ? -ENOBUFS : err;
 }
 
 static int
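The two timeout hunks in this file cast expires and jiffies to long before subtracting. With plain unsigned arithmetic an already-expired timer yields a huge positive difference, so the later "if (timeout < 0)" clamp never fires; with the casts the difference stays negative. A userspace sketch, with HZ assumed to be 100:

#include <stdio.h>

int main(void)
{
        unsigned long jiffies = 1000;
        unsigned long expires = 400;    /* timer expired 600 ticks ago */

        long wrapped = (expires - jiffies) / 100;                 /* huge positive */
        long signed_diff = ((long)expires - (long)jiffies) / 100; /* -6 */

        printf("unsigned subtraction: %ld\n", wrapped);
        printf("signed subtraction:   %ld\n", signed_diff);
        return 0;
}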
index 5b138506690ec578105911ffb64309b5297d6458..9ddf1c3bfb39c5f67cb8f8c24ccdd22d03c38f10 100644 (file)
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
                break;
        }
 
-       if (sinfo->count.to)
+       if (sinfo->count.to >= sinfo->count.from)
                return what <= sinfo->count.to && what >= sinfo->count.from;
-       else
-               return what >= sinfo->count.from;
+       else /* inverted */
+               return what < sinfo->count.to || what > sinfo->count.from;
 }
 
 static int connbytes_mt_check(const struct xt_mtchk_param *par)
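The connbytes fix treats count.to < count.from as an inverted range: the rule now matches everything outside (to, from) instead of silently degenerating into a from-only check. A sketch of both cases, with made-up byte counts:

#include <stdbool.h>
#include <stdio.h>

static bool connbytes_match(unsigned long long what,
                            unsigned long long from, unsigned long long to)
{
        if (to >= from)                          /* ordinary range [from, to] */
                return what <= to && what >= from;
        else                                     /* inverted: outside (to, from) */
                return what < to || what > from;
}

int main(void)
{
        printf("%d\n", connbytes_match(500, 100, 1000));  /* 1: inside [100,1000] */
        printf("%d\n", connbytes_match(500, 1000, 100));  /* 0: excluded by inversion */
        printf("%d\n", connbytes_match(50,  1000, 100));  /* 1: below the inverted window */
        return 0;
}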
index 3925c6578767ea61be8cc66933d38a6c500cd1d2..ea66034499ce6bf54c3be5f02ec66ff2140f69a8 100644 (file)
@@ -69,7 +69,7 @@ static int __nci_request(struct nci_dev *ndev,
        __u32 timeout)
 {
        int rc = 0;
-       unsigned long completion_rc;
+       long completion_rc;
 
        ndev->req_status = NCI_REQ_PEND;
 
index 82a6f34d39d012fb35d9a0d490503fcc2048e6e2..d9d4970b9b07c0da22f97e29a8ac0f38ec4caff1 100644 (file)
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        if (snaplen > res)
                snaplen = res;
 
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf)
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto drop_n_acct;
 
        if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (po->tp_version <= TPACKET_V2) {
                if (macoff + snaplen > po->rx_ring.frame_size) {
                        if (po->copy_thresh &&
-                               atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-                               < (unsigned)sk->sk_rcvbuf) {
+                           atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                                if (skb_shared(skb)) {
                                        copy_skb = skb_clone(skb, GFP_ATOMIC);
                                } else {
@@ -2450,8 +2448,12 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
 {
        struct packet_sock *po = pkt_sk(sk);
 
-       if (po->fanout)
+       if (po->fanout) {
+               if (dev)
+                       dev_put(dev);
+
                return -EINVAL;
+       }
 
        lock_sock(sk);
 
index f88256cbacbfe4b89c202d591514fb28feaac11b..28de43092330abc125423d5328babc709b1f986f 100644 (file)
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;
 
-       if (nla_len(opt) < sizeof(*qopt))
+       if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
 
        qopt = nla_data(opt);
index eb3b9a86c6ed93d502a4629f241a0d2496b01385..a4ab207cdc5986f2ed1421d7981ecad01382f3b3 100644 (file)
@@ -488,7 +488,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
                return -EINVAL;
 
        s = sizeof(struct disttable) + n * sizeof(s16);
-       d = kmalloc(s, GFP_KERNEL);
+       d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
        if (!d)
                d = vmalloc(s);
        if (!d)
@@ -501,9 +501,10 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
        root_lock = qdisc_root_sleeping_lock(sch);
 
        spin_lock_bh(root_lock);
-       dist_free(q->delay_dist);
-       q->delay_dist = d;
+       swap(q->delay_dist, d);
        spin_unlock_bh(root_lock);
+
+       dist_free(d);
        return 0;
 }
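get_dist_table() now allocates the replacement table without the qdisc lock held (falling back from kmalloc to vmalloc, hence __GFP_NOWARN), swaps the pointer under the root lock, and frees the old table after dropping it. A userspace sketch of that pattern, using a pthread mutex in place of the qdisc root lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static int *dist_table;                 /* protected by root_lock */

static int replace_table(const int *src, size_t n)
{
        int *new_tbl = malloc(n * sizeof(*new_tbl));  /* no lock held here */
        int *old_tbl;

        if (!new_tbl)
                return -1;
        memcpy(new_tbl, src, n * sizeof(*new_tbl));

        pthread_mutex_lock(&root_lock);               /* only the swap is locked */
        old_tbl = dist_table;
        dist_table = new_tbl;
        pthread_mutex_unlock(&root_lock);

        free(old_tbl);                                /* possibly slow, lock dropped */
        return 0;
}

int main(void)
{
        int sample[4] = { 1, 2, 3, 4 };

        if (replace_table(sample, 4) == 0)
                printf("dist_table[2] = %d\n", dist_table[2]);
        free(dist_table);
        return 0;
}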
 
index 103343408593589e8f2343987f3f4b44afca5edb..7b0325459e718c6c0fb8d259ef7e10ecc6353130 100644 (file)
@@ -817,11 +817,11 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
 static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
 {
        unsigned long mask;
-       uint32_t limit, roundedF;
+       u64 limit, roundedF;
        int slot_shift = cl->grp->slot_shift;
 
        roundedF = qfq_round_down(cl->F, slot_shift);
-       limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
+       limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
 
        if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
                /* timestamp was stale */
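qfq_update_start() widens limit and roundedF to 64 bits because, with a finish time near the top of the 32-bit range, adding 1 << slot_shift wraps and the stale-timestamp test misfires. A small demonstration with an assumed slot_shift (assuming the usual 32-bit unsigned int):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        int slot_shift = 22;                             /* assumed group shift */
        uint32_t F = 0xffe00000u;                        /* finish time near UINT32_MAX */
        uint32_t rounded32 = F & ~((1u << slot_shift) - 1);
        uint64_t rounded64 = rounded32;

        uint32_t limit32 = rounded32 + (1u << slot_shift);    /* wraps to 0 */
        uint64_t limit64 = rounded64 + (1ULL << slot_shift);  /* 0x100000000 */

        printf("32-bit limit: 0x%" PRIx32 "\n", limit32);
        printf("64-bit limit: 0x%" PRIx64 "\n", limit64);
        return 0;
}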
index 152b5b3c3fffa978ae0d725d182f9aa54c8a9cf6..acd2edbc073ebf4ad334a4b0a16ff7b45413fac5 100644 (file)
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-               (unsigned long)sp->autoclose * HZ;
+               min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
 
        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
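The autoclose changes across the SCTP files replace the per-socket clamp with a global sctp_max_autoclose (defaulting to INT_MAX / HZ and tunable via sysctl), and the association timeout becomes min(autoclose, max_autoclose) * HZ so the conversion to jiffies cannot overflow. The clamping arithmetic, with HZ assumed to be 100:

#include <limits.h>
#include <stdio.h>

#define HZ 100                                        /* assumed tick rate */

int main(void)
{
        unsigned long max_autoclose = INT_MAX / HZ;   /* default upper bound, seconds */
        unsigned long user_autoclose = 4000000000UL;  /* absurd setsockopt value */

        unsigned long clamped = user_autoclose < max_autoclose ?
                                user_autoclose : max_autoclose;

        /* Without the clamp, user_autoclose * HZ overflows a 32-bit long. */
        printf("autoclose timeout: %lu jiffies\n", clamped * HZ);
        return 0;
}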
index 08b3cead6503c62f91dc8e97d9b817de7a79ffb9..817174eb5f41a50147dddded99bf222e421001b6 100644 (file)
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
        /* Keep track of how many bytes are in flight to the receiver. */
        asoc->outqueue.outstanding_bytes += datasize;
 
-       /* Update our view of the receiver's rwnd. Include sk_buff overhead
-        * while updating peer.rwnd so that it reduces the chances of a
-        * receiver running out of receive buffer space even when receive
-        * window is still open. This can happen when a sender is sending
-        * sending small messages.
-        */
-       datasize += sizeof(struct sk_buff);
+       /* Update our view of the receiver's rwnd. */
        if (datasize < rwnd)
                rwnd -= datasize;
        else
index 14c2b06028ffb1bea3acde6243ff9386d053616e..cfeb1d4a1ee6ca730595959946ced911b7442baa 100644 (file)
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                                        chunk->transport->flight_size -=
                                                        sctp_data_size(chunk);
                                q->outstanding_bytes -= sctp_data_size(chunk);
-                               q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                                       sizeof(struct sk_buff));
+                               q->asoc->peer.rwnd += sctp_data_size(chunk);
                        }
                        continue;
                }
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                         * (Section 7.2.4)), add the data size of those
                         * chunks to the rwnd.
                         */
-                       q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                               sizeof(struct sk_buff));
+                       q->asoc->peer.rwnd += sctp_data_size(chunk);
                        q->outstanding_bytes -= sctp_data_size(chunk);
                        if (chunk->transport)
                                transport->flight_size -= sctp_data_size(chunk);
index 61b9fca5a173bba9057f9a09dc2ac6cf45f34bc9..6f6ad8686833920fee313ad2dbd4b0cb4a23cade 100644 (file)
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_max_instreams              = SCTP_DEFAULT_INSTREAMS;
        sctp_max_outstreams             = SCTP_DEFAULT_OUTSTREAMS;
 
+       /* Initialize maximum autoclose timeout. */
+       sctp_max_autoclose              = INT_MAX / HZ;
+
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
 
index 13bf5fcdbff1b9f80d2d0c6288ab98762d499e74..54a7cd2fdd7af5c96d9a0354bfb6bfd56b27f09d 100644 (file)
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
                return -EINVAL;
        if (copy_from_user(&sp->autoclose, optval, optlen))
                return -EFAULT;
-       /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-       sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
 
        return 0;
 }
index 6b3952961b858369d8a63b908b0911357a9e28e7..60ffbd067ff75643ac3f5cc61e4ba20c2b8ef3b9 100644 (file)
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+       (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+       ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 
 extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
                .extra1         = &one,
                .extra2         = &rwnd_scale_max,
        },
+       {
+               .procname       = "max_autoclose",
+               .data           = &sctp_max_autoclose,
+               .maxlen         = sizeof(unsigned long),
+               .mode           = 0644,
+               .proc_handler   = &proc_doulongvec_minmax,
+               .extra1         = &max_autoclose_min,
+               .extra2         = &max_autoclose_max,
+       },
 
        { /* sentinel */ }
 };
index 2118d6446630e3ef64ae3e65b0e7066daf5a85c2..9049a5caeb257d783db1008224eabea7cefa7cef 100644 (file)
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
 {
        struct dst_entry *head, *next;
 
-       flow_cache_flush();
-
        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
        head = xfrm_policy_sk_bundles;
        xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
        }
 }
 
+static void xfrm_garbage_collect(struct net *net)
+{
+       flow_cache_flush();
+       __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+       flow_cache_flush_deferred();
+       __xfrm_garbage_collect(net);
+}
+
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
        do {
@@ -2422,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                if (likely(dst_ops->neigh_lookup == NULL))
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
                if (likely(afinfo->garbage_collect == NULL))
-                       afinfo->garbage_collect = __xfrm_garbage_collect;
+                       afinfo->garbage_collect = xfrm_garbage_collect_deferred;
                xfrm_policy_afinfo[afinfo->family] = afinfo;
        }
        write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2516,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
 
        switch (event) {
        case NETDEV_DOWN:
-               __xfrm_garbage_collect(dev_net(dev));
+               xfrm_garbage_collect(dev_net(dev));
        }
        return NOTIFY_DONE;
 }
index ba573fe7c74d5bfe0495372931ebff69406f35c2..914833d99b06f78242fa12584c568ecef6a5e65a 100644 (file)
@@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            --directory=$(srctree) --directory=$(objtree)           \
            --output $(obj)/config.pot
        $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
-       $(Q)ln -fs Kconfig.x86 arch/um/Kconfig
-       $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`;    \
+       $(Q)(for i in `ls $(srctree)/arch/*/Kconfig      \
+           $(srctree)/arch/*/um/Kconfig`;               \
            do                                           \
                echo "  GEN $$i";                        \
                $(obj)/kxgettext $$i                     \
@@ -69,7 +69,6 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            done )
        $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
            --output $(obj)/linux.pot
-       $(Q)rm -f $(srctree)/arch/um/Kconfig
        $(Q)rm -f $(obj)/config.pot
 
 PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
index f40a6af6bf40068f2a074bf6bff86d68b2a05440..54e35c1e5948c521f9fdebf57c27072b6d844f34 100644 (file)
@@ -462,7 +462,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr,  /* is SHT_REL or SHT_RELA */
                succeed_file();
        }
        if (w(txthdr->sh_type) != SHT_PROGBITS ||
-           !(w(txthdr->sh_flags) & SHF_EXECINSTR))
+           !(_w(txthdr->sh_flags) & SHF_EXECINSTR))
                return NULL;
        return txtname;
 }
index 0c6cc69c8f86d68fb24f1b954c64370e8da88c4a..e2f684aeb70c152a61038c0d2d19525774144d59 100644 (file)
@@ -381,7 +381,7 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
                                     void **value, size_t *len)
 {
        if (unlikely(IS_PRIVATE(inode)))
-               return 0;
+               return -EOPNOTSUPP;
        return security_ops->inode_init_security(inode, dir, qstr, name, value,
                                                 len);
 }
index 6e5addeb236b49595ea563e7525d5a262a32e295..73516f69ac7ca8a33244cb300df8958ac2d77e20 100644 (file)
@@ -899,6 +899,10 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
                /* AC97 v2.2 specifications says minimum 1 us. */
                udelay(2);
                gpio_set_value(chip->reset_pin, 1);
+       } else {
+               ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
+               udelay(2);
+               ac97c_writel(chip, MR, AC97C_MR_ENA);
        }
 }
 
index bfdc52370ad02de96bd9cf1859614db91e682949..d3b0a20744f1950ee2c0c882f77d72b6ab0a1959 100644 (file)
@@ -235,6 +235,7 @@ static int wm8776_hw_params(struct snd_pcm_substream *substream,
        switch (snd_pcm_format_width(params_format(params))) {
        case 16:
                iface = 0;
+               break;
        case 20:
                iface = 0x10;
                break;
index 3ad0925d23a9c85e39b508021ef9fcf2bbb1ad81..758e3b36d4cfd525846a1968987d80bea8e4bcce 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
 #include "irq.h"
 
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -480,12 +482,76 @@ static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
        return r;
 }
 
+/*
+ * We want to test whether the caller has been granted permissions to
+ * use this device.  To be able to configure and control the device,
+ * the user needs access to PCI configuration space and BAR resources.
+ * These are accessed through PCI sysfs.  PCI config space is often
+ * passed to the process calling this ioctl via file descriptor, so we
+ * can't rely on access to that file.  We can check for permissions
+ * on each of the BAR resource files, which is a pretty clear
+ * indicator that the user has been granted access to the device.
+ */
+static int probe_sysfs_permissions(struct pci_dev *dev)
+{
+#ifdef CONFIG_SYSFS
+       int i;
+       bool bar_found = false;
+
+       for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
+               char *kpath, *syspath;
+               struct path path;
+               struct inode *inode;
+               int r;
+
+               if (!pci_resource_len(dev, i))
+                       continue;
+
+               kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
+               if (!kpath)
+                       return -ENOMEM;
+
+               /* Per sysfs-rules, sysfs is always at /sys */
+               syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
+               kfree(kpath);
+               if (!syspath)
+                       return -ENOMEM;
+
+               r = kern_path(syspath, LOOKUP_FOLLOW, &path);
+               kfree(syspath);
+               if (r)
+                       return r;
+
+               inode = path.dentry->d_inode;
+
+               r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
+               path_put(&path);
+               if (r)
+                       return r;
+
+               bar_found = true;
+       }
+
+       /* If no resources, probably something special */
+       if (!bar_found)
+               return -EPERM;
+
+       return 0;
+#else
+       return -EINVAL; /* No way to control the device without sysfs */
+#endif
+}
+
 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
 {
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;
+       u8 header_type;
+
+       if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
+               return -EINVAL;
 
        mutex_lock(&kvm->lock);
        idx = srcu_read_lock(&kvm->srcu);
@@ -513,6 +579,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                r = -EINVAL;
                goto out_free;
        }
+
+       /* Don't allow bridges to be assigned */
+       pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+       if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+               r = -EPERM;
+               goto out_put;
+       }
+
+       r = probe_sysfs_permissions(dev);
+       if (r)
+               goto out_put;
+
        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
@@ -544,16 +622,14 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 
        list_add(&match->list, &kvm->arch.assigned_dev_head);
 
-       if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-               if (!kvm->arch.iommu_domain) {
-                       r = kvm_iommu_map_guest(kvm);
-                       if (r)
-                               goto out_list_del;
-               }
-               r = kvm_assign_device(kvm, match);
+       if (!kvm->arch.iommu_domain) {
+               r = kvm_iommu_map_guest(kvm);
                if (r)
                        goto out_list_del;
        }
+       r = kvm_assign_device(kvm, match);
+       if (r)
+               goto out_list_del;
 
 out:
        srcu_read_unlock(&kvm->srcu, idx);
@@ -593,8 +669,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
                goto out;
        }
 
-       if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
-               kvm_deassign_device(kvm, match);
+       kvm_deassign_device(kvm, match);
 
        kvm_free_assigned_device(kvm, match);
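The probe_sysfs_permissions() helper added above walks the device's BAR resource files under /sys and requires read/write access to each before allowing assignment. A rough userspace analogue of the same check, using a hypothetical PCI address and access(2) in place of kern_path()/inode_permission():

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char path[128];
        int i, bar_found = 0;

        for (i = 0; i < 6; i++) {       /* standard PCI BARs 0..5 */
                snprintf(path, sizeof(path),
                         "/sys/bus/pci/devices/0000:00:1f.0/resource%d", i);
                if (access(path, R_OK | W_OK) == 0) {
                        bar_found = 1;
                        printf("usable BAR resource: %s\n", path);
                }
        }

        if (!bar_found)
                printf("no accessible BAR resources; assignment would be denied\n");
        return 0;
}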